xref: /openbmc/qemu/linux-user/syscall.c (revision 4f4e5567)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 
144 #ifndef CLONE_IO
145 #define CLONE_IO                0x80000000      /* Clone io context */
146 #endif
147 
148 /* We can't directly call the host clone syscall, because this will
149  * badly confuse libc (breaking mutexes, for example). So we must
150  * divide clone flags into:
151  *  * flag combinations that look like pthread_create()
152  *  * flag combinations that look like fork()
153  *  * flags we can implement within QEMU itself
154  *  * flags we can't support and will return an error for
155  */
156 /* For thread creation, all these flags must be present; for
157  * fork, none must be present.
158  */
159 #define CLONE_THREAD_FLAGS                              \
160     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
161      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
162 
163 /* These flags are ignored:
164  * CLONE_DETACHED is now ignored by the kernel;
165  * CLONE_IO is just an optimisation hint to the I/O scheduler
166  */
167 #define CLONE_IGNORED_FLAGS                     \
168     (CLONE_DETACHED | CLONE_IO)
169 
170 /* Flags for fork which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_FORK_FLAGS               \
172     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
173      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
174 
175 /* Flags for thread creation which we can implement within QEMU itself */
176 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
177     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
178      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
179 
180 #define CLONE_INVALID_FORK_FLAGS                                        \
181     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
182 
183 #define CLONE_INVALID_THREAD_FLAGS                                      \
184     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
185        CLONE_IGNORED_FLAGS))
186 
187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
188  * have almost all been allocated. We cannot support any of
189  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
190  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
191  * The checks against the invalid thread masks above will catch these.
192  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
193  */
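
/*
 * Illustration of how these masks are meant to be used (a sketch that
 * follows the description above, not a verbatim quote of do_fork()):
 *
 *     if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
 *         if (flags & CLONE_INVALID_THREAD_FLAGS) {
 *             return -TARGET_EINVAL;     // unsupported thread flag bits
 *         }
 *         // ... create a new guest thread via pthread_create() ...
 *     } else if (!(flags & CLONE_THREAD_FLAGS)) {
 *         if (flags & CLONE_INVALID_FORK_FLAGS) {
 *             return -TARGET_EINVAL;     // unsupported fork flag bits
 *         }
 *         // ... plain fork() on the host ...
 *     } else {
 *         return -TARGET_EINVAL;         // partial thread flag set
 *     }
 */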
194 
195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
196  * once. This exercises the codepaths for restart.
197  */
198 //#define DEBUG_ERESTARTSYS
199 
200 //#include <linux/msdos_fs.h>
201 #define VFAT_IOCTL_READDIR_BOTH \
202     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
203 #define VFAT_IOCTL_READDIR_SHORT \
204     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
205 
206 #undef _syscall0
207 #undef _syscall1
208 #undef _syscall2
209 #undef _syscall3
210 #undef _syscall4
211 #undef _syscall5
212 #undef _syscall6
213 
214 #define _syscall0(type,name)		\
215 static type name (void)			\
216 {					\
217 	return syscall(__NR_##name);	\
218 }
219 
220 #define _syscall1(type,name,type1,arg1)		\
221 static type name (type1 arg1)			\
222 {						\
223 	return syscall(__NR_##name, arg1);	\
224 }
225 
226 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
227 static type name (type1 arg1,type2 arg2)		\
228 {							\
229 	return syscall(__NR_##name, arg1, arg2);	\
230 }
231 
232 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
233 static type name (type1 arg1,type2 arg2,type3 arg3)		\
234 {								\
235 	return syscall(__NR_##name, arg1, arg2, arg3);		\
236 }
237 
238 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
239 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
240 {										\
241 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
242 }
243 
244 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
245 		  type5,arg5)							\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
249 }
250 
251 
252 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
253 		  type5,arg5,type6,arg6)					\
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
255                   type6 arg6)							\
256 {										\
257 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
258 }
259 
260 
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
271 #endif
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
274 #endif
275 #define __NR_sys_inotify_init __NR_inotify_init
276 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
277 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
278 #define __NR_sys_statx __NR_statx
279 
280 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
281 #define __NR__llseek __NR_lseek
282 #endif
283 
284 /* Newer kernel ports have llseek() instead of _llseek() */
285 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
286 #define TARGET_NR__llseek TARGET_NR_llseek
287 #endif
288 
289 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
290 #ifndef TARGET_O_NONBLOCK_MASK
291 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
292 #endif
293 
294 #define __NR_sys_gettid __NR_gettid
295 _syscall0(int, sys_gettid)
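
/*
 * For illustration, the two lines above expand to:
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);   // i.e. __NR_gettid
 *     }
 *
 * i.e. a thin static wrapper that issues the raw host syscall,
 * independent of whether the host libc provides a gettid() wrapper.
 */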
296 
297 /* For the case of a 64-bit guest on a 32-bit host we must emulate
298  * getdents using getdents64, because otherwise the host
299  * might hand us back more dirent records than we can fit
300  * into the guest buffer after structure format conversion.
301  * Otherwise we emulate getdents with getdents if the host has it.
302  */
303 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
304 #define EMULATE_GETDENTS_WITH_GETDENTS
305 #endif
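
/*
 * Rough illustration of the problem described above: a host
 * linux_dirent with 32-bit d_ino/d_off for an 8-character name takes
 * roughly 20 bytes, while the converted record for a 64-bit guest
 * needs roughly 32, so a bufferful of host records may not fit back
 * into the guest buffer.  getdents64 records use 64-bit fields on
 * every host, which avoids that expansion.
 */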
306 
307 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
308 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
309 #endif
310 #if (defined(TARGET_NR_getdents) && \
311       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
312     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
313 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
314 #endif
315 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
316 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
317           loff_t *, res, uint, wh);
318 #endif
319 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
320 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
321           siginfo_t *, uinfo)
322 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
323 #ifdef __NR_exit_group
324 _syscall1(int,exit_group,int,error_code)
325 #endif
326 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
327 _syscall1(int,set_tid_address,int *,tidptr)
328 #endif
329 #if defined(__NR_futex)
330 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
331           const struct timespec *,timeout,int *,uaddr2,int,val3)
332 #endif
333 #if defined(__NR_futex_time64)
334 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
335           const struct timespec *,timeout,int *,uaddr2,int,val3)
336 #endif
337 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
338 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
339           unsigned long *, user_mask_ptr);
340 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
341 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
342           unsigned long *, user_mask_ptr);
343 /* struct sched_attr is not defined by glibc */
344 struct sched_attr {
345     uint32_t size;
346     uint32_t sched_policy;
347     uint64_t sched_flags;
348     int32_t sched_nice;
349     uint32_t sched_priority;
350     uint64_t sched_runtime;
351     uint64_t sched_deadline;
352     uint64_t sched_period;
353     uint32_t sched_util_min;
354     uint32_t sched_util_max;
355 };
356 #define __NR_sys_sched_getattr __NR_sched_getattr
357 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
358           unsigned int, size, unsigned int, flags);
359 #define __NR_sys_sched_setattr __NR_sched_setattr
360 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
361           unsigned int, flags);
362 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
363 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
364 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
365 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
366           const struct sched_param *, param);
367 #define __NR_sys_sched_getparam __NR_sched_getparam
368 _syscall2(int, sys_sched_getparam, pid_t, pid,
369           struct sched_param *, param);
370 #define __NR_sys_sched_setparam __NR_sched_setparam
371 _syscall2(int, sys_sched_setparam, pid_t, pid,
372           const struct sched_param *, param);
373 #define __NR_sys_getcpu __NR_getcpu
374 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
375 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
376           void *, arg);
377 _syscall2(int, capget, struct __user_cap_header_struct *, header,
378           struct __user_cap_data_struct *, data);
379 _syscall2(int, capset, struct __user_cap_header_struct *, header,
380           struct __user_cap_data_struct *, data);
381 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
382 _syscall2(int, ioprio_get, int, which, int, who)
383 #endif
384 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
385 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
386 #endif
387 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
388 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
389 #endif
390 
391 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
392 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
393           unsigned long, idx1, unsigned long, idx2)
394 #endif
395 
396 /*
397  * It is assumed that struct statx is architecture independent.
398  */
399 #if defined(TARGET_NR_statx) && defined(__NR_statx)
400 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
401           unsigned int, mask, struct target_statx *, statxbuf)
402 #endif
403 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
404 _syscall2(int, membarrier, int, cmd, int, flags)
405 #endif
406 
407 static const bitmask_transtbl fcntl_flags_tbl[] = {
408   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
409   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
410   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
411   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
412   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
413   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
414   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
415   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
416   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
417   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
418   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
419   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
420   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
421 #if defined(O_DIRECT)
422   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
423 #endif
424 #if defined(O_NOATIME)
425   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
426 #endif
427 #if defined(O_CLOEXEC)
428   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
429 #endif
430 #if defined(O_PATH)
431   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
432 #endif
433 #if defined(O_TMPFILE)
434   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
435 #endif
436   /* Don't terminate the list prematurely on 64-bit host+guest.  */
437 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
438   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
439 #endif
440   { 0, 0, 0, 0 }
441 };
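
/*
 * Sketch of how a bitmask_transtbl is consumed (hypothetical field
 * names tmask/tbits/hmask/hbits for the four columns; the real helpers
 * are the target_to_host/host_to_target bitmask converters declared in
 * the linux-user headers):
 *
 *     unsigned int host_flags = 0;
 *     for (const bitmask_transtbl *e = fcntl_flags_tbl; e->tmask; e++) {
 *         if ((target_flags & e->tmask) == e->tbits) {
 *             host_flags |= e->hbits;
 *         }
 *     }
 *
 * The masked compare is what lets the O_ACCMODE rows translate a
 * two-bit access mode rather than a single flag bit.
 */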
442 
443 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
444 
445 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
446 #if defined(__NR_utimensat)
447 #define __NR_sys_utimensat __NR_utimensat
448 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
449           const struct timespec *,tsp,int,flags)
450 #else
451 static int sys_utimensat(int dirfd, const char *pathname,
452                          const struct timespec times[2], int flags)
453 {
454     errno = ENOSYS;
455     return -1;
456 }
457 #endif
458 #endif /* TARGET_NR_utimensat */
459 
460 #ifdef TARGET_NR_renameat2
461 #if defined(__NR_renameat2)
462 #define __NR_sys_renameat2 __NR_renameat2
463 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
464           const char *, new, unsigned int, flags)
465 #else
466 static int sys_renameat2(int oldfd, const char *old,
467                          int newfd, const char *new, int flags)
468 {
469     if (flags == 0) {
470         return renameat(oldfd, old, newfd, new);
471     }
472     errno = ENOSYS;
473     return -1;
474 }
475 #endif
476 #endif /* TARGET_NR_renameat2 */
477 
478 #ifdef CONFIG_INOTIFY
479 #include <sys/inotify.h>
480 
481 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
482 static int sys_inotify_init(void)
483 {
484   return (inotify_init());
485 }
486 #endif
487 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
488 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
489 {
490   return (inotify_add_watch(fd, pathname, mask));
491 }
492 #endif
493 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
494 static int sys_inotify_rm_watch(int fd, int32_t wd)
495 {
496   return (inotify_rm_watch(fd, wd));
497 }
498 #endif
499 #ifdef CONFIG_INOTIFY1
500 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
501 static int sys_inotify_init1(int flags)
502 {
503   return (inotify_init1(flags));
504 }
505 #endif
506 #endif
507 #else
508 /* Userspace can usually survive at runtime without inotify */
509 #undef TARGET_NR_inotify_init
510 #undef TARGET_NR_inotify_init1
511 #undef TARGET_NR_inotify_add_watch
512 #undef TARGET_NR_inotify_rm_watch
513 #endif /* CONFIG_INOTIFY  */
514 
515 #if defined(TARGET_NR_prlimit64)
516 #ifndef __NR_prlimit64
517 # define __NR_prlimit64 -1
518 #endif
519 #define __NR_sys_prlimit64 __NR_prlimit64
520 /* The glibc rlimit structure may not be the one used by the underlying syscall */
521 struct host_rlimit64 {
522     uint64_t rlim_cur;
523     uint64_t rlim_max;
524 };
525 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
526           const struct host_rlimit64 *, new_limit,
527           struct host_rlimit64 *, old_limit)
528 #endif
529 
530 
531 #if defined(TARGET_NR_timer_create)
532 /* Maximum of 32 active POSIX timers allowed at any one time. */
533 static timer_t g_posix_timers[32] = { 0, };
534 
535 static inline int next_free_host_timer(void)
536 {
537     int k;
538     /* FIXME: Does finding the next free slot require a lock? */
539     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
540         if (g_posix_timers[k] == 0) {
541             g_posix_timers[k] = (timer_t) 1;
542             return k;
543         }
544     }
545     return -1;
546 }
547 #endif
548 
549 static inline int host_to_target_errno(int host_errno)
550 {
551     switch (host_errno) {
552 #define E(X)  case X: return TARGET_##X;
553 #include "errnos.c.inc"
554 #undef E
555     default:
556         return host_errno;
557     }
558 }
559 
560 static inline int target_to_host_errno(int target_errno)
561 {
562     switch (target_errno) {
563 #define E(X)  case TARGET_##X: return X;
564 #include "errnos.c.inc"
565 #undef E
566     default:
567         return target_errno;
568     }
569 }
570 
571 static inline abi_long get_errno(abi_long ret)
572 {
573     if (ret == -1)
574         return -host_to_target_errno(errno);
575     else
576         return ret;
577 }
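
/*
 * Typical usage pattern (illustrative): host wrappers follow the usual
 * "-1 and errno" convention, and get_errno() folds that into the
 * negative-target-errno convention used for guest return values, e.g.:
 *
 *     ret = get_errno(safe_read(fd, p, count));
 *     if (is_error(ret)) {
 *         // ret is already e.g. -TARGET_EINTR, ready to hand back
 *     }
 *
 * (safe_read() is generated further below; is_error() comes from the
 * linux-user headers.)
 */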
578 
579 const char *target_strerror(int err)
580 {
581     if (err == QEMU_ERESTARTSYS) {
582         return "To be restarted";
583     }
584     if (err == QEMU_ESIGRETURN) {
585         return "Successful exit from sigreturn";
586     }
587 
588     return strerror(target_to_host_errno(err));
589 }
590 
591 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
592 {
593     int i;
594     uint8_t b;
595     if (usize <= ksize) {
596         return 1;
597     }
598     for (i = ksize; i < usize; i++) {
599         if (get_user_u8(b, addr + i)) {
600             return -TARGET_EFAULT;
601         }
602         if (b != 0) {
603             return 0;
604         }
605     }
606     return 1;
607 }
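
/*
 * check_zeroed_user() supports "extensible struct" syscalls such as
 * sched_setattr(): when the guest passes a struct larger than the part
 * QEMU understands (usize > ksize), every byte of the extra tail must
 * read back as zero.  It returns 1 when that holds (or usize <= ksize),
 * 0 when a non-zero tail byte is found, and -TARGET_EFAULT when the
 * guest memory cannot be read.  Sketch of a caller (hypothetical names,
 * not a quote of a real call site):
 *
 *     ret = check_zeroed_user(attr_addr, known_size, guest_size);
 *     if (ret < 0) {
 *         return ret;                // -TARGET_EFAULT
 *     } else if (ret == 0) {
 *         return -TARGET_E2BIG;      // guest set fields we don't know about
 *     }
 */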
608 
609 #define safe_syscall0(type, name) \
610 static type safe_##name(void) \
611 { \
612     return safe_syscall(__NR_##name); \
613 }
614 
615 #define safe_syscall1(type, name, type1, arg1) \
616 static type safe_##name(type1 arg1) \
617 { \
618     return safe_syscall(__NR_##name, arg1); \
619 }
620 
621 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
622 static type safe_##name(type1 arg1, type2 arg2) \
623 { \
624     return safe_syscall(__NR_##name, arg1, arg2); \
625 }
626 
627 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
628 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
629 { \
630     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
631 }
632 
633 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
634     type4, arg4) \
635 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
636 { \
637     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
638 }
639 
640 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
641     type4, arg4, type5, arg5) \
642 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
643     type5 arg5) \
644 { \
645     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
646 }
647 
648 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
649     type4, arg4, type5, arg5, type6, arg6) \
650 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
651     type5 arg5, type6 arg6) \
652 { \
653     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
654 }
655 
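/*
 * For illustration, the safe_syscall3(ssize_t, read, ...) use just
 * below expands to:
 *
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 *
 * safe_syscall() (see user/safe-syscall.h) behaves like a raw
 * syscall() but cooperates with guest signal delivery so that a
 * blocking call can be restarted cleanly; on failure it returns -1
 * with errno set, which get_errno() above then converts for the guest.
 */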
656 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
657 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
658 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
659               int, flags, mode_t, mode)
660 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
661 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
662               struct rusage *, rusage)
663 #endif
664 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
665               int, options, struct rusage *, rusage)
666 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
667 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
668     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
669 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
670               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
671 #endif
672 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
673 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
674               struct timespec *, tsp, const sigset_t *, sigmask,
675               size_t, sigsetsize)
676 #endif
677 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
678               int, maxevents, int, timeout, const sigset_t *, sigmask,
679               size_t, sigsetsize)
680 #if defined(__NR_futex)
681 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
682               const struct timespec *,timeout,int *,uaddr2,int,val3)
683 #endif
684 #if defined(__NR_futex_time64)
685 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
686               const struct timespec *,timeout,int *,uaddr2,int,val3)
687 #endif
688 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
689 safe_syscall2(int, kill, pid_t, pid, int, sig)
690 safe_syscall2(int, tkill, int, tid, int, sig)
691 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
692 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
693 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
694 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
695               unsigned long, pos_l, unsigned long, pos_h)
696 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
697               unsigned long, pos_l, unsigned long, pos_h)
698 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
699               socklen_t, addrlen)
700 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
701               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
702 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
703               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
704 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
705 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
706 safe_syscall2(int, flock, int, fd, int, operation)
707 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
708 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
709               const struct timespec *, uts, size_t, sigsetsize)
710 #endif
711 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
712               int, flags)
713 #if defined(TARGET_NR_nanosleep)
714 safe_syscall2(int, nanosleep, const struct timespec *, req,
715               struct timespec *, rem)
716 #endif
717 #if defined(TARGET_NR_clock_nanosleep) || \
718     defined(TARGET_NR_clock_nanosleep_time64)
719 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
720               const struct timespec *, req, struct timespec *, rem)
721 #endif
722 #ifdef __NR_ipc
723 #ifdef __s390x__
724 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
725               void *, ptr)
726 #else
727 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
728               void *, ptr, long, fifth)
729 #endif
730 #endif
731 #ifdef __NR_msgsnd
732 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
733               int, flags)
734 #endif
735 #ifdef __NR_msgrcv
736 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
737               long, msgtype, int, flags)
738 #endif
739 #ifdef __NR_semtimedop
740 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
741               unsigned, nsops, const struct timespec *, timeout)
742 #endif
743 #if defined(TARGET_NR_mq_timedsend) || \
744     defined(TARGET_NR_mq_timedsend_time64)
745 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
746               size_t, len, unsigned, prio, const struct timespec *, timeout)
747 #endif
748 #if defined(TARGET_NR_mq_timedreceive) || \
749     defined(TARGET_NR_mq_timedreceive_time64)
750 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
751               size_t, len, unsigned *, prio, const struct timespec *, timeout)
752 #endif
753 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
754 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
755               int, outfd, loff_t *, poutoff, size_t, length,
756               unsigned int, flags)
757 #endif
758 
759 /* We do ioctl like this rather than via safe_syscall3 to preserve the
760  * "third argument might be integer or pointer or not present" behaviour of
761  * the libc function.
762  */
763 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
764 /* Similarly for fcntl. Note that callers must always:
765  *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
766  *  - use the flock64 struct rather than unsuffixed flock
767  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
768  */
769 #ifdef __NR_fcntl64
770 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
771 #else
772 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
773 #endif
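
/*
 * Example of the convention described above (illustrative only):
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * Always using the 64-suffixed commands and struct flock64 means the
 * same code handles 64-bit file offsets whether this macro resolves to
 * the host fcntl64 syscall (32-bit hosts) or to plain fcntl (64-bit
 * hosts, where the 64-suffixed names are simply aliases).
 */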
774 
775 static inline int host_to_target_sock_type(int host_type)
776 {
777     int target_type;
778 
779     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
780     case SOCK_DGRAM:
781         target_type = TARGET_SOCK_DGRAM;
782         break;
783     case SOCK_STREAM:
784         target_type = TARGET_SOCK_STREAM;
785         break;
786     default:
787         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
788         break;
789     }
790 
791 #if defined(SOCK_CLOEXEC)
792     if (host_type & SOCK_CLOEXEC) {
793         target_type |= TARGET_SOCK_CLOEXEC;
794     }
795 #endif
796 
797 #if defined(SOCK_NONBLOCK)
798     if (host_type & SOCK_NONBLOCK) {
799         target_type |= TARGET_SOCK_NONBLOCK;
800     }
801 #endif
802 
803     return target_type;
804 }
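
/*
 * Worked example (illustrative): a host value of SOCK_STREAM |
 * SOCK_CLOEXEC comes back as TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC,
 * which matters on targets whose numeric SOCK_* values differ from the
 * host's (which is why this translation exists at all).
 */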
805 
806 static abi_ulong target_brk;
807 static abi_ulong target_original_brk;
808 static abi_ulong brk_page;
809 
810 void target_set_brk(abi_ulong new_brk)
811 {
812     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
813     brk_page = HOST_PAGE_ALIGN(target_brk);
814 }
815 
816 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
817 #define DEBUGF_BRK(message, args...)
818 
819 /* do_brk() must return target values and target errnos. */
820 abi_long do_brk(abi_ulong new_brk)
821 {
822     abi_long mapped_addr;
823     abi_ulong new_alloc_size;
824 
825     /* brk pointers are always untagged */
826 
827     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
828 
829     if (!new_brk) {
830         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
831         return target_brk;
832     }
833     if (new_brk < target_original_brk) {
834         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
835                    target_brk);
836         return target_brk;
837     }
838 
839     /* If the new brk is less than the highest page reserved to the
840      * target heap allocation, set it and we're almost done...  */
841     if (new_brk <= brk_page) {
842         /* Heap contents are initialized to zero, as for anonymous
843          * mapped pages.  */
844         if (new_brk > target_brk) {
845             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
846         }
847         target_brk = new_brk;
848         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
849         return target_brk;
850     }
851 
852     /* We need to allocate more memory after the brk... Note that
853      * we don't use MAP_FIXED because that will map over the top of
854      * any existing mapping (like the one with the host libc or qemu
855      * itself); instead we treat "mapped but at wrong address" as
856      * a failure and unmap again.
857      */
858     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
859     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
860                                         PROT_READ|PROT_WRITE,
861                                         MAP_ANON|MAP_PRIVATE, 0, 0));
862 
863     if (mapped_addr == brk_page) {
864         /* Heap contents are initialized to zero, as for anonymous
865          * mapped pages.  Technically the new pages are already
866          * initialized to zero since they *are* anonymous mapped
867          * pages, however we have to take care with the contents that
868          * come from the remaining part of the previous page: it may
869          * contain garbage data due to previous heap usage (grown
870          * then shrunk).  */
871         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
872 
873         target_brk = new_brk;
874         brk_page = HOST_PAGE_ALIGN(target_brk);
875         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
876             target_brk);
877         return target_brk;
878     } else if (mapped_addr != -1) {
879         /* Mapped but at wrong address, meaning there wasn't actually
880          * enough space for this brk.
881          */
882         target_munmap(mapped_addr, new_alloc_size);
883         mapped_addr = -1;
884         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
885     } else {
887         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
888     }
889 
890 #if defined(TARGET_ALPHA)
891     /* We (partially) emulate OSF/1 on Alpha, which requires we
892        return a proper errno, not an unchanged brk value.  */
893     return -TARGET_ENOMEM;
894 #endif
895     /* For everything else, return the previous break. */
896     return target_brk;
897 }
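
/*
 * Worked example (illustrative, assuming 4 KiB host pages): with
 * target_brk = 0x4ff800 and brk_page = 0x500000, a guest brk(0x500800)
 * cannot be satisfied from the reserved page, so new_alloc_size =
 * HOST_PAGE_ALIGN(0x800) = 0x1000 and one page is mapped at 0x500000;
 * on success target_brk becomes 0x500800 and brk_page 0x501000.
 */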
898 
899 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
900     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
901 static inline abi_long copy_from_user_fdset(fd_set *fds,
902                                             abi_ulong target_fds_addr,
903                                             int n)
904 {
905     int i, nw, j, k;
906     abi_ulong b, *target_fds;
907 
908     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
909     if (!(target_fds = lock_user(VERIFY_READ,
910                                  target_fds_addr,
911                                  sizeof(abi_ulong) * nw,
912                                  1)))
913         return -TARGET_EFAULT;
914 
915     FD_ZERO(fds);
916     k = 0;
917     for (i = 0; i < nw; i++) {
918         /* grab the abi_ulong */
919         __get_user(b, &target_fds[i]);
920         for (j = 0; j < TARGET_ABI_BITS; j++) {
921             /* check the bit inside the abi_ulong */
922             if ((b >> j) & 1)
923                 FD_SET(k, fds);
924             k++;
925         }
926     }
927 
928     unlock_user(target_fds, target_fds_addr, 0);
929 
930     return 0;
931 }
932 
933 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
934                                                  abi_ulong target_fds_addr,
935                                                  int n)
936 {
937     if (target_fds_addr) {
938         if (copy_from_user_fdset(fds, target_fds_addr, n))
939             return -TARGET_EFAULT;
940         *fds_ptr = fds;
941     } else {
942         *fds_ptr = NULL;
943     }
944     return 0;
945 }
946 
947 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
948                                           const fd_set *fds,
949                                           int n)
950 {
951     int i, nw, j, k;
952     abi_long v;
953     abi_ulong *target_fds;
954 
955     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
956     if (!(target_fds = lock_user(VERIFY_WRITE,
957                                  target_fds_addr,
958                                  sizeof(abi_ulong) * nw,
959                                  0)))
960         return -TARGET_EFAULT;
961 
962     k = 0;
963     for (i = 0; i < nw; i++) {
964         v = 0;
965         for (j = 0; j < TARGET_ABI_BITS; j++) {
966             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
967             k++;
968         }
969         __put_user(v, &target_fds[i]);
970     }
971 
972     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
973 
974     return 0;
975 }
976 #endif
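
/*
 * Worked example (illustrative): with TARGET_ABI_BITS == 32, guest fd
 * 37 lives in abi_ulong word 1, bit 5 of the target fd_set, whereas
 * FD_SET(37, fds) on a 64-bit host puts it in host word 0, bit 37;
 * hence the explicit bit-by-bit repacking above instead of a memcpy.
 */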
977 
978 #if defined(__alpha__)
979 #define HOST_HZ 1024
980 #else
981 #define HOST_HZ 100
982 #endif
983 
984 static inline abi_long host_to_target_clock_t(long ticks)
985 {
986 #if HOST_HZ == TARGET_HZ
987     return ticks;
988 #else
989     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
990 #endif
991 }
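
/*
 * Example (illustrative): an Alpha host (HOST_HZ 1024) reporting 2048
 * ticks to a target with TARGET_HZ 100 yields (2048 * 100) / 1024 = 200
 * target clock ticks.
 */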
992 
993 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
994                                              const struct rusage *rusage)
995 {
996     struct target_rusage *target_rusage;
997 
998     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
999         return -TARGET_EFAULT;
1000     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1001     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1002     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1003     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1004     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1005     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1006     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1007     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1008     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1009     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1010     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1011     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1012     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1013     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1014     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1015     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1016     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1017     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1018     unlock_user_struct(target_rusage, target_addr, 1);
1019 
1020     return 0;
1021 }
1022 
1023 #ifdef TARGET_NR_setrlimit
1024 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1025 {
1026     abi_ulong target_rlim_swap;
1027     rlim_t result;
1028 
1029     target_rlim_swap = tswapal(target_rlim);
1030     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1031         return RLIM_INFINITY;
1032 
1033     result = target_rlim_swap;
1034     if (target_rlim_swap != (rlim_t)result)
1035         return RLIM_INFINITY;
1036 
1037     return result;
1038 }
1039 #endif
1040 
1041 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1042 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1043 {
1044     abi_ulong target_rlim_swap;
1045     abi_ulong result;
1046 
1047     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1048         target_rlim_swap = TARGET_RLIM_INFINITY;
1049     else
1050         target_rlim_swap = rlim;
1051     result = tswapal(target_rlim_swap);
1052 
1053     return result;
1054 }
1055 #endif
1056 
1057 static inline int target_to_host_resource(int code)
1058 {
1059     switch (code) {
1060     case TARGET_RLIMIT_AS:
1061         return RLIMIT_AS;
1062     case TARGET_RLIMIT_CORE:
1063         return RLIMIT_CORE;
1064     case TARGET_RLIMIT_CPU:
1065         return RLIMIT_CPU;
1066     case TARGET_RLIMIT_DATA:
1067         return RLIMIT_DATA;
1068     case TARGET_RLIMIT_FSIZE:
1069         return RLIMIT_FSIZE;
1070     case TARGET_RLIMIT_LOCKS:
1071         return RLIMIT_LOCKS;
1072     case TARGET_RLIMIT_MEMLOCK:
1073         return RLIMIT_MEMLOCK;
1074     case TARGET_RLIMIT_MSGQUEUE:
1075         return RLIMIT_MSGQUEUE;
1076     case TARGET_RLIMIT_NICE:
1077         return RLIMIT_NICE;
1078     case TARGET_RLIMIT_NOFILE:
1079         return RLIMIT_NOFILE;
1080     case TARGET_RLIMIT_NPROC:
1081         return RLIMIT_NPROC;
1082     case TARGET_RLIMIT_RSS:
1083         return RLIMIT_RSS;
1084     case TARGET_RLIMIT_RTPRIO:
1085         return RLIMIT_RTPRIO;
1086     case TARGET_RLIMIT_SIGPENDING:
1087         return RLIMIT_SIGPENDING;
1088     case TARGET_RLIMIT_STACK:
1089         return RLIMIT_STACK;
1090     default:
1091         return code;
1092     }
1093 }
1094 
1095 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1096                                               abi_ulong target_tv_addr)
1097 {
1098     struct target_timeval *target_tv;
1099 
1100     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1101         return -TARGET_EFAULT;
1102     }
1103 
1104     __get_user(tv->tv_sec, &target_tv->tv_sec);
1105     __get_user(tv->tv_usec, &target_tv->tv_usec);
1106 
1107     unlock_user_struct(target_tv, target_tv_addr, 0);
1108 
1109     return 0;
1110 }
1111 
1112 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1113                                             const struct timeval *tv)
1114 {
1115     struct target_timeval *target_tv;
1116 
1117     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1118         return -TARGET_EFAULT;
1119     }
1120 
1121     __put_user(tv->tv_sec, &target_tv->tv_sec);
1122     __put_user(tv->tv_usec, &target_tv->tv_usec);
1123 
1124     unlock_user_struct(target_tv, target_tv_addr, 1);
1125 
1126     return 0;
1127 }
1128 
1129 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1130 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1131                                                 abi_ulong target_tv_addr)
1132 {
1133     struct target__kernel_sock_timeval *target_tv;
1134 
1135     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1136         return -TARGET_EFAULT;
1137     }
1138 
1139     __get_user(tv->tv_sec, &target_tv->tv_sec);
1140     __get_user(tv->tv_usec, &target_tv->tv_usec);
1141 
1142     unlock_user_struct(target_tv, target_tv_addr, 0);
1143 
1144     return 0;
1145 }
1146 #endif
1147 
1148 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1149                                               const struct timeval *tv)
1150 {
1151     struct target__kernel_sock_timeval *target_tv;
1152 
1153     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1154         return -TARGET_EFAULT;
1155     }
1156 
1157     __put_user(tv->tv_sec, &target_tv->tv_sec);
1158     __put_user(tv->tv_usec, &target_tv->tv_usec);
1159 
1160     unlock_user_struct(target_tv, target_tv_addr, 1);
1161 
1162     return 0;
1163 }
1164 
1165 #if defined(TARGET_NR_futex) || \
1166     defined(TARGET_NR_rt_sigtimedwait) || \
1167     defined(TARGET_NR_pselect6) || \
1168     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1169     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1170     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1171     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1172     defined(TARGET_NR_timer_settime) || \
1173     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1174 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1175                                                abi_ulong target_addr)
1176 {
1177     struct target_timespec *target_ts;
1178 
1179     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1180         return -TARGET_EFAULT;
1181     }
1182     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1183     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1184     unlock_user_struct(target_ts, target_addr, 0);
1185     return 0;
1186 }
1187 #endif
1188 
1189 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1190     defined(TARGET_NR_timer_settime64) || \
1191     defined(TARGET_NR_mq_timedsend_time64) || \
1192     defined(TARGET_NR_mq_timedreceive_time64) || \
1193     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1194     defined(TARGET_NR_clock_nanosleep_time64) || \
1195     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1196     defined(TARGET_NR_utimensat) || \
1197     defined(TARGET_NR_utimensat_time64) || \
1198     defined(TARGET_NR_semtimedop_time64) || \
1199     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1200 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1201                                                  abi_ulong target_addr)
1202 {
1203     struct target__kernel_timespec *target_ts;
1204 
1205     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1206         return -TARGET_EFAULT;
1207     }
1208     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1209     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1210     /* in 32-bit mode, this drops the padding in the upper half of tv_nsec */
1211     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1212     unlock_user_struct(target_ts, target_addr, 0);
1213     return 0;
1214 }
1215 #endif
1216 
1217 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1218                                                struct timespec *host_ts)
1219 {
1220     struct target_timespec *target_ts;
1221 
1222     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1223         return -TARGET_EFAULT;
1224     }
1225     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1226     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1227     unlock_user_struct(target_ts, target_addr, 1);
1228     return 0;
1229 }
1230 
1231 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1232                                                  struct timespec *host_ts)
1233 {
1234     struct target__kernel_timespec *target_ts;
1235 
1236     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1237         return -TARGET_EFAULT;
1238     }
1239     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1240     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1241     unlock_user_struct(target_ts, target_addr, 1);
1242     return 0;
1243 }
1244 
1245 #if defined(TARGET_NR_gettimeofday)
1246 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1247                                              struct timezone *tz)
1248 {
1249     struct target_timezone *target_tz;
1250 
1251     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1252         return -TARGET_EFAULT;
1253     }
1254 
1255     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1256     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1257 
1258     unlock_user_struct(target_tz, target_tz_addr, 1);
1259 
1260     return 0;
1261 }
1262 #endif
1263 
1264 #if defined(TARGET_NR_settimeofday)
1265 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1266                                                abi_ulong target_tz_addr)
1267 {
1268     struct target_timezone *target_tz;
1269 
1270     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1271         return -TARGET_EFAULT;
1272     }
1273 
1274     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1275     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1276 
1277     unlock_user_struct(target_tz, target_tz_addr, 0);
1278 
1279     return 0;
1280 }
1281 #endif
1282 
1283 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1284 #include <mqueue.h>
1285 
1286 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1287                                               abi_ulong target_mq_attr_addr)
1288 {
1289     struct target_mq_attr *target_mq_attr;
1290 
1291     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1292                           target_mq_attr_addr, 1))
1293         return -TARGET_EFAULT;
1294 
1295     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1296     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1297     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1298     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1299 
1300     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1301 
1302     return 0;
1303 }
1304 
1305 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1306                                             const struct mq_attr *attr)
1307 {
1308     struct target_mq_attr *target_mq_attr;
1309 
1310     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1311                           target_mq_attr_addr, 0))
1312         return -TARGET_EFAULT;
1313 
1314     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1315     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1316     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1317     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1318 
1319     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1320 
1321     return 0;
1322 }
1323 #endif
1324 
1325 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1326 /* do_select() must return target values and target errnos. */
1327 static abi_long do_select(int n,
1328                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1329                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1330 {
1331     fd_set rfds, wfds, efds;
1332     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1333     struct timeval tv;
1334     struct timespec ts, *ts_ptr;
1335     abi_long ret;
1336 
1337     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1338     if (ret) {
1339         return ret;
1340     }
1341     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1342     if (ret) {
1343         return ret;
1344     }
1345     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1346     if (ret) {
1347         return ret;
1348     }
1349 
1350     if (target_tv_addr) {
1351         if (copy_from_user_timeval(&tv, target_tv_addr))
1352             return -TARGET_EFAULT;
1353         ts.tv_sec = tv.tv_sec;
1354         ts.tv_nsec = tv.tv_usec * 1000;
1355         ts_ptr = &ts;
1356     } else {
1357         ts_ptr = NULL;
1358     }
1359 
1360     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1361                                   ts_ptr, NULL));
1362 
1363     if (!is_error(ret)) {
1364         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1365             return -TARGET_EFAULT;
1366         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1367             return -TARGET_EFAULT;
1368         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1369             return -TARGET_EFAULT;
1370 
1371         if (target_tv_addr) {
1372             tv.tv_sec = ts.tv_sec;
1373             tv.tv_usec = ts.tv_nsec / 1000;
1374             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1375                 return -TARGET_EFAULT;
1376             }
1377         }
1378     }
1379 
1380     return ret;
1381 }
1382 
1383 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1384 static abi_long do_old_select(abi_ulong arg1)
1385 {
1386     struct target_sel_arg_struct *sel;
1387     abi_ulong inp, outp, exp, tvp;
1388     long nsel;
1389 
1390     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1391         return -TARGET_EFAULT;
1392     }
1393 
1394     nsel = tswapal(sel->n);
1395     inp = tswapal(sel->inp);
1396     outp = tswapal(sel->outp);
1397     exp = tswapal(sel->exp);
1398     tvp = tswapal(sel->tvp);
1399 
1400     unlock_user_struct(sel, arg1, 0);
1401 
1402     return do_select(nsel, inp, outp, exp, tvp);
1403 }
1404 #endif
1405 #endif
1406 
1407 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1408 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1409                             abi_long arg4, abi_long arg5, abi_long arg6,
1410                             bool time64)
1411 {
1412     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1413     fd_set rfds, wfds, efds;
1414     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1415     struct timespec ts, *ts_ptr;
1416     abi_long ret;
1417 
1418     /*
1419      * The 6th arg is actually two args smashed together,
1420      * so we cannot use the C library.
1421      */
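    /*
     * Concretely, arg6 points at two consecutive abi_ulong values in
     * guest memory: the guest address of the sigset and its size.
     * They are unpacked below into arg_sigset and arg_sigsize.
     */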
1422     sigset_t set;
1423     struct {
1424         sigset_t *set;
1425         size_t size;
1426     } sig, *sig_ptr;
1427 
1428     abi_ulong arg_sigset, arg_sigsize, *arg7;
1429     target_sigset_t *target_sigset;
1430 
1431     n = arg1;
1432     rfd_addr = arg2;
1433     wfd_addr = arg3;
1434     efd_addr = arg4;
1435     ts_addr = arg5;
1436 
1437     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1438     if (ret) {
1439         return ret;
1440     }
1441     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1442     if (ret) {
1443         return ret;
1444     }
1445     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1446     if (ret) {
1447         return ret;
1448     }
1449 
1450     /*
1451      * This takes a timespec, and not a timeval, so we cannot
1452      * use the do_select() helper ...
1453      */
1454     if (ts_addr) {
1455         if (time64) {
1456             if (target_to_host_timespec64(&ts, ts_addr)) {
1457                 return -TARGET_EFAULT;
1458             }
1459         } else {
1460             if (target_to_host_timespec(&ts, ts_addr)) {
1461                 return -TARGET_EFAULT;
1462             }
1463         }
1464         ts_ptr = &ts;
1465     } else {
1466         ts_ptr = NULL;
1467     }
1468 
1469     /* Extract the two packed args for the sigset */
1470     if (arg6) {
1471         sig_ptr = &sig;
1472         sig.size = SIGSET_T_SIZE;
1473 
1474         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1475         if (!arg7) {
1476             return -TARGET_EFAULT;
1477         }
1478         arg_sigset = tswapal(arg7[0]);
1479         arg_sigsize = tswapal(arg7[1]);
1480         unlock_user(arg7, arg6, 0);
1481 
1482         if (arg_sigset) {
1483             sig.set = &set;
1484             if (arg_sigsize != sizeof(*target_sigset)) {
1485                 /* Like the kernel, we enforce correct size sigsets */
1486                 return -TARGET_EINVAL;
1487             }
1488             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1489                                       sizeof(*target_sigset), 1);
1490             if (!target_sigset) {
1491                 return -TARGET_EFAULT;
1492             }
1493             target_to_host_sigset(&set, target_sigset);
1494             unlock_user(target_sigset, arg_sigset, 0);
1495         } else {
1496             sig.set = NULL;
1497         }
1498     } else {
1499         sig_ptr = NULL;
1500     }
1501 
1502     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1503                                   ts_ptr, sig_ptr));
1504 
1505     if (!is_error(ret)) {
1506         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1507             return -TARGET_EFAULT;
1508         }
1509         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1510             return -TARGET_EFAULT;
1511         }
1512         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1513             return -TARGET_EFAULT;
1514         }
1515         if (time64) {
1516             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1517                 return -TARGET_EFAULT;
1518             }
1519         } else {
1520             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1521                 return -TARGET_EFAULT;
1522             }
1523         }
1524     }
1525     return ret;
1526 }
1527 #endif
1528 
1529 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1530     defined(TARGET_NR_ppoll_time64)
1531 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1532                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1533 {
1534     struct target_pollfd *target_pfd;
1535     unsigned int nfds = arg2;
1536     struct pollfd *pfd;
1537     unsigned int i;
1538     abi_long ret;
1539 
1540     pfd = NULL;
1541     target_pfd = NULL;
1542     if (nfds) {
1543         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1544             return -TARGET_EINVAL;
1545         }
1546         target_pfd = lock_user(VERIFY_WRITE, arg1,
1547                                sizeof(struct target_pollfd) * nfds, 1);
1548         if (!target_pfd) {
1549             return -TARGET_EFAULT;
1550         }
1551 
1552         pfd = alloca(sizeof(struct pollfd) * nfds);
1553         for (i = 0; i < nfds; i++) {
1554             pfd[i].fd = tswap32(target_pfd[i].fd);
1555             pfd[i].events = tswap16(target_pfd[i].events);
1556         }
1557     }
1558     if (ppoll) {
1559         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1560         target_sigset_t *target_set;
1561         sigset_t _set, *set = &_set;
1562 
1563         if (arg3) {
1564             if (time64) {
1565                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1566                     unlock_user(target_pfd, arg1, 0);
1567                     return -TARGET_EFAULT;
1568                 }
1569             } else {
1570                 if (target_to_host_timespec(timeout_ts, arg3)) {
1571                     unlock_user(target_pfd, arg1, 0);
1572                     return -TARGET_EFAULT;
1573                 }
1574             }
1575         } else {
1576             timeout_ts = NULL;
1577         }
1578 
1579         if (arg4) {
1580             if (arg5 != sizeof(target_sigset_t)) {
1581                 unlock_user(target_pfd, arg1, 0);
1582                 return -TARGET_EINVAL;
1583             }
1584 
1585             target_set = lock_user(VERIFY_READ, arg4,
1586                                    sizeof(target_sigset_t), 1);
1587             if (!target_set) {
1588                 unlock_user(target_pfd, arg1, 0);
1589                 return -TARGET_EFAULT;
1590             }
1591             target_to_host_sigset(set, target_set);
1592         } else {
1593             set = NULL;
1594         }
1595 
1596         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1597                                    set, SIGSET_T_SIZE));
1598 
1599         if (!is_error(ret) && arg3) {
1600             if (time64) {
1601                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1602                     return -TARGET_EFAULT;
1603                 }
1604             } else {
1605                 if (host_to_target_timespec(arg3, timeout_ts)) {
1606                     return -TARGET_EFAULT;
1607                 }
1608             }
1609         }
1610         if (arg4) {
1611             unlock_user(target_set, arg4, 0);
1612         }
1613     } else {
1614         struct timespec ts, *pts;
1615 
1616         if (arg3 >= 0) {
1617             /* Convert ms to secs, ns */
1618             ts.tv_sec = arg3 / 1000;
1619             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1620             pts = &ts;
1621         } else {
1622             /* -ve poll() timeout means "infinite" */
1623             pts = NULL;
1624         }
1625         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1626     }
1627 
1628     if (!is_error(ret)) {
1629         for (i = 0; i < nfds; i++) {
1630             target_pfd[i].revents = tswap16(pfd[i].revents);
1631         }
1632     }
1633     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1634     return ret;
1635 }
1636 #endif
1637 
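/*
 * Thin wrapper around the host pipe2(); returns -ENOSYS if QEMU was built
 * without CONFIG_PIPE2.
 */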
1638 static abi_long do_pipe2(int host_pipe[], int flags)
1639 {
1640 #ifdef CONFIG_PIPE2
1641     return pipe2(host_pipe, flags);
1642 #else
1643     return -ENOSYS;
1644 #endif
1645 }
1646 
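/*
 * Create a pipe and return the two descriptors to the guest.  For the
 * original pipe syscall several targets (Alpha, MIPS, SH4, SPARC) return
 * the second descriptor in a CPU register instead of through guest memory;
 * pipe2 always uses the memory interface.
 */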
1647 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1648                         int flags, int is_pipe2)
1649 {
1650     int host_pipe[2];
1651     abi_long ret;
1652     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1653 
1654     if (is_error(ret))
1655         return get_errno(ret);
1656 
1657     /* Several targets have special calling conventions for the original
1658        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1659     if (!is_pipe2) {
1660 #if defined(TARGET_ALPHA)
1661         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1662         return host_pipe[0];
1663 #elif defined(TARGET_MIPS)
1664         ((CPUMIPSState *)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1665         return host_pipe[0];
1666 #elif defined(TARGET_SH4)
1667         ((CPUSH4State *)cpu_env)->gregs[1] = host_pipe[1];
1668         return host_pipe[0];
1669 #elif defined(TARGET_SPARC)
1670         ((CPUSPARCState *)cpu_env)->regwptr[1] = host_pipe[1];
1671         return host_pipe[0];
1672 #endif
1673     }
1674 
1675     if (put_user_s32(host_pipe[0], pipedes)
1676         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1677         return -TARGET_EFAULT;
1678     return get_errno(ret);
1679 }
1680 
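/*
 * Convert a guest ip_mreq/ip_mreqn multicast membership request into the
 * host ip_mreqn structure.
 */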
1681 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1682                                               abi_ulong target_addr,
1683                                               socklen_t len)
1684 {
1685     struct target_ip_mreqn *target_smreqn;
1686 
1687     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1688     if (!target_smreqn)
1689         return -TARGET_EFAULT;
1690     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1691     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1692     if (len == sizeof(struct target_ip_mreqn))
1693         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1694     unlock_user(target_smreqn, target_addr, 0);
1695 
1696     return 0;
1697 }
1698 
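/*
 * Copy a guest sockaddr of 'len' bytes to a host sockaddr, byte-swapping
 * the family field and the AF_NETLINK and AF_PACKET members that need it,
 * and fixing up unterminated AF_UNIX sun_path lengths (see below).
 */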
1699 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1700                                                abi_ulong target_addr,
1701                                                socklen_t len)
1702 {
1703     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1704     sa_family_t sa_family;
1705     struct target_sockaddr *target_saddr;
1706 
1707     if (fd_trans_target_to_host_addr(fd)) {
1708         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1709     }
1710 
1711     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1712     if (!target_saddr)
1713         return -TARGET_EFAULT;
1714 
1715     sa_family = tswap16(target_saddr->sa_family);
1716 
1717     /* The caller might send an incomplete sun_path; sun_path
1718      * must be terminated by \0 (see the manual page), but
1719      * unfortunately it is quite common to specify the sockaddr_un
1720      * length as "strlen(x->sun_path)" when it should be
1721      * "strlen(...) + 1". We fix that up here if needed.
1722      * The Linux kernel has a similar workaround.
1723      */
1724 
1725     if (sa_family == AF_UNIX) {
1726         if (len < unix_maxlen && len > 0) {
1727             char *cp = (char*)target_saddr;
1728             char *cp = (char *)target_saddr;
1729 
1730             if (cp[len - 1] && !cp[len])
1731         }
1732         if (len > unix_maxlen)
1733             len = unix_maxlen;
1734     }
1735 
1736     memcpy(addr, target_saddr, len);
1737     addr->sa_family = sa_family;
1738     if (sa_family == AF_NETLINK) {
1739         struct sockaddr_nl *nladdr;
1740 
1741         nladdr = (struct sockaddr_nl *)addr;
1742         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1743         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1744     } else if (sa_family == AF_PACKET) {
1745         struct target_sockaddr_ll *lladdr;
1746 
1747         lladdr = (struct target_sockaddr_ll *)addr;
1748         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1749         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1750     }
1751     unlock_user(target_saddr, target_addr, 0);
1752 
1753     return 0;
1754 }
1755 
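/*
 * Copy a host sockaddr back to guest memory, byte-swapping the family
 * field and the AF_NETLINK, AF_PACKET and AF_INET6 members that need it.
 */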
1756 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1757                                                struct sockaddr *addr,
1758                                                socklen_t len)
1759 {
1760     struct target_sockaddr *target_saddr;
1761 
1762     if (len == 0) {
1763         return 0;
1764     }
1765     assert(addr);
1766 
1767     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1768     if (!target_saddr)
1769         return -TARGET_EFAULT;
1770     memcpy(target_saddr, addr, len);
1771     if (len >= offsetof(struct target_sockaddr, sa_family) +
1772         sizeof(target_saddr->sa_family)) {
1773         target_saddr->sa_family = tswap16(addr->sa_family);
1774     }
1775     if (addr->sa_family == AF_NETLINK &&
1776         len >= sizeof(struct target_sockaddr_nl)) {
1777         struct target_sockaddr_nl *target_nl =
1778                (struct target_sockaddr_nl *)target_saddr;
1779         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1780         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1781     } else if (addr->sa_family == AF_PACKET) {
1782         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1783         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1784         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1785     } else if (addr->sa_family == AF_INET6 &&
1786                len >= sizeof(struct target_sockaddr_in6)) {
1787         struct target_sockaddr_in6 *target_in6 =
1788                (struct target_sockaddr_in6 *)target_saddr;
1789         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1790     }
1791     unlock_user(target_saddr, target_addr, len);
1792 
1793     return 0;
1794 }
1795 
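/*
 * Convert the ancillary data (control messages) of a guest msghdr into the
 * host control buffer already allocated in msgh.  SCM_RIGHTS and
 * SCM_CREDENTIALS payloads are converted field by field; other types are
 * copied verbatim and logged as unimplemented.
 */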
1796 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1797                                            struct target_msghdr *target_msgh)
1798 {
1799     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1800     abi_long msg_controllen;
1801     abi_ulong target_cmsg_addr;
1802     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1803     socklen_t space = 0;
1804 
1805     msg_controllen = tswapal(target_msgh->msg_controllen);
1806     if (msg_controllen < sizeof (struct target_cmsghdr))
1807         goto the_end;
1808     target_cmsg_addr = tswapal(target_msgh->msg_control);
1809     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1810     target_cmsg_start = target_cmsg;
1811     if (!target_cmsg)
1812         return -TARGET_EFAULT;
1813 
1814     while (cmsg && target_cmsg) {
1815         void *data = CMSG_DATA(cmsg);
1816         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1817 
1818         int len = tswapal(target_cmsg->cmsg_len)
1819             - sizeof(struct target_cmsghdr);
1820 
1821         space += CMSG_SPACE(len);
1822         if (space > msgh->msg_controllen) {
1823             space -= CMSG_SPACE(len);
1824             /* This is a QEMU bug, since we allocated the payload
1825              * area ourselves (unlike overflow in host-to-target
1826              * conversion, which is just the guest giving us a buffer
1827              * that's too small). It can't happen for the payload types
1828              * we currently support; if it becomes an issue in future
1829              * we would need to improve our allocation strategy to
1830              * something more intelligent than "twice the size of the
1831              * target buffer we're reading from".
1832              */
1833             qemu_log_mask(LOG_UNIMP,
1834                           ("Unsupported ancillary data %d/%d: "
1835                            "unhandled msg size\n"),
1836                           tswap32(target_cmsg->cmsg_level),
1837                           tswap32(target_cmsg->cmsg_type));
1838             break;
1839         }
1840 
1841         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1842             cmsg->cmsg_level = SOL_SOCKET;
1843         } else {
1844             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1845         }
1846         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1847         cmsg->cmsg_len = CMSG_LEN(len);
1848 
1849         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1850             int *fd = (int *)data;
1851             int *target_fd = (int *)target_data;
1852             int i, numfds = len / sizeof(int);
1853 
1854             for (i = 0; i < numfds; i++) {
1855                 __get_user(fd[i], target_fd + i);
1856             }
1857         } else if (cmsg->cmsg_level == SOL_SOCKET
1858                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1859             struct ucred *cred = (struct ucred *)data;
1860             struct target_ucred *target_cred =
1861                 (struct target_ucred *)target_data;
1862 
1863             __get_user(cred->pid, &target_cred->pid);
1864             __get_user(cred->uid, &target_cred->uid);
1865             __get_user(cred->gid, &target_cred->gid);
1866         } else {
1867             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1868                           cmsg->cmsg_level, cmsg->cmsg_type);
1869             memcpy(data, target_data, len);
1870         }
1871 
1872         cmsg = CMSG_NXTHDR(msgh, cmsg);
1873         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1874                                          target_cmsg_start);
1875     }
1876     unlock_user(target_cmsg, target_cmsg_addr, 0);
1877  the_end:
1878     msgh->msg_controllen = space;
1879     return 0;
1880 }
1881 
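/*
 * Convert host ancillary data back into the guest control buffer,
 * truncating (and reporting MSG_CTRUNC) when the guest buffer is too
 * small.  Payloads whose size differs between host and target, such as
 * SO_TIMESTAMP's struct timeval, are converted explicitly; unknown types
 * are copied verbatim and logged as unimplemented.
 */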
1882 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1883                                            struct msghdr *msgh)
1884 {
1885     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1886     abi_long msg_controllen;
1887     abi_ulong target_cmsg_addr;
1888     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1889     socklen_t space = 0;
1890 
1891     msg_controllen = tswapal(target_msgh->msg_controllen);
1892     if (msg_controllen < sizeof (struct target_cmsghdr))
1893         goto the_end;
1894     target_cmsg_addr = tswapal(target_msgh->msg_control);
1895     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1896     target_cmsg_start = target_cmsg;
1897     if (!target_cmsg)
1898         return -TARGET_EFAULT;
1899 
1900     while (cmsg && target_cmsg) {
1901         void *data = CMSG_DATA(cmsg);
1902         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1903 
1904         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1905         int tgt_len, tgt_space;
1906 
1907         /* We never copy a half-header but may copy half-data;
1908          * this is Linux's behaviour in put_cmsg(). Note that
1909          * truncation here is a guest problem (which we report
1910          * to the guest via the CTRUNC bit), unlike truncation
1911          * in target_to_host_cmsg, which is a QEMU bug.
1912          */
1913         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1914             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1915             break;
1916         }
1917 
1918         if (cmsg->cmsg_level == SOL_SOCKET) {
1919             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1920         } else {
1921             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1922         }
1923         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1924 
1925         /* Payload types which need a different size of payload on
1926          * the target must adjust tgt_len here.
1927          */
1928         tgt_len = len;
1929         switch (cmsg->cmsg_level) {
1930         case SOL_SOCKET:
1931             switch (cmsg->cmsg_type) {
1932             case SO_TIMESTAMP:
1933                 tgt_len = sizeof(struct target_timeval);
1934                 break;
1935             default:
1936                 break;
1937             }
1938             break;
1939         default:
1940             break;
1941         }
1942 
1943         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1944             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1945             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1946         }
1947 
1948         /* We must now copy-and-convert len bytes of payload
1949          * into tgt_len bytes of destination space. Bear in mind
1950          * that in both source and destination we may be dealing
1951          * with a truncated value!
1952          */
1953         switch (cmsg->cmsg_level) {
1954         case SOL_SOCKET:
1955             switch (cmsg->cmsg_type) {
1956             case SCM_RIGHTS:
1957             {
1958                 int *fd = (int *)data;
1959                 int *target_fd = (int *)target_data;
1960                 int i, numfds = tgt_len / sizeof(int);
1961 
1962                 for (i = 0; i < numfds; i++) {
1963                     __put_user(fd[i], target_fd + i);
1964                 }
1965                 break;
1966             }
1967             case SO_TIMESTAMP:
1968             {
1969                 struct timeval *tv = (struct timeval *)data;
1970                 struct target_timeval *target_tv =
1971                     (struct target_timeval *)target_data;
1972 
1973                 if (len != sizeof(struct timeval) ||
1974                     tgt_len != sizeof(struct target_timeval)) {
1975                     goto unimplemented;
1976                 }
1977 
1978                 /* copy struct timeval to target */
1979                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1980                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1981                 break;
1982             }
1983             case SCM_CREDENTIALS:
1984             {
1985                 struct ucred *cred = (struct ucred *)data;
1986                 struct target_ucred *target_cred =
1987                     (struct target_ucred *)target_data;
1988 
1989                 __put_user(cred->pid, &target_cred->pid);
1990                 __put_user(cred->uid, &target_cred->uid);
1991                 __put_user(cred->gid, &target_cred->gid);
1992                 break;
1993             }
1994             default:
1995                 goto unimplemented;
1996             }
1997             break;
1998 
1999         case SOL_IP:
2000             switch (cmsg->cmsg_type) {
2001             case IP_TTL:
2002             {
2003                 uint32_t *v = (uint32_t *)data;
2004                 uint32_t *t_int = (uint32_t *)target_data;
2005 
2006                 if (len != sizeof(uint32_t) ||
2007                     tgt_len != sizeof(uint32_t)) {
2008                     goto unimplemented;
2009                 }
2010                 __put_user(*v, t_int);
2011                 break;
2012             }
2013             case IP_RECVERR:
2014             {
2015                 struct errhdr_t {
2016                    struct sock_extended_err ee;
2017                    struct sockaddr_in offender;
2018                 };
2019                 struct errhdr_t *errh = (struct errhdr_t *)data;
2020                 struct errhdr_t *target_errh =
2021                     (struct errhdr_t *)target_data;
2022 
2023                 if (len != sizeof(struct errhdr_t) ||
2024                     tgt_len != sizeof(struct errhdr_t)) {
2025                     goto unimplemented;
2026                 }
2027                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2028                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2029                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2030                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2031                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2032                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2033                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2034                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2035                     (void *) &errh->offender, sizeof(errh->offender));
2036                 break;
2037             }
2038             default:
2039                 goto unimplemented;
2040             }
2041             break;
2042 
2043         case SOL_IPV6:
2044             switch (cmsg->cmsg_type) {
2045             case IPV6_HOPLIMIT:
2046             {
2047                 uint32_t *v = (uint32_t *)data;
2048                 uint32_t *t_int = (uint32_t *)target_data;
2049 
2050                 if (len != sizeof(uint32_t) ||
2051                     tgt_len != sizeof(uint32_t)) {
2052                     goto unimplemented;
2053                 }
2054                 __put_user(*v, t_int);
2055                 break;
2056             }
2057             case IPV6_RECVERR:
2058             {
2059                 struct errhdr6_t {
2060                    struct sock_extended_err ee;
2061                    struct sockaddr_in6 offender;
2062                 };
2063                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2064                 struct errhdr6_t *target_errh =
2065                     (struct errhdr6_t *)target_data;
2066 
2067                 if (len != sizeof(struct errhdr6_t) ||
2068                     tgt_len != sizeof(struct errhdr6_t)) {
2069                     goto unimplemented;
2070                 }
2071                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2072                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2073                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2074                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2075                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2076                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2077                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2078                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2079                     (void *) &errh->offender, sizeof(errh->offender));
2080                 break;
2081             }
2082             default:
2083                 goto unimplemented;
2084             }
2085             break;
2086 
2087         default:
2088         unimplemented:
2089             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2090                           cmsg->cmsg_level, cmsg->cmsg_type);
2091             memcpy(target_data, data, MIN(len, tgt_len));
2092             if (tgt_len > len) {
2093                 memset(target_data + len, 0, tgt_len - len);
2094             }
2095         }
2096 
2097         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2098         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2099         if (msg_controllen < tgt_space) {
2100             tgt_space = msg_controllen;
2101         }
2102         msg_controllen -= tgt_space;
2103         space += tgt_space;
2104         cmsg = CMSG_NXTHDR(msgh, cmsg);
2105         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2106                                          target_cmsg_start);
2107     }
2108     unlock_user(target_cmsg, target_cmsg_addr, space);
2109  the_end:
2110     target_msgh->msg_controllen = tswapal(space);
2111     return 0;
2112 }
2113 
2114 /* do_setsockopt() must return target values and target errnos. */
2115 static abi_long do_setsockopt(int sockfd, int level, int optname,
2116                               abi_ulong optval_addr, socklen_t optlen)
2117 {
2118     abi_long ret;
2119     int val;
2120     struct ip_mreqn *ip_mreq;
2121     struct ip_mreq_source *ip_mreq_source;
2122 
2123     switch(level) {
2124     case SOL_TCP:
2125     case SOL_UDP:
2126         /* TCP and UDP options all take an 'int' value.  */
2127         if (optlen < sizeof(uint32_t))
2128             return -TARGET_EINVAL;
2129 
2130         if (get_user_u32(val, optval_addr))
2131             return -TARGET_EFAULT;
2132         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2133         break;
2134     case SOL_IP:
2135         switch(optname) {
2136         case IP_TOS:
2137         case IP_TTL:
2138         case IP_HDRINCL:
2139         case IP_ROUTER_ALERT:
2140         case IP_RECVOPTS:
2141         case IP_RETOPTS:
2142         case IP_PKTINFO:
2143         case IP_MTU_DISCOVER:
2144         case IP_RECVERR:
2145         case IP_RECVTTL:
2146         case IP_RECVTOS:
2147 #ifdef IP_FREEBIND
2148         case IP_FREEBIND:
2149 #endif
2150         case IP_MULTICAST_TTL:
2151         case IP_MULTICAST_LOOP:
2152             val = 0;
2153             if (optlen >= sizeof(uint32_t)) {
2154                 if (get_user_u32(val, optval_addr))
2155                     return -TARGET_EFAULT;
2156             } else if (optlen >= 1) {
2157                 if (get_user_u8(val, optval_addr))
2158                     return -TARGET_EFAULT;
2159             }
2160             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2161             break;
2162         case IP_ADD_MEMBERSHIP:
2163         case IP_DROP_MEMBERSHIP:
2164             if (optlen < sizeof (struct target_ip_mreq) ||
2165                 optlen > sizeof (struct target_ip_mreqn))
2166                 return -TARGET_EINVAL;
2167 
2168             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2169             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2170             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2171             break;
2172 
2173         case IP_BLOCK_SOURCE:
2174         case IP_UNBLOCK_SOURCE:
2175         case IP_ADD_SOURCE_MEMBERSHIP:
2176         case IP_DROP_SOURCE_MEMBERSHIP:
2177             if (optlen != sizeof (struct target_ip_mreq_source))
2178                 return -TARGET_EINVAL;
2179 
2180             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2181             if (!ip_mreq_source) {
2182                 return -TARGET_EFAULT;
2183             }
2184             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2185             unlock_user(ip_mreq_source, optval_addr, 0);
2186             break;
2187 
2188         default:
2189             goto unimplemented;
2190         }
2191         break;
2192     case SOL_IPV6:
2193         switch (optname) {
2194         case IPV6_MTU_DISCOVER:
2195         case IPV6_MTU:
2196         case IPV6_V6ONLY:
2197         case IPV6_RECVPKTINFO:
2198         case IPV6_UNICAST_HOPS:
2199         case IPV6_MULTICAST_HOPS:
2200         case IPV6_MULTICAST_LOOP:
2201         case IPV6_RECVERR:
2202         case IPV6_RECVHOPLIMIT:
2203         case IPV6_2292HOPLIMIT:
2204         case IPV6_CHECKSUM:
2205         case IPV6_ADDRFORM:
2206         case IPV6_2292PKTINFO:
2207         case IPV6_RECVTCLASS:
2208         case IPV6_RECVRTHDR:
2209         case IPV6_2292RTHDR:
2210         case IPV6_RECVHOPOPTS:
2211         case IPV6_2292HOPOPTS:
2212         case IPV6_RECVDSTOPTS:
2213         case IPV6_2292DSTOPTS:
2214         case IPV6_TCLASS:
2215         case IPV6_ADDR_PREFERENCES:
2216 #ifdef IPV6_RECVPATHMTU
2217         case IPV6_RECVPATHMTU:
2218 #endif
2219 #ifdef IPV6_TRANSPARENT
2220         case IPV6_TRANSPARENT:
2221 #endif
2222 #ifdef IPV6_FREEBIND
2223         case IPV6_FREEBIND:
2224 #endif
2225 #ifdef IPV6_RECVORIGDSTADDR
2226         case IPV6_RECVORIGDSTADDR:
2227 #endif
2228             val = 0;
2229             if (optlen < sizeof(uint32_t)) {
2230                 return -TARGET_EINVAL;
2231             }
2232             if (get_user_u32(val, optval_addr)) {
2233                 return -TARGET_EFAULT;
2234             }
2235             ret = get_errno(setsockopt(sockfd, level, optname,
2236                                        &val, sizeof(val)));
2237             break;
2238         case IPV6_PKTINFO:
2239         {
2240             struct in6_pktinfo pki;
2241 
2242             if (optlen < sizeof(pki)) {
2243                 return -TARGET_EINVAL;
2244             }
2245 
2246             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2247                 return -TARGET_EFAULT;
2248             }
2249 
2250             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2251 
2252             ret = get_errno(setsockopt(sockfd, level, optname,
2253                                        &pki, sizeof(pki)));
2254             break;
2255         }
2256         case IPV6_ADD_MEMBERSHIP:
2257         case IPV6_DROP_MEMBERSHIP:
2258         {
2259             struct ipv6_mreq ipv6mreq;
2260 
2261             if (optlen < sizeof(ipv6mreq)) {
2262                 return -TARGET_EINVAL;
2263             }
2264 
2265             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2266                 return -TARGET_EFAULT;
2267             }
2268 
2269             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2270 
2271             ret = get_errno(setsockopt(sockfd, level, optname,
2272                                        &ipv6mreq, sizeof(ipv6mreq)));
2273             break;
2274         }
2275         default:
2276             goto unimplemented;
2277         }
2278         break;
2279     case SOL_ICMPV6:
2280         switch (optname) {
2281         case ICMPV6_FILTER:
2282         {
2283             struct icmp6_filter icmp6f;
2284 
2285             if (optlen > sizeof(icmp6f)) {
2286                 optlen = sizeof(icmp6f);
2287             }
2288 
2289             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2290                 return -TARGET_EFAULT;
2291             }
2292 
2293             for (val = 0; val < 8; val++) {
2294                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2295             }
2296 
2297             ret = get_errno(setsockopt(sockfd, level, optname,
2298                                        &icmp6f, optlen));
2299             break;
2300         }
2301         default:
2302             goto unimplemented;
2303         }
2304         break;
2305     case SOL_RAW:
2306         switch (optname) {
2307         case ICMP_FILTER:
2308         case IPV6_CHECKSUM:
2309             /* These take a u32 value. */
2310             if (optlen < sizeof(uint32_t)) {
2311                 return -TARGET_EINVAL;
2312             }
2313 
2314             if (get_user_u32(val, optval_addr)) {
2315                 return -TARGET_EFAULT;
2316             }
2317             ret = get_errno(setsockopt(sockfd, level, optname,
2318                                        &val, sizeof(val)));
2319             break;
2320 
2321         default:
2322             goto unimplemented;
2323         }
2324         break;
2325 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2326     case SOL_ALG:
2327         switch (optname) {
2328         case ALG_SET_KEY:
2329         {
2330             char *alg_key = g_malloc(optlen);
2331 
2332             if (!alg_key) {
2333                 return -TARGET_ENOMEM;
2334             }
2335             if (copy_from_user(alg_key, optval_addr, optlen)) {
2336                 g_free(alg_key);
2337                 return -TARGET_EFAULT;
2338             }
2339             ret = get_errno(setsockopt(sockfd, level, optname,
2340                                        alg_key, optlen));
2341             g_free(alg_key);
2342             break;
2343         }
2344         case ALG_SET_AEAD_AUTHSIZE:
2345         {
2346             ret = get_errno(setsockopt(sockfd, level, optname,
2347                                        NULL, optlen));
2348             break;
2349         }
2350         default:
2351             goto unimplemented;
2352         }
2353         break;
2354 #endif
2355     case TARGET_SOL_SOCKET:
2356         switch (optname) {
2357         case TARGET_SO_RCVTIMEO:
2358         {
2359                 struct timeval tv;
2360 
2361                 optname = SO_RCVTIMEO;
2362 
2363 set_timeout:
2364                 if (optlen != sizeof(struct target_timeval)) {
2365                     return -TARGET_EINVAL;
2366                 }
2367 
2368                 if (copy_from_user_timeval(&tv, optval_addr)) {
2369                     return -TARGET_EFAULT;
2370                 }
2371 
2372                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2373                                 &tv, sizeof(tv)));
2374                 return ret;
2375         }
2376         case TARGET_SO_SNDTIMEO:
2377                 optname = SO_SNDTIMEO;
2378                 goto set_timeout;
2379         case TARGET_SO_ATTACH_FILTER:
2380         {
2381                 struct target_sock_fprog *tfprog;
2382                 struct target_sock_filter *tfilter;
2383                 struct sock_fprog fprog;
2384                 struct sock_filter *filter;
2385                 int i;
2386 
2387                 if (optlen != sizeof(*tfprog)) {
2388                     return -TARGET_EINVAL;
2389                 }
2390                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2391                     return -TARGET_EFAULT;
2392                 }
2393                 if (!lock_user_struct(VERIFY_READ, tfilter,
2394                                       tswapal(tfprog->filter), 0)) {
2395                     unlock_user_struct(tfprog, optval_addr, 1);
2396                     return -TARGET_EFAULT;
2397                 }
2398 
2399                 fprog.len = tswap16(tfprog->len);
2400                 filter = g_try_new(struct sock_filter, fprog.len);
2401                 if (filter == NULL) {
2402                     unlock_user_struct(tfilter, tfprog->filter, 1);
2403                     unlock_user_struct(tfprog, optval_addr, 1);
2404                     return -TARGET_ENOMEM;
2405                 }
2406                 for (i = 0; i < fprog.len; i++) {
2407                     filter[i].code = tswap16(tfilter[i].code);
2408                     filter[i].jt = tfilter[i].jt;
2409                     filter[i].jf = tfilter[i].jf;
2410                     filter[i].k = tswap32(tfilter[i].k);
2411                 }
2412                 fprog.filter = filter;
2413 
2414                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2415                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2416                 g_free(filter);
2417 
2418                 unlock_user_struct(tfilter, tfprog->filter, 1);
2419                 unlock_user_struct(tfprog, optval_addr, 1);
2420                 return ret;
2421         }
2422         case TARGET_SO_BINDTODEVICE:
2423         {
2424                 char *dev_ifname, *addr_ifname;
2425 
2426                 if (optlen > IFNAMSIZ - 1) {
2427                     optlen = IFNAMSIZ - 1;
2428                 }
2429                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2430                 if (!dev_ifname) {
2431                     return -TARGET_EFAULT;
2432                 }
2433                 optname = SO_BINDTODEVICE;
2434                 addr_ifname = alloca(IFNAMSIZ);
2435                 memcpy(addr_ifname, dev_ifname, optlen);
2436                 addr_ifname[optlen] = 0;
2437                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2438                                            addr_ifname, optlen));
2439                 unlock_user(dev_ifname, optval_addr, 0);
2440                 return ret;
2441         }
2442         case TARGET_SO_LINGER:
2443         {
2444                 struct linger lg;
2445                 struct target_linger *tlg;
2446 
2447                 if (optlen != sizeof(struct target_linger)) {
2448                     return -TARGET_EINVAL;
2449                 }
2450                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2451                     return -TARGET_EFAULT;
2452                 }
2453                 __get_user(lg.l_onoff, &tlg->l_onoff);
2454                 __get_user(lg.l_linger, &tlg->l_linger);
2455                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2456                                 &lg, sizeof(lg)));
2457                 unlock_user_struct(tlg, optval_addr, 0);
2458                 return ret;
2459         }
2460             /* Options with 'int' argument.  */
2461         case TARGET_SO_DEBUG:
2462                 optname = SO_DEBUG;
2463                 break;
2464         case TARGET_SO_REUSEADDR:
2465                 optname = SO_REUSEADDR;
2466                 break;
2467 #ifdef SO_REUSEPORT
2468         case TARGET_SO_REUSEPORT:
2469                 optname = SO_REUSEPORT;
2470                 break;
2471 #endif
2472         case TARGET_SO_TYPE:
2473                 optname = SO_TYPE;
2474                 break;
2475         case TARGET_SO_ERROR:
2476                 optname = SO_ERROR;
2477                 break;
2478         case TARGET_SO_DONTROUTE:
2479                 optname = SO_DONTROUTE;
2480                 break;
2481         case TARGET_SO_BROADCAST:
2482                 optname = SO_BROADCAST;
2483                 break;
2484         case TARGET_SO_SNDBUF:
2485                 optname = SO_SNDBUF;
2486                 break;
2487         case TARGET_SO_SNDBUFFORCE:
2488                 optname = SO_SNDBUFFORCE;
2489                 break;
2490         case TARGET_SO_RCVBUF:
2491                 optname = SO_RCVBUF;
2492                 break;
2493         case TARGET_SO_RCVBUFFORCE:
2494                 optname = SO_RCVBUFFORCE;
2495                 break;
2496         case TARGET_SO_KEEPALIVE:
2497                 optname = SO_KEEPALIVE;
2498                 break;
2499         case TARGET_SO_OOBINLINE:
2500                 optname = SO_OOBINLINE;
2501                 break;
2502         case TARGET_SO_NO_CHECK:
2503                 optname = SO_NO_CHECK;
2504                 break;
2505         case TARGET_SO_PRIORITY:
2506                 optname = SO_PRIORITY;
2507                 break;
2508 #ifdef SO_BSDCOMPAT
2509         case TARGET_SO_BSDCOMPAT:
2510                 optname = SO_BSDCOMPAT;
2511                 break;
2512 #endif
2513         case TARGET_SO_PASSCRED:
2514                 optname = SO_PASSCRED;
2515                 break;
2516         case TARGET_SO_PASSSEC:
2517                 optname = SO_PASSSEC;
2518                 break;
2519         case TARGET_SO_TIMESTAMP:
2520                 optname = SO_TIMESTAMP;
2521                 break;
2522         case TARGET_SO_RCVLOWAT:
2523                 optname = SO_RCVLOWAT;
2524                 break;
2525         default:
2526             goto unimplemented;
2527         }
2528         if (optlen < sizeof(uint32_t))
2529             return -TARGET_EINVAL;
2530 
2531         if (get_user_u32(val, optval_addr))
2532             return -TARGET_EFAULT;
2533         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2534         break;
2535 #ifdef SOL_NETLINK
2536     case SOL_NETLINK:
2537         switch (optname) {
2538         case NETLINK_PKTINFO:
2539         case NETLINK_ADD_MEMBERSHIP:
2540         case NETLINK_DROP_MEMBERSHIP:
2541         case NETLINK_BROADCAST_ERROR:
2542         case NETLINK_NO_ENOBUFS:
2543 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2544         case NETLINK_LISTEN_ALL_NSID:
2545         case NETLINK_CAP_ACK:
2546 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2547 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2548         case NETLINK_EXT_ACK:
2549 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2550 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2551         case NETLINK_GET_STRICT_CHK:
2552 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2553             break;
2554         default:
2555             goto unimplemented;
2556         }
2557         val = 0;
2558         if (optlen < sizeof(uint32_t)) {
2559             return -TARGET_EINVAL;
2560         }
2561         if (get_user_u32(val, optval_addr)) {
2562             return -TARGET_EFAULT;
2563         }
2564         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2565                                    sizeof(val)));
2566         break;
2567 #endif /* SOL_NETLINK */
2568     default:
2569     unimplemented:
2570         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2571                       level, optname);
2572         ret = -TARGET_ENOPROTOOPT;
2573     }
2574     return ret;
2575 }
2576 
2577 /* do_getsockopt() must return target values and target errnos. */
2578 static abi_long do_getsockopt(int sockfd, int level, int optname,
2579                               abi_ulong optval_addr, abi_ulong optlen)
2580 {
2581     abi_long ret;
2582     int len, val;
2583     socklen_t lv;
2584 
2585     switch(level) {
2586     case TARGET_SOL_SOCKET:
2587         level = SOL_SOCKET;
2588         switch (optname) {
2589         /* These don't just return a single integer */
2590         case TARGET_SO_PEERNAME:
2591             goto unimplemented;
2592         case TARGET_SO_RCVTIMEO: {
2593             struct timeval tv;
2594             socklen_t tvlen;
2595 
2596             optname = SO_RCVTIMEO;
2597 
2598 get_timeout:
2599             if (get_user_u32(len, optlen)) {
2600                 return -TARGET_EFAULT;
2601             }
2602             if (len < 0) {
2603                 return -TARGET_EINVAL;
2604             }
2605 
2606             tvlen = sizeof(tv);
2607             ret = get_errno(getsockopt(sockfd, level, optname,
2608                                        &tv, &tvlen));
2609             if (ret < 0) {
2610                 return ret;
2611             }
2612             if (len > sizeof(struct target_timeval)) {
2613                 len = sizeof(struct target_timeval);
2614             }
2615             if (copy_to_user_timeval(optval_addr, &tv)) {
2616                 return -TARGET_EFAULT;
2617             }
2618             if (put_user_u32(len, optlen)) {
2619                 return -TARGET_EFAULT;
2620             }
2621             break;
2622         }
2623         case TARGET_SO_SNDTIMEO:
2624             optname = SO_SNDTIMEO;
2625             goto get_timeout;
2626         case TARGET_SO_PEERCRED: {
2627             struct ucred cr;
2628             socklen_t crlen;
2629             struct target_ucred *tcr;
2630 
2631             if (get_user_u32(len, optlen)) {
2632                 return -TARGET_EFAULT;
2633             }
2634             if (len < 0) {
2635                 return -TARGET_EINVAL;
2636             }
2637 
2638             crlen = sizeof(cr);
2639             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2640                                        &cr, &crlen));
2641             if (ret < 0) {
2642                 return ret;
2643             }
2644             if (len > crlen) {
2645                 len = crlen;
2646             }
2647             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2648                 return -TARGET_EFAULT;
2649             }
2650             __put_user(cr.pid, &tcr->pid);
2651             __put_user(cr.uid, &tcr->uid);
2652             __put_user(cr.gid, &tcr->gid);
2653             unlock_user_struct(tcr, optval_addr, 1);
2654             if (put_user_u32(len, optlen)) {
2655                 return -TARGET_EFAULT;
2656             }
2657             break;
2658         }
2659         case TARGET_SO_PEERSEC: {
2660             char *name;
2661 
2662             if (get_user_u32(len, optlen)) {
2663                 return -TARGET_EFAULT;
2664             }
2665             if (len < 0) {
2666                 return -TARGET_EINVAL;
2667             }
2668             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2669             if (!name) {
2670                 return -TARGET_EFAULT;
2671             }
2672             lv = len;
2673             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2674                                        name, &lv));
2675             if (put_user_u32(lv, optlen)) {
2676                 ret = -TARGET_EFAULT;
2677             }
2678             unlock_user(name, optval_addr, lv);
2679             break;
2680         }
2681         case TARGET_SO_LINGER:
2682         {
2683             struct linger lg;
2684             socklen_t lglen;
2685             struct target_linger *tlg;
2686 
2687             if (get_user_u32(len, optlen)) {
2688                 return -TARGET_EFAULT;
2689             }
2690             if (len < 0) {
2691                 return -TARGET_EINVAL;
2692             }
2693 
2694             lglen = sizeof(lg);
2695             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2696                                        &lg, &lglen));
2697             if (ret < 0) {
2698                 return ret;
2699             }
2700             if (len > lglen) {
2701                 len = lglen;
2702             }
2703             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2704                 return -TARGET_EFAULT;
2705             }
2706             __put_user(lg.l_onoff, &tlg->l_onoff);
2707             __put_user(lg.l_linger, &tlg->l_linger);
2708             unlock_user_struct(tlg, optval_addr, 1);
2709             if (put_user_u32(len, optlen)) {
2710                 return -TARGET_EFAULT;
2711             }
2712             break;
2713         }
2714         /* Options with 'int' argument.  */
2715         case TARGET_SO_DEBUG:
2716             optname = SO_DEBUG;
2717             goto int_case;
2718         case TARGET_SO_REUSEADDR:
2719             optname = SO_REUSEADDR;
2720             goto int_case;
2721 #ifdef SO_REUSEPORT
2722         case TARGET_SO_REUSEPORT:
2723             optname = SO_REUSEPORT;
2724             goto int_case;
2725 #endif
2726         case TARGET_SO_TYPE:
2727             optname = SO_TYPE;
2728             goto int_case;
2729         case TARGET_SO_ERROR:
2730             optname = SO_ERROR;
2731             goto int_case;
2732         case TARGET_SO_DONTROUTE:
2733             optname = SO_DONTROUTE;
2734             goto int_case;
2735         case TARGET_SO_BROADCAST:
2736             optname = SO_BROADCAST;
2737             goto int_case;
2738         case TARGET_SO_SNDBUF:
2739             optname = SO_SNDBUF;
2740             goto int_case;
2741         case TARGET_SO_RCVBUF:
2742             optname = SO_RCVBUF;
2743             goto int_case;
2744         case TARGET_SO_KEEPALIVE:
2745             optname = SO_KEEPALIVE;
2746             goto int_case;
2747         case TARGET_SO_OOBINLINE:
2748             optname = SO_OOBINLINE;
2749             goto int_case;
2750         case TARGET_SO_NO_CHECK:
2751             optname = SO_NO_CHECK;
2752             goto int_case;
2753         case TARGET_SO_PRIORITY:
2754             optname = SO_PRIORITY;
2755             goto int_case;
2756 #ifdef SO_BSDCOMPAT
2757         case TARGET_SO_BSDCOMPAT:
2758             optname = SO_BSDCOMPAT;
2759             goto int_case;
2760 #endif
2761         case TARGET_SO_PASSCRED:
2762             optname = SO_PASSCRED;
2763             goto int_case;
2764         case TARGET_SO_TIMESTAMP:
2765             optname = SO_TIMESTAMP;
2766             goto int_case;
2767         case TARGET_SO_RCVLOWAT:
2768             optname = SO_RCVLOWAT;
2769             goto int_case;
2770         case TARGET_SO_ACCEPTCONN:
2771             optname = SO_ACCEPTCONN;
2772             goto int_case;
2773         case TARGET_SO_PROTOCOL:
2774             optname = SO_PROTOCOL;
2775             goto int_case;
2776         case TARGET_SO_DOMAIN:
2777             optname = SO_DOMAIN;
2778             goto int_case;
2779         default:
2780             goto int_case;
2781         }
2782         break;
2783     case SOL_TCP:
2784     case SOL_UDP:
2785         /* TCP and UDP options all take an 'int' value.  */
2786     int_case:
2787         if (get_user_u32(len, optlen))
2788             return -TARGET_EFAULT;
2789         if (len < 0)
2790             return -TARGET_EINVAL;
2791         lv = sizeof(lv);
2792         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2793         if (ret < 0)
2794             return ret;
2795         if (optname == SO_TYPE) {
2796             val = host_to_target_sock_type(val);
2797         }
2798         if (len > lv)
2799             len = lv;
2800         if (len == 4) {
2801             if (put_user_u32(val, optval_addr))
2802                 return -TARGET_EFAULT;
2803         } else {
2804             if (put_user_u8(val, optval_addr))
2805                 return -TARGET_EFAULT;
2806         }
2807         if (put_user_u32(len, optlen))
2808             return -TARGET_EFAULT;
2809         break;
2810     case SOL_IP:
2811         switch(optname) {
2812         case IP_TOS:
2813         case IP_TTL:
2814         case IP_HDRINCL:
2815         case IP_ROUTER_ALERT:
2816         case IP_RECVOPTS:
2817         case IP_RETOPTS:
2818         case IP_PKTINFO:
2819         case IP_MTU_DISCOVER:
2820         case IP_RECVERR:
2821         case IP_RECVTOS:
2822 #ifdef IP_FREEBIND
2823         case IP_FREEBIND:
2824 #endif
2825         case IP_MULTICAST_TTL:
2826         case IP_MULTICAST_LOOP:
2827             if (get_user_u32(len, optlen))
2828                 return -TARGET_EFAULT;
2829             if (len < 0)
2830                 return -TARGET_EINVAL;
2831             lv = sizeof(lv);
2832             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2833             if (ret < 0)
2834                 return ret;
2835             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2836                 len = 1;
2837                 if (put_user_u32(len, optlen)
2838                     || put_user_u8(val, optval_addr))
2839                     return -TARGET_EFAULT;
2840             } else {
2841                 if (len > sizeof(int))
2842                     len = sizeof(int);
2843                 if (put_user_u32(len, optlen)
2844                     || put_user_u32(val, optval_addr))
2845                     return -TARGET_EFAULT;
2846             }
2847             break;
2848         default:
2849             ret = -TARGET_ENOPROTOOPT;
2850             break;
2851         }
2852         break;
2853     case SOL_IPV6:
2854         switch (optname) {
2855         case IPV6_MTU_DISCOVER:
2856         case IPV6_MTU:
2857         case IPV6_V6ONLY:
2858         case IPV6_RECVPKTINFO:
2859         case IPV6_UNICAST_HOPS:
2860         case IPV6_MULTICAST_HOPS:
2861         case IPV6_MULTICAST_LOOP:
2862         case IPV6_RECVERR:
2863         case IPV6_RECVHOPLIMIT:
2864         case IPV6_2292HOPLIMIT:
2865         case IPV6_CHECKSUM:
2866         case IPV6_ADDRFORM:
2867         case IPV6_2292PKTINFO:
2868         case IPV6_RECVTCLASS:
2869         case IPV6_RECVRTHDR:
2870         case IPV6_2292RTHDR:
2871         case IPV6_RECVHOPOPTS:
2872         case IPV6_2292HOPOPTS:
2873         case IPV6_RECVDSTOPTS:
2874         case IPV6_2292DSTOPTS:
2875         case IPV6_TCLASS:
2876         case IPV6_ADDR_PREFERENCES:
2877 #ifdef IPV6_RECVPATHMTU
2878         case IPV6_RECVPATHMTU:
2879 #endif
2880 #ifdef IPV6_TRANSPARENT
2881         case IPV6_TRANSPARENT:
2882 #endif
2883 #ifdef IPV6_FREEBIND
2884         case IPV6_FREEBIND:
2885 #endif
2886 #ifdef IPV6_RECVORIGDSTADDR
2887         case IPV6_RECVORIGDSTADDR:
2888 #endif
2889             if (get_user_u32(len, optlen))
2890                 return -TARGET_EFAULT;
2891             if (len < 0)
2892                 return -TARGET_EINVAL;
2893             lv = sizeof(lv);
2894             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2895             if (ret < 0)
2896                 return ret;
2897             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2898                 len = 1;
2899                 if (put_user_u32(len, optlen)
2900                     || put_user_u8(val, optval_addr))
2901                     return -TARGET_EFAULT;
2902             } else {
2903                 if (len > sizeof(int))
2904                     len = sizeof(int);
2905                 if (put_user_u32(len, optlen)
2906                     || put_user_u32(val, optval_addr))
2907                     return -TARGET_EFAULT;
2908             }
2909             break;
2910         default:
2911             ret = -TARGET_ENOPROTOOPT;
2912             break;
2913         }
2914         break;
2915 #ifdef SOL_NETLINK
2916     case SOL_NETLINK:
2917         switch (optname) {
2918         case NETLINK_PKTINFO:
2919         case NETLINK_BROADCAST_ERROR:
2920         case NETLINK_NO_ENOBUFS:
2921 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2922         case NETLINK_LISTEN_ALL_NSID:
2923         case NETLINK_CAP_ACK:
2924 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2925 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2926         case NETLINK_EXT_ACK:
2927 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2928 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2929         case NETLINK_GET_STRICT_CHK:
2930 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2931             if (get_user_u32(len, optlen)) {
2932                 return -TARGET_EFAULT;
2933             }
2934             if (len != sizeof(val)) {
2935                 return -TARGET_EINVAL;
2936             }
2937             lv = len;
2938             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2939             if (ret < 0) {
2940                 return ret;
2941             }
2942             if (put_user_u32(lv, optlen)
2943                 || put_user_u32(val, optval_addr)) {
2944                 return -TARGET_EFAULT;
2945             }
2946             break;
2947 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2948         case NETLINK_LIST_MEMBERSHIPS:
2949         {
2950             uint32_t *results;
2951             int i;
2952             if (get_user_u32(len, optlen)) {
2953                 return -TARGET_EFAULT;
2954             }
2955             if (len < 0) {
2956                 return -TARGET_EINVAL;
2957             }
2958             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2959             if (!results && len > 0) {
2960                 return -TARGET_EFAULT;
2961             }
2962             lv = len;
2963             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2964             if (ret < 0) {
2965                 unlock_user(results, optval_addr, 0);
2966                 return ret;
2967             }
2968             /* Swap host endianness to target endianness. */
2969             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2970                 results[i] = tswap32(results[i]);
2971             }
2972             if (put_user_u32(lv, optlen)) {
2973                 return -TARGET_EFAULT;
2974             }
2975             unlock_user(results, optval_addr, 0);
2976             break;
2977         }
2978 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2979         default:
2980             goto unimplemented;
2981         }
2982         break;
2983 #endif /* SOL_NETLINK */
2984     default:
2985     unimplemented:
2986         qemu_log_mask(LOG_UNIMP,
2987                       "getsockopt level=%d optname=%d not yet supported\n",
2988                       level, optname);
2989         ret = -TARGET_EOPNOTSUPP;
2990         break;
2991     }
2992     return ret;
2993 }
2994 
2995 /* Convert target low/high pair representing file offset into the host
2996  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2997  * as the kernel doesn't handle them either.
2998  */
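/*
 * For example (illustrative values only): a 32-bit guest on a 64-bit host
 * passing tlow = 0x89abcdef, thigh = 0x01234567 gives
 * off = 0x0123456789abcdef, so *hlow = 0x0123456789abcdef and *hhigh = 0;
 * on a 32-bit host the same input gives *hlow = 0x89abcdef and
 * *hhigh = 0x01234567.
 */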
2999 static void target_to_host_low_high(abi_ulong tlow,
3000                                     abi_ulong thigh,
3001                                     unsigned long *hlow,
3002                                     unsigned long *hhigh)
3003 {
3004     uint64_t off = tlow |
3005         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3006         TARGET_LONG_BITS / 2;
3007 
3008     *hlow = off;
3009     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3010 }
3011 
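/*
 * Lock a guest iovec array of 'count' entries and build the corresponding
 * host struct iovec array.  A bad address in the first entry fails with
 * EFAULT; bad addresses in later entries become zero-length entries so a
 * partial transfer is still possible.  Returns NULL with errno set on
 * failure (errno == 0 for a zero count); release with unlock_iovec().
 */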
3012 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3013                                 abi_ulong count, int copy)
3014 {
3015     struct target_iovec *target_vec;
3016     struct iovec *vec;
3017     abi_ulong total_len, max_len;
3018     int i;
3019     int err = 0;
3020     bool bad_address = false;
3021 
3022     if (count == 0) {
3023         errno = 0;
3024         return NULL;
3025     }
3026     if (count > IOV_MAX) {
3027         errno = EINVAL;
3028         return NULL;
3029     }
3030 
3031     vec = g_try_new0(struct iovec, count);
3032     if (vec == NULL) {
3033         errno = ENOMEM;
3034         return NULL;
3035     }
3036 
3037     target_vec = lock_user(VERIFY_READ, target_addr,
3038                            count * sizeof(struct target_iovec), 1);
3039     if (target_vec == NULL) {
3040         err = EFAULT;
3041         goto fail2;
3042     }
3043 
3044     /* ??? If host page size > target page size, this will result in a
3045        value larger than what we can actually support.  */
3046     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3047     total_len = 0;
3048 
3049     for (i = 0; i < count; i++) {
3050         abi_ulong base = tswapal(target_vec[i].iov_base);
3051         abi_long len = tswapal(target_vec[i].iov_len);
3052 
3053         if (len < 0) {
3054             err = EINVAL;
3055             goto fail;
3056         } else if (len == 0) {
3057             /* Zero length pointer is ignored.  */
3058             vec[i].iov_base = 0;
3059         } else {
3060             vec[i].iov_base = lock_user(type, base, len, copy);
3061             /* If the first buffer pointer is bad, this is a fault.  But
3062              * subsequent bad buffers will result in a partial write; this
3063              * is realized by filling the vector with null pointers and
3064              * zero lengths. */
3065             if (!vec[i].iov_base) {
3066                 if (i == 0) {
3067                     err = EFAULT;
3068                     goto fail;
3069                 } else {
3070                     bad_address = true;
3071                 }
3072             }
3073             if (bad_address) {
3074                 len = 0;
3075             }
3076             if (len > max_len - total_len) {
3077                 len = max_len - total_len;
3078             }
3079         }
3080         vec[i].iov_len = len;
3081         total_len += len;
3082     }
3083 
3084     unlock_user(target_vec, target_addr, 0);
3085     return vec;
3086 
3087  fail:
3088     while (--i >= 0) {
3089         if (tswapal(target_vec[i].iov_len) > 0) {
3090             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3091         }
3092     }
3093     unlock_user(target_vec, target_addr, 0);
3094  fail2:
3095     g_free(vec);
3096     errno = err;
3097     return NULL;
3098 }
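
/*
 * A minimal sketch (not compiled) of the partial-write behaviour described
 * above: if a guest writev() passes three buffers and only the second one
 * is unmapped, lock_iovec() keeps iov[0] intact and gives iov[1] and iov[2]
 * zero lengths, so the host writev() performs a short write covering just
 * the first buffer -- the same thing a native kernel would do.  Only a bad
 * *first* buffer fails the whole call with EFAULT.
 *
 *     struct iovec *v = lock_iovec(VERIFY_READ, guest_iov_addr, 3, 1);
 *     if (v == NULL) {
 *         return -host_to_target_errno(errno);
 *     }
 *     ...host writev() / sendmsg() here...
 *     unlock_iovec(v, guest_iov_addr, 3, 0);
 *
 * guest_iov_addr is a placeholder for the target address of the iovec
 * array, not a variable defined in this file.
 */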
3099 
3100 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3101                          abi_ulong count, int copy)
3102 {
3103     struct target_iovec *target_vec;
3104     int i;
3105 
3106     target_vec = lock_user(VERIFY_READ, target_addr,
3107                            count * sizeof(struct target_iovec), 1);
3108     if (target_vec) {
3109         for (i = 0; i < count; i++) {
3110             abi_ulong base = tswapal(target_vec[i].iov_base);
3111             abi_long len = tswapal(target_vec[i].iov_len);
3112             if (len < 0) {
3113                 break;
3114             }
3115             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3116         }
3117         unlock_user(target_vec, target_addr, 0);
3118     }
3119 
3120     g_free(vec);
3121 }
3122 
3123 static inline int target_to_host_sock_type(int *type)
3124 {
3125     int host_type = 0;
3126     int target_type = *type;
3127 
3128     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3129     case TARGET_SOCK_DGRAM:
3130         host_type = SOCK_DGRAM;
3131         break;
3132     case TARGET_SOCK_STREAM:
3133         host_type = SOCK_STREAM;
3134         break;
3135     default:
3136         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3137         break;
3138     }
3139     if (target_type & TARGET_SOCK_CLOEXEC) {
3140 #if defined(SOCK_CLOEXEC)
3141         host_type |= SOCK_CLOEXEC;
3142 #else
3143         return -TARGET_EINVAL;
3144 #endif
3145     }
3146     if (target_type & TARGET_SOCK_NONBLOCK) {
3147 #if defined(SOCK_NONBLOCK)
3148         host_type |= SOCK_NONBLOCK;
3149 #elif !defined(O_NONBLOCK)
3150         return -TARGET_EINVAL;
3151 #endif
3152     }
3153     *type = host_type;
3154     return 0;
3155 }
3156 
3157 /* Try to emulate socket type flags after socket creation.  */
3158 static int sock_flags_fixup(int fd, int target_type)
3159 {
3160 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3161     if (target_type & TARGET_SOCK_NONBLOCK) {
3162         int flags = fcntl(fd, F_GETFL);
3163         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3164             close(fd);
3165             return -TARGET_EINVAL;
3166         }
3167     }
3168 #endif
3169     return fd;
3170 }
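
/*
 * Illustrative sketch (not compiled) of how the two helpers above cooperate
 * for a guest socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0):
 *
 *     int type = TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK;
 *     if (target_to_host_sock_type(&type)) {
 *         return -TARGET_EINVAL;
 *     }
 *     fd = sock_flags_fixup(socket(AF_INET, type, 0), target_type);
 *
 * On hosts that define SOCK_NONBLOCK the flag goes straight into socket();
 * on older hosts that only have O_NONBLOCK, sock_flags_fixup() applies it
 * afterwards with fcntl(F_SETFL); hosts with neither reject the request in
 * target_to_host_sock_type() with -TARGET_EINVAL.  The same pattern covers
 * TARGET_SOCK_CLOEXEC, except that there is no post-creation fallback.
 */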
3171 
3172 /* do_socket() Must return target values and target errnos. */
3173 static abi_long do_socket(int domain, int type, int protocol)
3174 {
3175     int target_type = type;
3176     int ret;
3177 
3178     ret = target_to_host_sock_type(&type);
3179     if (ret) {
3180         return ret;
3181     }
3182 
3183     if (domain == PF_NETLINK && !(
3184 #ifdef CONFIG_RTNETLINK
3185          protocol == NETLINK_ROUTE ||
3186 #endif
3187          protocol == NETLINK_KOBJECT_UEVENT ||
3188          protocol == NETLINK_AUDIT)) {
3189         return -TARGET_EPROTONOSUPPORT;
3190     }
3191 
3192     if (domain == AF_PACKET ||
3193         (domain == AF_INET && type == SOCK_PACKET)) {
3194         protocol = tswap16(protocol);
3195     }
3196 
3197     ret = get_errno(socket(domain, type, protocol));
3198     if (ret >= 0) {
3199         ret = sock_flags_fixup(ret, target_type);
3200         if (type == SOCK_PACKET) {
3201             /* Handle an obsolete case: if the socket type is
3202              * SOCK_PACKET, bind by name.
3203              */
3204             fd_trans_register(ret, &target_packet_trans);
3205         } else if (domain == PF_NETLINK) {
3206             switch (protocol) {
3207 #ifdef CONFIG_RTNETLINK
3208             case NETLINK_ROUTE:
3209                 fd_trans_register(ret, &target_netlink_route_trans);
3210                 break;
3211 #endif
3212             case NETLINK_KOBJECT_UEVENT:
3213                 /* nothing to do: messages are strings */
3214                 break;
3215             case NETLINK_AUDIT:
3216                 fd_trans_register(ret, &target_netlink_audit_trans);
3217                 break;
3218             default:
3219                 g_assert_not_reached();
3220             }
3221         }
3222     }
3223     return ret;
3224 }
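
/*
 * Example flow (descriptive; the guest-side call is shown for illustration
 * only):
 *
 *     int s = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 * is let through only when CONFIG_RTNETLINK is built in; netlink protocols
 * other than NETLINK_ROUTE, NETLINK_KOBJECT_UEVENT and NETLINK_AUDIT are
 * refused with -TARGET_EPROTONOSUPPORT.  On success the new fd is
 * registered with target_netlink_route_trans, so the fd_trans hooks used by
 * do_sendrecvmsg_locked(), do_sendto() and do_recvfrom() below convert the
 * netlink payload between target and host representations.  UEVENT sockets
 * need no translator because their messages are plain strings.
 */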
3225 
3226 /* do_bind() Must return target values and target errnos. */
3227 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3228                         socklen_t addrlen)
3229 {
3230     void *addr;
3231     abi_long ret;
3232 
3233     if ((int)addrlen < 0) {
3234         return -TARGET_EINVAL;
3235     }
3236 
3237     addr = alloca(addrlen+1);
3238 
3239     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3240     if (ret)
3241         return ret;
3242 
3243     return get_errno(bind(sockfd, addr, addrlen));
3244 }
3245 
3246 /* do_connect() Must return target values and target errnos. */
3247 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3248                            socklen_t addrlen)
3249 {
3250     void *addr;
3251     abi_long ret;
3252 
3253     if ((int)addrlen < 0) {
3254         return -TARGET_EINVAL;
3255     }
3256 
3257     addr = alloca(addrlen+1);
3258 
3259     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3260     if (ret)
3261         return ret;
3262 
3263     return get_errno(safe_connect(sockfd, addr, addrlen));
3264 }
3265 
3266 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3267 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3268                                       int flags, int send)
3269 {
3270     abi_long ret, len;
3271     struct msghdr msg;
3272     abi_ulong count;
3273     struct iovec *vec;
3274     abi_ulong target_vec;
3275 
3276     if (msgp->msg_name) {
3277         msg.msg_namelen = tswap32(msgp->msg_namelen);
3278         msg.msg_name = alloca(msg.msg_namelen+1);
3279         ret = target_to_host_sockaddr(fd, msg.msg_name,
3280                                       tswapal(msgp->msg_name),
3281                                       msg.msg_namelen);
3282         if (ret == -TARGET_EFAULT) {
3283             /* For connected sockets msg_name and msg_namelen must
3284              * be ignored, so returning EFAULT immediately is wrong.
3285              * Instead, pass a bad msg_name to the host kernel, and
3286              * let it decide whether to return EFAULT or not.
3287              */
3288             msg.msg_name = (void *)-1;
3289         } else if (ret) {
3290             goto out2;
3291         }
3292     } else {
3293         msg.msg_name = NULL;
3294         msg.msg_namelen = 0;
3295     }
3296     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3297     msg.msg_control = alloca(msg.msg_controllen);
3298     memset(msg.msg_control, 0, msg.msg_controllen);
3299 
3300     msg.msg_flags = tswap32(msgp->msg_flags);
3301 
3302     count = tswapal(msgp->msg_iovlen);
3303     target_vec = tswapal(msgp->msg_iov);
3304 
3305     if (count > IOV_MAX) {
3306         /* sendmsg/recvmsg return a different errno for this condition than
3307          * readv/writev, so we must catch it here before lock_iovec() does.
3308          */
3309         ret = -TARGET_EMSGSIZE;
3310         goto out2;
3311     }
3312 
3313     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3314                      target_vec, count, send);
3315     if (vec == NULL) {
3316         ret = -host_to_target_errno(errno);
3317         goto out2;
3318     }
3319     msg.msg_iovlen = count;
3320     msg.msg_iov = vec;
3321 
3322     if (send) {
3323         if (fd_trans_target_to_host_data(fd)) {
3324             void *host_msg;
3325 
3326             host_msg = g_malloc(msg.msg_iov->iov_len);
3327             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3328             ret = fd_trans_target_to_host_data(fd)(host_msg,
3329                                                    msg.msg_iov->iov_len);
3330             if (ret >= 0) {
3331                 msg.msg_iov->iov_base = host_msg;
3332                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3333             }
3334             g_free(host_msg);
3335         } else {
3336             ret = target_to_host_cmsg(&msg, msgp);
3337             if (ret == 0) {
3338                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3339             }
3340         }
3341     } else {
3342         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3343         if (!is_error(ret)) {
3344             len = ret;
3345             if (fd_trans_host_to_target_data(fd)) {
3346                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3347                                                MIN(msg.msg_iov->iov_len, len));
3348             } else {
3349                 ret = host_to_target_cmsg(msgp, &msg);
3350             }
3351             if (!is_error(ret)) {
3352                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3353                 msgp->msg_flags = tswap32(msg.msg_flags);
3354                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3355                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3356                                     msg.msg_name, msg.msg_namelen);
3357                     if (ret) {
3358                         goto out;
3359                     }
3360                 }
3361 
3362                 ret = len;
3363             }
3364         }
3365     }
3366 
3367 out:
3368     unlock_iovec(vec, target_vec, count, !send);
3369 out2:
3370     return ret;
3371 }
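
/*
 * Two corner cases above deserve a note (illustrative, not compiled):
 *
 *   - A bogus msg_name is not rejected here.  Passing msg.msg_name =
 *     (void *)-1 down to the host lets the kernel decide: it ignores the
 *     field for connected sockets and reports EFAULT otherwise, so the
 *     guest sees native behaviour either way.
 *
 *   - msg_iovlen > IOV_MAX is caught before lock_iovec(), because
 *     sendmsg()/recvmsg() report EMSGSIZE for that condition whereas
 *     readv()/writev() (whose convention lock_iovec() follows) would give
 *     EINVAL:
 *
 *         if (count > IOV_MAX) {
 *             ret = -TARGET_EMSGSIZE;   (rather than -TARGET_EINVAL)
 *         }
 */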
3372 
3373 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3374                                int flags, int send)
3375 {
3376     abi_long ret;
3377     struct target_msghdr *msgp;
3378 
3379     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3380                           msgp,
3381                           target_msg,
3382                           send ? 1 : 0)) {
3383         return -TARGET_EFAULT;
3384     }
3385     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3386     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3387     return ret;
3388 }
3389 
3390 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3391  * so it might not have this *mmsg-specific flag either.
3392  */
3393 #ifndef MSG_WAITFORONE
3394 #define MSG_WAITFORONE 0x10000
3395 #endif
3396 
3397 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3398                                 unsigned int vlen, unsigned int flags,
3399                                 int send)
3400 {
3401     struct target_mmsghdr *mmsgp;
3402     abi_long ret = 0;
3403     int i;
3404 
3405     if (vlen > UIO_MAXIOV) {
3406         vlen = UIO_MAXIOV;
3407     }
3408 
3409     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3410     if (!mmsgp) {
3411         return -TARGET_EFAULT;
3412     }
3413 
3414     for (i = 0; i < vlen; i++) {
3415         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3416         if (is_error(ret)) {
3417             break;
3418         }
3419         mmsgp[i].msg_len = tswap32(ret);
3420         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3421         if (flags & MSG_WAITFORONE) {
3422             flags |= MSG_DONTWAIT;
3423         }
3424     }
3425 
3426     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3427 
3428     /* Return number of datagrams sent if we sent any at all;
3429      * otherwise return the error.
3430      */
3431     if (i) {
3432         return i;
3433     }
3434     return ret;
3435 }
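
/*
 * Behavioural sketch (not compiled; 8 is an arbitrary vlen): recvmmsg with
 * MSG_WAITFORONE blocks only for the first datagram -- after that the loop
 * above ORs in MSG_DONTWAIT, so further slots are filled only if data is
 * already queued.  The return value also mirrors the kernel convention:
 *
 *     ret = do_sendrecvmmsg(fd, target_msgvec, 8, MSG_WAITFORONE, 0);
 *     ret == 3  -> three msg_len fields were written back, the rest left
 *                  untouched
 *     ret < 0   -> even the first recvmsg failed, so its error is reported
 */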
3436 
3437 /* do_accept4() Must return target values and target errnos. */
3438 static abi_long do_accept4(int fd, abi_ulong target_addr,
3439                            abi_ulong target_addrlen_addr, int flags)
3440 {
3441     socklen_t addrlen, ret_addrlen;
3442     void *addr;
3443     abi_long ret;
3444     int host_flags;
3445 
3446     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3447 
3448     if (target_addr == 0) {
3449         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3450     }
3451 
3452     /* Linux returns EFAULT if the addrlen pointer is invalid */
3453     if (get_user_u32(addrlen, target_addrlen_addr))
3454         return -TARGET_EFAULT;
3455 
3456     if ((int)addrlen < 0) {
3457         return -TARGET_EINVAL;
3458     }
3459 
3460     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3461         return -TARGET_EFAULT;
3462     }
3463 
3464     addr = alloca(addrlen);
3465 
3466     ret_addrlen = addrlen;
3467     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3468     if (!is_error(ret)) {
3469         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3470         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3471             ret = -TARGET_EFAULT;
3472         }
3473     }
3474     return ret;
3475 }
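
/*
 * The addrlen handling above (and in do_getpeername()/do_getsockname()
 * below) follows the kernel's in/out convention.  A rough guest-side view
 * of what is being emulated:
 *
 *     struct sockaddr_storage ss;
 *     socklen_t alen = sizeof(ss);                 (read via get_user_u32())
 *     int c = accept4(fd, (struct sockaddr *)&ss, &alen, 0);
 *
 * On return the kernel may report an address length larger than the buffer
 * that was supplied; only MIN(supplied, returned) bytes are copied back to
 * the guest buffer, but the full returned length is stored in the guest's
 * addrlen word.  A negative guest addrlen yields -TARGET_EINVAL before any
 * host call is made, and an unreadable addrlen word yields -TARGET_EFAULT.
 */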
3476 
3477 /* do_getpeername() Must return target values and target errnos. */
3478 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3479                                abi_ulong target_addrlen_addr)
3480 {
3481     socklen_t addrlen, ret_addrlen;
3482     void *addr;
3483     abi_long ret;
3484 
3485     if (get_user_u32(addrlen, target_addrlen_addr))
3486         return -TARGET_EFAULT;
3487 
3488     if ((int)addrlen < 0) {
3489         return -TARGET_EINVAL;
3490     }
3491 
3492     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3493         return -TARGET_EFAULT;
3494     }
3495 
3496     addr = alloca(addrlen);
3497 
3498     ret_addrlen = addrlen;
3499     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3500     if (!is_error(ret)) {
3501         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3502         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3503             ret = -TARGET_EFAULT;
3504         }
3505     }
3506     return ret;
3507 }
3508 
3509 /* do_getsockname() Must return target values and target errnos. */
3510 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3511                                abi_ulong target_addrlen_addr)
3512 {
3513     socklen_t addrlen, ret_addrlen;
3514     void *addr;
3515     abi_long ret;
3516 
3517     if (get_user_u32(addrlen, target_addrlen_addr))
3518         return -TARGET_EFAULT;
3519 
3520     if ((int)addrlen < 0) {
3521         return -TARGET_EINVAL;
3522     }
3523 
3524     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3525         return -TARGET_EFAULT;
3526     }
3527 
3528     addr = alloca(addrlen);
3529 
3530     ret_addrlen = addrlen;
3531     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3532     if (!is_error(ret)) {
3533         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3534         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3535             ret = -TARGET_EFAULT;
3536         }
3537     }
3538     return ret;
3539 }
3540 
3541 /* do_socketpair() Must return target values and target errnos. */
3542 static abi_long do_socketpair(int domain, int type, int protocol,
3543                               abi_ulong target_tab_addr)
3544 {
3545     int tab[2];
3546     abi_long ret;
3547 
3548     target_to_host_sock_type(&type);
3549 
3550     ret = get_errno(socketpair(domain, type, protocol, tab));
3551     if (!is_error(ret)) {
3552         if (put_user_s32(tab[0], target_tab_addr)
3553             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3554             ret = -TARGET_EFAULT;
3555     }
3556     return ret;
3557 }
3558 
3559 /* do_sendto() Must return target values and target errnos. */
3560 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3561                           abi_ulong target_addr, socklen_t addrlen)
3562 {
3563     void *addr;
3564     void *host_msg;
3565     void *copy_msg = NULL;
3566     abi_long ret;
3567 
3568     if ((int)addrlen < 0) {
3569         return -TARGET_EINVAL;
3570     }
3571 
3572     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3573     if (!host_msg)
3574         return -TARGET_EFAULT;
3575     if (fd_trans_target_to_host_data(fd)) {
3576         copy_msg = host_msg;
3577         host_msg = g_malloc(len);
3578         memcpy(host_msg, copy_msg, len);
3579         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3580         if (ret < 0) {
3581             goto fail;
3582         }
3583     }
3584     if (target_addr) {
3585         addr = alloca(addrlen+1);
3586         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3587         if (ret) {
3588             goto fail;
3589         }
3590         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3591     } else {
3592         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3593     }
3594 fail:
3595     if (copy_msg) {
3596         g_free(host_msg);
3597         host_msg = copy_msg;
3598     }
3599     unlock_user(host_msg, msg, 0);
3600     return ret;
3601 }
3602 
3603 /* do_recvfrom() Must return target values and target errnos. */
3604 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3605                             abi_ulong target_addr,
3606                             abi_ulong target_addrlen)
3607 {
3608     socklen_t addrlen, ret_addrlen;
3609     void *addr;
3610     void *host_msg;
3611     abi_long ret;
3612 
3613     if (!msg) {
3614         host_msg = NULL;
3615     } else {
3616         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3617         if (!host_msg) {
3618             return -TARGET_EFAULT;
3619         }
3620     }
3621     if (target_addr) {
3622         if (get_user_u32(addrlen, target_addrlen)) {
3623             ret = -TARGET_EFAULT;
3624             goto fail;
3625         }
3626         if ((int)addrlen < 0) {
3627             ret = -TARGET_EINVAL;
3628             goto fail;
3629         }
3630         addr = alloca(addrlen);
3631         ret_addrlen = addrlen;
3632         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3633                                       addr, &ret_addrlen));
3634     } else {
3635         addr = NULL; /* To keep compiler quiet.  */
3636         addrlen = 0; /* To keep compiler quiet.  */
3637         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3638     }
3639     if (!is_error(ret)) {
3640         if (fd_trans_host_to_target_data(fd)) {
3641             abi_long trans;
3642             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3643             if (is_error(trans)) {
3644                 ret = trans;
3645                 goto fail;
3646             }
3647         }
3648         if (target_addr) {
3649             host_to_target_sockaddr(target_addr, addr,
3650                                     MIN(addrlen, ret_addrlen));
3651             if (put_user_u32(ret_addrlen, target_addrlen)) {
3652                 ret = -TARGET_EFAULT;
3653                 goto fail;
3654             }
3655         }
3656         unlock_user(host_msg, msg, len);
3657     } else {
3658 fail:
3659         unlock_user(host_msg, msg, 0);
3660     }
3661     return ret;
3662 }
3663 
3664 #ifdef TARGET_NR_socketcall
3665 /* do_socketcall() must return target values and target errnos. */
3666 static abi_long do_socketcall(int num, abi_ulong vptr)
3667 {
3668     static const unsigned nargs[] = { /* number of arguments per operation */
3669         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3670         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3671         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3672         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3673         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3674         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3675         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3676         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3677         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3678         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3679         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3680         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3681         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3682         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3683         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3684         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3685         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3686         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3687         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3688         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3689     };
3690     abi_long a[6]; /* max 6 args */
3691     unsigned i;
3692 
3693     /* check the range of the first argument num */
3694     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3695     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3696         return -TARGET_EINVAL;
3697     }
3698     /* ensure we have space for args */
3699     if (nargs[num] > ARRAY_SIZE(a)) {
3700         return -TARGET_EINVAL;
3701     }
3702     /* collect the arguments in a[] according to nargs[] */
3703     for (i = 0; i < nargs[num]; ++i) {
3704         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3705             return -TARGET_EFAULT;
3706         }
3707     }
3708     /* now when we have the args, invoke the appropriate underlying function */
3709     switch (num) {
3710     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3711         return do_socket(a[0], a[1], a[2]);
3712     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3713         return do_bind(a[0], a[1], a[2]);
3714     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3715         return do_connect(a[0], a[1], a[2]);
3716     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3717         return get_errno(listen(a[0], a[1]));
3718     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3719         return do_accept4(a[0], a[1], a[2], 0);
3720     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3721         return do_getsockname(a[0], a[1], a[2]);
3722     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3723         return do_getpeername(a[0], a[1], a[2]);
3724     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3725         return do_socketpair(a[0], a[1], a[2], a[3]);
3726     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3727         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3728     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3729         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3730     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3731         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3732     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3733         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3734     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3735         return get_errno(shutdown(a[0], a[1]));
3736     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3737         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3738     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3739         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3740     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3741         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3742     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3743         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3744     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3745         return do_accept4(a[0], a[1], a[2], a[3]);
3746     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3747         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3748     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3749         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3750     default:
3751         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3752         return -TARGET_EINVAL;
3753     }
3754 }
3755 #endif
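
/*
 * Dispatch example (illustrative, not compiled): a 32-bit guest libc
 * typically implements connect() as socketcall(TARGET_SYS_CONNECT, args),
 * where args points at three abi_longs { sockfd, addr, addrlen } in guest
 * memory.  do_socketcall() looks up nargs[TARGET_SYS_CONNECT] == 3, fetches
 * the three words with get_user_ual() and forwards them:
 *
 *     return do_connect(a[0], a[1], a[2]);
 *
 * Out-of-range call numbers fail with -TARGET_EINVAL, and anything that
 * falls through to the default case is logged via LOG_UNIMP before
 * returning the same error.
 */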
3756 
3757 #define N_SHM_REGIONS	32
3758 
3759 static struct shm_region {
3760     abi_ulong start;
3761     abi_ulong size;
3762     bool in_use;
3763 } shm_regions[N_SHM_REGIONS];
3764 
3765 #ifndef TARGET_SEMID64_DS
3766 /* asm-generic version of this struct */
3767 struct target_semid64_ds
3768 {
3769   struct target_ipc_perm sem_perm;
3770   abi_ulong sem_otime;
3771 #if TARGET_ABI_BITS == 32
3772   abi_ulong __unused1;
3773 #endif
3774   abi_ulong sem_ctime;
3775 #if TARGET_ABI_BITS == 32
3776   abi_ulong __unused2;
3777 #endif
3778   abi_ulong sem_nsems;
3779   abi_ulong __unused3;
3780   abi_ulong __unused4;
3781 };
3782 #endif
3783 
3784 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3785                                                abi_ulong target_addr)
3786 {
3787     struct target_ipc_perm *target_ip;
3788     struct target_semid64_ds *target_sd;
3789 
3790     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3791         return -TARGET_EFAULT;
3792     target_ip = &(target_sd->sem_perm);
3793     host_ip->__key = tswap32(target_ip->__key);
3794     host_ip->uid = tswap32(target_ip->uid);
3795     host_ip->gid = tswap32(target_ip->gid);
3796     host_ip->cuid = tswap32(target_ip->cuid);
3797     host_ip->cgid = tswap32(target_ip->cgid);
3798 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3799     host_ip->mode = tswap32(target_ip->mode);
3800 #else
3801     host_ip->mode = tswap16(target_ip->mode);
3802 #endif
3803 #if defined(TARGET_PPC)
3804     host_ip->__seq = tswap32(target_ip->__seq);
3805 #else
3806     host_ip->__seq = tswap16(target_ip->__seq);
3807 #endif
3808     unlock_user_struct(target_sd, target_addr, 0);
3809     return 0;
3810 }
3811 
3812 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3813                                                struct ipc_perm *host_ip)
3814 {
3815     struct target_ipc_perm *target_ip;
3816     struct target_semid64_ds *target_sd;
3817 
3818     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3819         return -TARGET_EFAULT;
3820     target_ip = &(target_sd->sem_perm);
3821     target_ip->__key = tswap32(host_ip->__key);
3822     target_ip->uid = tswap32(host_ip->uid);
3823     target_ip->gid = tswap32(host_ip->gid);
3824     target_ip->cuid = tswap32(host_ip->cuid);
3825     target_ip->cgid = tswap32(host_ip->cgid);
3826 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3827     target_ip->mode = tswap32(host_ip->mode);
3828 #else
3829     target_ip->mode = tswap16(host_ip->mode);
3830 #endif
3831 #if defined(TARGET_PPC)
3832     target_ip->__seq = tswap32(host_ip->__seq);
3833 #else
3834     target_ip->__seq = tswap16(host_ip->__seq);
3835 #endif
3836     unlock_user_struct(target_sd, target_addr, 1);
3837     return 0;
3838 }
3839 
3840 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3841                                                abi_ulong target_addr)
3842 {
3843     struct target_semid64_ds *target_sd;
3844 
3845     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3846         return -TARGET_EFAULT;
3847     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3848         return -TARGET_EFAULT;
3849     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3850     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3851     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3852     unlock_user_struct(target_sd, target_addr, 0);
3853     return 0;
3854 }
3855 
3856 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3857                                                struct semid_ds *host_sd)
3858 {
3859     struct target_semid64_ds *target_sd;
3860 
3861     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3862         return -TARGET_EFAULT;
3863     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3864         return -TARGET_EFAULT;
3865     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3866     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3867     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3868     unlock_user_struct(target_sd, target_addr, 1);
3869     return 0;
3870 }
3871 
3872 struct target_seminfo {
3873     int semmap;
3874     int semmni;
3875     int semmns;
3876     int semmnu;
3877     int semmsl;
3878     int semopm;
3879     int semume;
3880     int semusz;
3881     int semvmx;
3882     int semaem;
3883 };
3884 
3885 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3886                                               struct seminfo *host_seminfo)
3887 {
3888     struct target_seminfo *target_seminfo;
3889     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3890         return -TARGET_EFAULT;
3891     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3892     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3893     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3894     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3895     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3896     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3897     __put_user(host_seminfo->semume, &target_seminfo->semume);
3898     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3899     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3900     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3901     unlock_user_struct(target_seminfo, target_addr, 1);
3902     return 0;
3903 }
3904 
3905 union semun {
3906 	int val;
3907 	struct semid_ds *buf;
3908 	unsigned short *array;
3909 	struct seminfo *__buf;
3910 };
3911 
3912 union target_semun {
3913 	int val;
3914 	abi_ulong buf;
3915 	abi_ulong array;
3916 	abi_ulong __buf;
3917 };
3918 
3919 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3920                                                abi_ulong target_addr)
3921 {
3922     int nsems;
3923     unsigned short *array;
3924     union semun semun;
3925     struct semid_ds semid_ds;
3926     int i, ret;
3927 
3928     semun.buf = &semid_ds;
3929 
3930     ret = semctl(semid, 0, IPC_STAT, semun);
3931     if (ret == -1)
3932         return get_errno(ret);
3933 
3934     nsems = semid_ds.sem_nsems;
3935 
3936     *host_array = g_try_new(unsigned short, nsems);
3937     if (!*host_array) {
3938         return -TARGET_ENOMEM;
3939     }
3940     array = lock_user(VERIFY_READ, target_addr,
3941                       nsems*sizeof(unsigned short), 1);
3942     if (!array) {
3943         g_free(*host_array);
3944         return -TARGET_EFAULT;
3945     }
3946 
3947     for(i=0; i<nsems; i++) {
3948         __get_user((*host_array)[i], &array[i]);
3949     }
3950     unlock_user(array, target_addr, 0);
3951 
3952     return 0;
3953 }
3954 
3955 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3956                                                unsigned short **host_array)
3957 {
3958     int nsems;
3959     unsigned short *array;
3960     union semun semun;
3961     struct semid_ds semid_ds;
3962     int i, ret;
3963 
3964     semun.buf = &semid_ds;
3965 
3966     ret = semctl(semid, 0, IPC_STAT, semun);
3967     if (ret == -1)
3968         return get_errno(ret);
3969 
3970     nsems = semid_ds.sem_nsems;
3971 
3972     array = lock_user(VERIFY_WRITE, target_addr,
3973                       nsems*sizeof(unsigned short), 0);
3974     if (!array)
3975         return -TARGET_EFAULT;
3976 
3977     for(i=0; i<nsems; i++) {
3978         __put_user((*host_array)[i], &array[i]);
3979     }
3980     g_free(*host_array);
3981     unlock_user(array, target_addr, 1);
3982 
3983     return 0;
3984 }
3985 
3986 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3987                                  abi_ulong target_arg)
3988 {
3989     union target_semun target_su = { .buf = target_arg };
3990     union semun arg;
3991     struct semid_ds dsarg;
3992     unsigned short *array = NULL;
3993     struct seminfo seminfo;
3994     abi_long ret = -TARGET_EINVAL;
3995     abi_long err;
3996     cmd &= 0xff;
3997 
3998     switch( cmd ) {
3999 	case GETVAL:
4000 	case SETVAL:
4001             /* In 64-bit cross-endian situations, we will erroneously pick up
4002              * the wrong half of the union for the "val" element.  To rectify
4003              * this, the entire 8-byte structure is byteswapped, followed by
4004              * a swap of the 4-byte val field.  In other cases, the data is
4005              * already in proper host byte order. */
4006 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4007 		target_su.buf = tswapal(target_su.buf);
4008 		arg.val = tswap32(target_su.val);
4009 	    } else {
4010 		arg.val = target_su.val;
4011 	    }
4012             ret = get_errno(semctl(semid, semnum, cmd, arg));
4013             break;
4014 	case GETALL:
4015 	case SETALL:
4016             err = target_to_host_semarray(semid, &array, target_su.array);
4017             if (err)
4018                 return err;
4019             arg.array = array;
4020             ret = get_errno(semctl(semid, semnum, cmd, arg));
4021             err = host_to_target_semarray(semid, target_su.array, &array);
4022             if (err)
4023                 return err;
4024             break;
4025 	case IPC_STAT:
4026 	case IPC_SET:
4027 	case SEM_STAT:
4028             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4029             if (err)
4030                 return err;
4031             arg.buf = &dsarg;
4032             ret = get_errno(semctl(semid, semnum, cmd, arg));
4033             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4034             if (err)
4035                 return err;
4036             break;
4037 	case IPC_INFO:
4038 	case SEM_INFO:
4039             arg.__buf = &seminfo;
4040             ret = get_errno(semctl(semid, semnum, cmd, arg));
4041             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4042             if (err)
4043                 return err;
4044             break;
4045 	case IPC_RMID:
4046 	case GETPID:
4047 	case GETNCNT:
4048 	case GETZCNT:
4049             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4050             break;
4051     }
4052 
4053     return ret;
4054 }
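
/*
 * Sketch of the GETVAL/SETVAL byte-swap dance above (illustrative, not
 * compiled).  When the target's semun is 8 bytes wide but val is only 4,
 * a cross-endian guest would otherwise have its value read from the wrong
 * half of the union, so the whole 8-byte buf member is swapped first and
 * the 32-bit val inside it swapped afterwards:
 *
 *     if (sizeof(target_su.val) != sizeof(target_su.buf)) {
 *         target_su.buf = tswapal(target_su.buf);    (whole 64-bit union)
 *         arg.val = tswap32(target_su.val);          (then the int inside)
 *     } else {
 *         arg.val = target_su.val;                   (already host order)
 *     }
 *
 * GETALL/SETALL instead marshal the whole semaphore array through
 * target_to_host_semarray()/host_to_target_semarray(), which size the
 * array from an IPC_STAT query rather than trusting the guest.
 */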
4055 
4056 struct target_sembuf {
4057     unsigned short sem_num;
4058     short sem_op;
4059     short sem_flg;
4060 };
4061 
4062 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4063                                              abi_ulong target_addr,
4064                                              unsigned nsops)
4065 {
4066     struct target_sembuf *target_sembuf;
4067     int i;
4068 
4069     target_sembuf = lock_user(VERIFY_READ, target_addr,
4070                               nsops*sizeof(struct target_sembuf), 1);
4071     if (!target_sembuf)
4072         return -TARGET_EFAULT;
4073 
4074     for(i=0; i<nsops; i++) {
4075         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4076         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4077         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4078     }
4079 
4080     unlock_user(target_sembuf, target_addr, 0);
4081 
4082     return 0;
4083 }
4084 
4085 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4086     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4087 
4088 /*
4089  * This macro is required to handle the s390 variants, which pass the
4090  * arguments in a different order than the default.
4091  */
4092 #ifdef __s390x__
4093 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4094   (__nsops), (__timeout), (__sops)
4095 #else
4096 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4097   (__nsops), 0, (__sops), (__timeout)
4098 #endif
4099 
4100 static inline abi_long do_semtimedop(int semid,
4101                                      abi_long ptr,
4102                                      unsigned nsops,
4103                                      abi_long timeout, bool time64)
4104 {
4105     struct sembuf *sops;
4106     struct timespec ts, *pts = NULL;
4107     abi_long ret;
4108 
4109     if (timeout) {
4110         pts = &ts;
4111         if (time64) {
4112             if (target_to_host_timespec64(pts, timeout)) {
4113                 return -TARGET_EFAULT;
4114             }
4115         } else {
4116             if (target_to_host_timespec(pts, timeout)) {
4117                 return -TARGET_EFAULT;
4118             }
4119         }
4120     }
4121 
4122     if (nsops > TARGET_SEMOPM) {
4123         return -TARGET_E2BIG;
4124     }
4125 
4126     sops = g_new(struct sembuf, nsops);
4127 
4128     if (target_to_host_sembuf(sops, ptr, nsops)) {
4129         g_free(sops);
4130         return -TARGET_EFAULT;
4131     }
4132 
4133     ret = -TARGET_ENOSYS;
4134 #ifdef __NR_semtimedop
4135     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4136 #endif
4137 #ifdef __NR_ipc
4138     if (ret == -TARGET_ENOSYS) {
4139         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4140                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4141     }
4142 #endif
4143     g_free(sops);
4144     return ret;
4145 }
4146 #endif
4147 
4148 struct target_msqid_ds
4149 {
4150     struct target_ipc_perm msg_perm;
4151     abi_ulong msg_stime;
4152 #if TARGET_ABI_BITS == 32
4153     abi_ulong __unused1;
4154 #endif
4155     abi_ulong msg_rtime;
4156 #if TARGET_ABI_BITS == 32
4157     abi_ulong __unused2;
4158 #endif
4159     abi_ulong msg_ctime;
4160 #if TARGET_ABI_BITS == 32
4161     abi_ulong __unused3;
4162 #endif
4163     abi_ulong __msg_cbytes;
4164     abi_ulong msg_qnum;
4165     abi_ulong msg_qbytes;
4166     abi_ulong msg_lspid;
4167     abi_ulong msg_lrpid;
4168     abi_ulong __unused4;
4169     abi_ulong __unused5;
4170 };
4171 
4172 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4173                                                abi_ulong target_addr)
4174 {
4175     struct target_msqid_ds *target_md;
4176 
4177     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4178         return -TARGET_EFAULT;
4179     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4180         return -TARGET_EFAULT;
4181     host_md->msg_stime = tswapal(target_md->msg_stime);
4182     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4183     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4184     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4185     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4186     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4187     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4188     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4189     unlock_user_struct(target_md, target_addr, 0);
4190     return 0;
4191 }
4192 
4193 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4194                                                struct msqid_ds *host_md)
4195 {
4196     struct target_msqid_ds *target_md;
4197 
4198     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4199         return -TARGET_EFAULT;
4200     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4201         return -TARGET_EFAULT;
4202     target_md->msg_stime = tswapal(host_md->msg_stime);
4203     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4204     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4205     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4206     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4207     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4208     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4209     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4210     unlock_user_struct(target_md, target_addr, 1);
4211     return 0;
4212 }
4213 
4214 struct target_msginfo {
4215     int msgpool;
4216     int msgmap;
4217     int msgmax;
4218     int msgmnb;
4219     int msgmni;
4220     int msgssz;
4221     int msgtql;
4222     unsigned short int msgseg;
4223 };
4224 
4225 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4226                                               struct msginfo *host_msginfo)
4227 {
4228     struct target_msginfo *target_msginfo;
4229     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4230         return -TARGET_EFAULT;
4231     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4232     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4233     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4234     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4235     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4236     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4237     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4238     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4239     unlock_user_struct(target_msginfo, target_addr, 1);
4240     return 0;
4241 }
4242 
4243 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4244 {
4245     struct msqid_ds dsarg;
4246     struct msginfo msginfo;
4247     abi_long ret = -TARGET_EINVAL;
4248 
4249     cmd &= 0xff;
4250 
4251     switch (cmd) {
4252     case IPC_STAT:
4253     case IPC_SET:
4254     case MSG_STAT:
4255         if (target_to_host_msqid_ds(&dsarg,ptr))
4256             return -TARGET_EFAULT;
4257         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4258         if (host_to_target_msqid_ds(ptr,&dsarg))
4259             return -TARGET_EFAULT;
4260         break;
4261     case IPC_RMID:
4262         ret = get_errno(msgctl(msgid, cmd, NULL));
4263         break;
4264     case IPC_INFO:
4265     case MSG_INFO:
4266         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4267         if (host_to_target_msginfo(ptr, &msginfo))
4268             return -TARGET_EFAULT;
4269         break;
4270     }
4271 
4272     return ret;
4273 }
4274 
4275 struct target_msgbuf {
4276     abi_long mtype;
4277     char	mtext[1];
4278 };
4279 
4280 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4281                                  ssize_t msgsz, int msgflg)
4282 {
4283     struct target_msgbuf *target_mb;
4284     struct msgbuf *host_mb;
4285     abi_long ret = 0;
4286 
4287     if (msgsz < 0) {
4288         return -TARGET_EINVAL;
4289     }
4290 
4291     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4292         return -TARGET_EFAULT;
4293     host_mb = g_try_malloc(msgsz + sizeof(long));
4294     if (!host_mb) {
4295         unlock_user_struct(target_mb, msgp, 0);
4296         return -TARGET_ENOMEM;
4297     }
4298     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4299     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4300     ret = -TARGET_ENOSYS;
4301 #ifdef __NR_msgsnd
4302     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4303 #endif
4304 #ifdef __NR_ipc
4305     if (ret == -TARGET_ENOSYS) {
4306 #ifdef __s390x__
4307         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4308                                  host_mb));
4309 #else
4310         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4311                                  host_mb, 0));
4312 #endif
4313     }
4314 #endif
4315     g_free(host_mb);
4316     unlock_user_struct(target_mb, msgp, 0);
4317 
4318     return ret;
4319 }
4320 
4321 #ifdef __NR_ipc
4322 #if defined(__sparc__)
4323 /* SPARC msgrcv does not use the kludge on the final 2 arguments.  */
4324 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4325 #elif defined(__s390x__)
4326 /* The s390 sys_ipc variant has only five parameters.  */
4327 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4328     ((long int[]){(long int)__msgp, __msgtyp})
4329 #else
4330 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4331     ((long int[]){(long int)__msgp, __msgtyp}), 0
4332 #endif
4333 #endif
4334 
4335 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4336                                  ssize_t msgsz, abi_long msgtyp,
4337                                  int msgflg)
4338 {
4339     struct target_msgbuf *target_mb;
4340     char *target_mtext;
4341     struct msgbuf *host_mb;
4342     abi_long ret = 0;
4343 
4344     if (msgsz < 0) {
4345         return -TARGET_EINVAL;
4346     }
4347 
4348     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4349         return -TARGET_EFAULT;
4350 
4351     host_mb = g_try_malloc(msgsz + sizeof(long));
4352     if (!host_mb) {
4353         ret = -TARGET_ENOMEM;
4354         goto end;
4355     }
4356     ret = -TARGET_ENOSYS;
4357 #ifdef __NR_msgrcv
4358     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4359 #endif
4360 #ifdef __NR_ipc
4361     if (ret == -TARGET_ENOSYS) {
4362         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4363                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4364     }
4365 #endif
4366 
4367     if (ret > 0) {
4368         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4369         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4370         if (!target_mtext) {
4371             ret = -TARGET_EFAULT;
4372             goto end;
4373         }
4374         memcpy(target_mb->mtext, host_mb->mtext, ret);
4375         unlock_user(target_mtext, target_mtext_addr, ret);
4376     }
4377 
4378     target_mb->mtype = tswapal(host_mb->mtype);
4379 
4380 end:
4381     if (target_mb)
4382         unlock_user_struct(target_mb, msgp, 1);
4383     g_free(host_mb);
4384     return ret;
4385 }
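
/*
 * For reference (illustrative, not compiled), the MSGRCV_ARGS() expansions
 * above hand these trailing arguments to safe_ipc():
 *
 *     generic:  ((long int[]){ (long int)host_mb, msgtyp }), 0
 *     s390x:    ((long int[]){ (long int)host_mb, msgtyp })
 *     sparc:    host_mb, msgtyp
 *
 * i.e. most hosts pass a two-element "kludge" array carrying the message
 * buffer pointer and the requested type, s390x does the same but with its
 * five-argument ipc(2), and SPARC passes the two values as separate
 * arguments.
 */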
4386 
4387 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4388                                                abi_ulong target_addr)
4389 {
4390     struct target_shmid_ds *target_sd;
4391 
4392     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4393         return -TARGET_EFAULT;
4394     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4395         return -TARGET_EFAULT;
4396     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4397     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4398     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4399     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4400     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4401     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4402     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4403     unlock_user_struct(target_sd, target_addr, 0);
4404     return 0;
4405 }
4406 
4407 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4408                                                struct shmid_ds *host_sd)
4409 {
4410     struct target_shmid_ds *target_sd;
4411 
4412     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4413         return -TARGET_EFAULT;
4414     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4415         return -TARGET_EFAULT;
4416     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4417     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4418     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4419     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4420     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4421     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4422     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4423     unlock_user_struct(target_sd, target_addr, 1);
4424     return 0;
4425 }
4426 
4427 struct  target_shminfo {
4428     abi_ulong shmmax;
4429     abi_ulong shmmin;
4430     abi_ulong shmmni;
4431     abi_ulong shmseg;
4432     abi_ulong shmall;
4433 };
4434 
4435 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4436                                               struct shminfo *host_shminfo)
4437 {
4438     struct target_shminfo *target_shminfo;
4439     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4440         return -TARGET_EFAULT;
4441     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4442     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4443     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4444     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4445     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4446     unlock_user_struct(target_shminfo, target_addr, 1);
4447     return 0;
4448 }
4449 
4450 struct target_shm_info {
4451     int used_ids;
4452     abi_ulong shm_tot;
4453     abi_ulong shm_rss;
4454     abi_ulong shm_swp;
4455     abi_ulong swap_attempts;
4456     abi_ulong swap_successes;
4457 };
4458 
4459 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4460                                                struct shm_info *host_shm_info)
4461 {
4462     struct target_shm_info *target_shm_info;
4463     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4464         return -TARGET_EFAULT;
4465     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4466     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4467     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4468     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4469     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4470     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4471     unlock_user_struct(target_shm_info, target_addr, 1);
4472     return 0;
4473 }
4474 
4475 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4476 {
4477     struct shmid_ds dsarg;
4478     struct shminfo shminfo;
4479     struct shm_info shm_info;
4480     abi_long ret = -TARGET_EINVAL;
4481 
4482     cmd &= 0xff;
4483 
4484     switch(cmd) {
4485     case IPC_STAT:
4486     case IPC_SET:
4487     case SHM_STAT:
4488         if (target_to_host_shmid_ds(&dsarg, buf))
4489             return -TARGET_EFAULT;
4490         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4491         if (host_to_target_shmid_ds(buf, &dsarg))
4492             return -TARGET_EFAULT;
4493         break;
4494     case IPC_INFO:
4495         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4496         if (host_to_target_shminfo(buf, &shminfo))
4497             return -TARGET_EFAULT;
4498         break;
4499     case SHM_INFO:
4500         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4501         if (host_to_target_shm_info(buf, &shm_info))
4502             return -TARGET_EFAULT;
4503         break;
4504     case IPC_RMID:
4505     case SHM_LOCK:
4506     case SHM_UNLOCK:
4507         ret = get_errno(shmctl(shmid, cmd, NULL));
4508         break;
4509     }
4510 
4511     return ret;
4512 }
4513 
4514 #ifndef TARGET_FORCE_SHMLBA
4515 /* For most architectures, SHMLBA is the same as the page size;
4516  * some architectures have larger values, in which case they should
4517  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4518  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4519  * and defining its own value for SHMLBA.
4520  *
4521  * The kernel also permits SHMLBA to be set by the architecture to a
4522  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4523  * this means that addresses are rounded to the large size if
4524  * SHM_RND is set but addresses not aligned to that size are not rejected
4525  * as long as they are at least page-aligned. Since the only architecture
4526  * which uses this is ia64 this code doesn't provide for that oddity.
4527  */
4528 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4529 {
4530     return TARGET_PAGE_SIZE;
4531 }
4532 #endif
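
/*
 * A hypothetical override, for illustration only (not copied from any real
 * target): an architecture whose SHMLBA were four pages would define
 * TARGET_FORCE_SHMLBA in its target headers and provide something like
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;
 *     }
 *
 * so that do_shmat() below rounds and validates attach addresses against
 * that larger boundary instead of the page size.
 */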
4533 
4534 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4535                                  int shmid, abi_ulong shmaddr, int shmflg)
4536 {
4537     CPUState *cpu = env_cpu(cpu_env);
4538     abi_long raddr;
4539     void *host_raddr;
4540     struct shmid_ds shm_info;
4541     int i,ret;
4542     abi_ulong shmlba;
4543 
4544     /* shmat pointers are always untagged */
4545 
4546     /* find out the length of the shared memory segment */
4547     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4548     if (is_error(ret)) {
4549         /* can't get length, bail out */
4550         return ret;
4551     }
4552 
4553     shmlba = target_shmlba(cpu_env);
4554 
4555     if (shmaddr & (shmlba - 1)) {
4556         if (shmflg & SHM_RND) {
4557             shmaddr &= ~(shmlba - 1);
4558         } else {
4559             return -TARGET_EINVAL;
4560         }
4561     }
4562     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4563         return -TARGET_EINVAL;
4564     }
4565 
4566     mmap_lock();
4567 
4568     /*
4569      * We're mapping shared memory, so ensure we generate code for parallel
4570      * execution and flush old translations.  This will work up to the level
4571      * supported by the host -- anything that requires EXCP_ATOMIC will not
4572      * be atomic with respect to an external process.
4573      */
4574     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4575         cpu->tcg_cflags |= CF_PARALLEL;
4576         tb_flush(cpu);
4577     }
4578 
4579     if (shmaddr)
4580         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4581     else {
4582         abi_ulong mmap_start;
4583 
4584         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4585         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4586 
4587         if (mmap_start == -1) {
4588             errno = ENOMEM;
4589             host_raddr = (void *)-1;
4590         } else
4591             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4592                                shmflg | SHM_REMAP);
4593     }
4594 
4595     if (host_raddr == (void *)-1) {
4596         mmap_unlock();
4597         return get_errno((long)host_raddr);
4598     }
4599     raddr=h2g((unsigned long)host_raddr);
4600 
4601     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4602                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4603                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4604 
4605     for (i = 0; i < N_SHM_REGIONS; i++) {
4606         if (!shm_regions[i].in_use) {
4607             shm_regions[i].in_use = true;
4608             shm_regions[i].start = raddr;
4609             shm_regions[i].size = shm_info.shm_segsz;
4610             break;
4611         }
4612     }
4613 
4614     mmap_unlock();
4615     return raddr;
4616 
4617 }
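
/*
 * Address-selection sketch for the logic above (illustrative, not compiled;
 * assumes a 4 KiB SHMLBA):
 *
 *     do_shmat(cpu_env, id, 0x10001234, 0)        -> -TARGET_EINVAL, the
 *                                                    address is not
 *                                                    SHMLBA-aligned
 *     do_shmat(cpu_env, id, 0x10001234, SHM_RND)  -> rounded down to
 *                                                    0x10001000 first
 *     do_shmat(cpu_env, id, 0, 0)                 -> a free range is found
 *                                                    with mmap_find_vma(),
 *                                                    aligned to the larger
 *                                                    of host and target
 *                                                    SHMLBA, and attached
 *                                                    with SHM_REMAP
 *
 * The guest page flags are then marked readable (and writable unless
 * SHM_RDONLY was given), and the mapping is recorded in shm_regions[] so
 * that do_shmdt() can clear those flags again later.
 */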
4618 
4619 static inline abi_long do_shmdt(abi_ulong shmaddr)
4620 {
4621     int i;
4622     abi_long rv;
4623 
4624     /* shmdt pointers are always untagged */
4625 
4626     mmap_lock();
4627 
4628     for (i = 0; i < N_SHM_REGIONS; ++i) {
4629         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4630             shm_regions[i].in_use = false;
4631             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4632             break;
4633         }
4634     }
4635     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4636 
4637     mmap_unlock();
4638 
4639     return rv;
4640 }
4641 
4642 #ifdef TARGET_NR_ipc
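     /*
      * sys_ipc is the multiplexed System V IPC syscall used by some guest
      * ABIs: the low 16 bits of 'call' select the operation, while the high
      * 16 bits carry a version number that changes how a few operations
      * (e.g. IPCOP_msgrcv and IPCOP_shmat) interpret their arguments.
      */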
4643 /* ??? This only works with linear mappings.  */
4644 /* do_ipc() must return target values and target errnos. */
4645 static abi_long do_ipc(CPUArchState *cpu_env,
4646                        unsigned int call, abi_long first,
4647                        abi_long second, abi_long third,
4648                        abi_long ptr, abi_long fifth)
4649 {
4650     int version;
4651     abi_long ret = 0;
4652 
4653     version = call >> 16;
4654     call &= 0xffff;
4655 
4656     switch (call) {
4657     case IPCOP_semop:
4658         ret = do_semtimedop(first, ptr, second, 0, false);
4659         break;
4660     case IPCOP_semtimedop:
4661     /*
4662      * The s390 sys_ipc variant has only five parameters instead of six
4663      * (as in the default variant).  The only difference is the handling of
4664      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4665      * to a struct timespec, while the generic variant uses the fifth parameter.
4666      */
4667 #if defined(TARGET_S390X)
4668         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4669 #else
4670         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4671 #endif
4672         break;
4673 
4674     case IPCOP_semget:
4675         ret = get_errno(semget(first, second, third));
4676         break;
4677 
4678     case IPCOP_semctl: {
4679         /* The semun argument to semctl is passed by value, so dereference the
4680          * ptr argument. */
4681         abi_ulong atptr;
4682         get_user_ual(atptr, ptr);
4683         ret = do_semctl(first, second, third, atptr);
4684         break;
4685     }
4686 
4687     case IPCOP_msgget:
4688         ret = get_errno(msgget(first, second));
4689         break;
4690 
4691     case IPCOP_msgsnd:
4692         ret = do_msgsnd(first, ptr, second, third);
4693         break;
4694 
4695     case IPCOP_msgctl:
4696         ret = do_msgctl(first, second, ptr);
4697         break;
4698 
4699     case IPCOP_msgrcv:
4700         switch (version) {
4701         case 0:
4702             {
4703                 struct target_ipc_kludge {
4704                     abi_long msgp;
4705                     abi_long msgtyp;
4706                 } *tmp;
4707 
4708                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4709                     ret = -TARGET_EFAULT;
4710                     break;
4711                 }
4712 
4713                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4714 
4715                 unlock_user_struct(tmp, ptr, 0);
4716                 break;
4717             }
4718         default:
4719             ret = do_msgrcv(first, ptr, second, fifth, third);
4720         }
4721         break;
4722 
4723     case IPCOP_shmat:
4724         switch (version) {
4725         default:
4726         {
4727             abi_ulong raddr;
4728             raddr = do_shmat(cpu_env, first, ptr, second);
4729             if (is_error(raddr))
4730                 return get_errno(raddr);
4731             if (put_user_ual(raddr, third))
4732                 return -TARGET_EFAULT;
4733             break;
4734         }
4735         case 1:
4736             ret = -TARGET_EINVAL;
4737             break;
4738         }
4739         break;
4740     case IPCOP_shmdt:
4741         ret = do_shmdt(ptr);
4742         break;
4743 
4744     case IPCOP_shmget:
4745         /* IPC_* flag values are the same on all Linux platforms */
4746         ret = get_errno(shmget(first, second, third));
4747         break;
4748 
4749     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4750     case IPCOP_shmctl:
4751         ret = do_shmctl(first, second, ptr);
4752         break;
4753     default:
4754         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4755                       call, version);
4756         ret = -TARGET_ENOSYS;
4757         break;
4758     }
4759     return ret;
4760 }
4761 #endif
4762 
4763 /* kernel structure types definitions */
4764 
4765 #define STRUCT(name, ...) STRUCT_ ## name,
4766 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4767 enum {
4768 #include "syscall_types.h"
4769 STRUCT_MAX
4770 };
4771 #undef STRUCT
4772 #undef STRUCT_SPECIAL
4773 
4774 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4775 #define STRUCT_SPECIAL(name)
4776 #include "syscall_types.h"
4777 #undef STRUCT
4778 #undef STRUCT_SPECIAL
4779 
4780 #define MAX_STRUCT_SIZE 4096
4781 
4782 #ifdef CONFIG_FIEMAP
4783 /* So fiemap access checks don't overflow on 32 bit systems.
4784  * This is very slightly smaller than the limit imposed by
4785  * the underlying kernel.
4786  */
4787 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4788                             / sizeof(struct fiemap_extent))
4789 
4790 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4791                                        int fd, int cmd, abi_long arg)
4792 {
4793     /* The parameter for this ioctl is a struct fiemap followed
4794      * by an array of struct fiemap_extent whose size is set
4795      * in fiemap->fm_extent_count. The array is filled in by the
4796      * ioctl.
4797      */
4798     int target_size_in, target_size_out;
4799     struct fiemap *fm;
4800     const argtype *arg_type = ie->arg_type;
4801     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4802     void *argptr, *p;
4803     abi_long ret;
4804     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4805     uint32_t outbufsz;
4806     int free_fm = 0;
4807 
4808     assert(arg_type[0] == TYPE_PTR);
4809     assert(ie->access == IOC_RW);
4810     arg_type++;
4811     target_size_in = thunk_type_size(arg_type, 0);
4812     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4813     if (!argptr) {
4814         return -TARGET_EFAULT;
4815     }
4816     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4817     unlock_user(argptr, arg, 0);
4818     fm = (struct fiemap *)buf_temp;
4819     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4820         return -TARGET_EINVAL;
4821     }
4822 
4823     outbufsz = sizeof (*fm) +
4824         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4825 
4826     if (outbufsz > MAX_STRUCT_SIZE) {
4827         /* We can't fit all the extents into the fixed size buffer.
4828          * Allocate one that is large enough and use it instead.
4829          */
4830         fm = g_try_malloc(outbufsz);
4831         if (!fm) {
4832             return -TARGET_ENOMEM;
4833         }
4834         memcpy(fm, buf_temp, sizeof(struct fiemap));
4835         free_fm = 1;
4836     }
4837     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4838     if (!is_error(ret)) {
4839         target_size_out = target_size_in;
4840         /* An extent_count of 0 means we were only counting the extents
4841          * so there are no structs to copy
4842          */
4843         if (fm->fm_extent_count != 0) {
4844             target_size_out += fm->fm_mapped_extents * extent_size;
4845         }
4846         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4847         if (!argptr) {
4848             ret = -TARGET_EFAULT;
4849         } else {
4850             /* Convert the struct fiemap */
4851             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4852             if (fm->fm_extent_count != 0) {
4853                 p = argptr + target_size_in;
4854                 /* ...and then all the struct fiemap_extents */
4855                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4856                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4857                                   THUNK_TARGET);
4858                     p += extent_size;
4859                 }
4860             }
4861             unlock_user(argptr, arg, target_size_out);
4862         }
4863     }
4864     if (free_fm) {
4865         g_free(fm);
4866     }
4867     return ret;
4868 }
4869 #endif
4870 
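     /*
      * struct ifconf handling (SIOCGIFCONF): the target struct ifreq may
      * differ in size and layout from the host's, so ifc_len and the
      * embedded ifc_buf pointer are translated on the way in and the
      * returned ifreq array is converted entry by entry on the way out.
      * A zero ifc_buf is passed through so the kernel only reports the
      * required length.
      */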
4871 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4872                                 int fd, int cmd, abi_long arg)
4873 {
4874     const argtype *arg_type = ie->arg_type;
4875     int target_size;
4876     void *argptr;
4877     int ret;
4878     struct ifconf *host_ifconf;
4879     uint32_t outbufsz;
4880     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4881     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4882     int target_ifreq_size;
4883     int nb_ifreq;
4884     int free_buf = 0;
4885     int i;
4886     int target_ifc_len;
4887     abi_long target_ifc_buf;
4888     int host_ifc_len;
4889     char *host_ifc_buf;
4890 
4891     assert(arg_type[0] == TYPE_PTR);
4892     assert(ie->access == IOC_RW);
4893 
4894     arg_type++;
4895     target_size = thunk_type_size(arg_type, 0);
4896 
4897     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4898     if (!argptr)
4899         return -TARGET_EFAULT;
4900     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4901     unlock_user(argptr, arg, 0);
4902 
4903     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4904     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4905     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4906 
4907     if (target_ifc_buf != 0) {
4908         target_ifc_len = host_ifconf->ifc_len;
4909         nb_ifreq = target_ifc_len / target_ifreq_size;
4910         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4911 
4912         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4913         if (outbufsz > MAX_STRUCT_SIZE) {
4914             /*
4915              * We can't fit all the ifreq entries into the fixed size buffer.
4916              * Allocate one that is large enough and use it instead.
4917              */
4918             host_ifconf = g_try_malloc(outbufsz);
4919             if (!host_ifconf) {
4920                 return -TARGET_ENOMEM;
4921             }
4922             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4923             free_buf = 1;
4924         }
4925         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4926 
4927         host_ifconf->ifc_len = host_ifc_len;
4928     } else {
4929         host_ifc_buf = NULL;
4930     }
4931     host_ifconf->ifc_buf = host_ifc_buf;
4932 
4933     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4934     if (!is_error(ret)) {
4935 	/* convert host ifc_len to target ifc_len */
4936         /* convert host ifc_len to target ifc_len */
4937         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4938         target_ifc_len = nb_ifreq * target_ifreq_size;
4939         host_ifconf->ifc_len = target_ifc_len;
4940 
4941         /* restore target ifc_buf */
4942 
4943         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4944 
4945         /* copy struct ifconf to target user */
4946 
4947         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4948         if (!argptr)
4949             return -TARGET_EFAULT;
4950         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4951         unlock_user(argptr, arg, target_size);
4952 
4953         if (target_ifc_buf != 0) {
4954             /* copy ifreq[] to target user */
4955             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4956             for (i = 0; i < nb_ifreq ; i++) {
4957                 thunk_convert(argptr + i * target_ifreq_size,
4958                               host_ifc_buf + i * sizeof(struct ifreq),
4959                               ifreq_arg_type, THUNK_TARGET);
4960             }
4961             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4962         }
4963     }
4964 
4965     if (free_buf) {
4966         g_free(host_ifconf);
4967     }
4968 
4969     return ret;
4970 }
4971 
4972 #if defined(CONFIG_USBFS)
4973 #if HOST_LONG_BITS > 64
4974 #error USBDEVFS thunks do not support >64 bit hosts yet.
4975 #endif
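     /*
      * usbdevfs URBs are asynchronous: USBDEVFS_REAPURB hands back a host
      * pointer to the urb that completed.  Each guest URB is therefore
      * wrapped in a live_urb that records the guest URB address and the
      * locked guest buffer, and is kept in a hash table keyed by the guest
      * URB address so that DISCARDURB and REAPURB can find it again.
      */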
4976 struct live_urb {
4977     uint64_t target_urb_adr;
4978     uint64_t target_buf_adr;
4979     char *target_buf_ptr;
4980     struct usbdevfs_urb host_urb;
4981 };
4982 
4983 static GHashTable *usbdevfs_urb_hashtable(void)
4984 {
4985     static GHashTable *urb_hashtable;
4986 
4987     if (!urb_hashtable) {
4988         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4989     }
4990     return urb_hashtable;
4991 }
4992 
4993 static void urb_hashtable_insert(struct live_urb *urb)
4994 {
4995     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4996     g_hash_table_insert(urb_hashtable, urb, urb);
4997 }
4998 
4999 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5000 {
5001     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5002     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5003 }
5004 
5005 static void urb_hashtable_remove(struct live_urb *urb)
5006 {
5007     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5008     g_hash_table_remove(urb_hashtable, urb);
5009 }
5010 
5011 static abi_long
5012 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5013                           int fd, int cmd, abi_long arg)
5014 {
5015     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5016     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5017     struct live_urb *lurb;
5018     void *argptr;
5019     uint64_t hurb;
5020     int target_size;
5021     uintptr_t target_urb_adr;
5022     abi_long ret;
5023 
5024     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5025 
5026     memset(buf_temp, 0, sizeof(uint64_t));
5027     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5028     if (is_error(ret)) {
5029         return ret;
5030     }
5031 
5032     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5033     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5034     if (!lurb->target_urb_adr) {
5035         return -TARGET_EFAULT;
5036     }
5037     urb_hashtable_remove(lurb);
5038     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5039         lurb->host_urb.buffer_length);
5040     lurb->target_buf_ptr = NULL;
5041 
5042     /* restore the guest buffer pointer */
5043     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5044 
5045     /* update the guest urb struct */
5046     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5047     if (!argptr) {
5048         g_free(lurb);
5049         return -TARGET_EFAULT;
5050     }
5051     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5052     unlock_user(argptr, lurb->target_urb_adr, target_size);
5053 
5054     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5055     /* write back the urb handle */
5056     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5057     if (!argptr) {
5058         g_free(lurb);
5059         return -TARGET_EFAULT;
5060     }
5061 
5062     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5063     target_urb_adr = lurb->target_urb_adr;
5064     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5065     unlock_user(argptr, arg, target_size);
5066 
5067     g_free(lurb);
5068     return ret;
5069 }
5070 
5071 static abi_long
5072 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5073                              uint8_t *buf_temp __attribute__((unused)),
5074                              int fd, int cmd, abi_long arg)
5075 {
5076     struct live_urb *lurb;
5077 
5078     /* map target address back to host URB with metadata. */
5079     lurb = urb_hashtable_lookup(arg);
5080     if (!lurb) {
5081         return -TARGET_EFAULT;
5082     }
5083     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5084 }
5085 
5086 static abi_long
5087 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5088                             int fd, int cmd, abi_long arg)
5089 {
5090     const argtype *arg_type = ie->arg_type;
5091     int target_size;
5092     abi_long ret;
5093     void *argptr;
5094     int rw_dir;
5095     struct live_urb *lurb;
5096 
5097     /*
5098      * Each submitted URB needs to map to a unique ID for the
5099      * kernel, and that unique ID needs to be a pointer to
5100      * host memory.  Hence, we need to malloc for each URB.
5101      * Isochronous transfers have a variable-length struct.
5102      */
5103     arg_type++;
5104     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5105 
5106     /* construct host copy of urb and metadata */
5107     lurb = g_try_malloc0(sizeof(struct live_urb));
5108     if (!lurb) {
5109         return -TARGET_ENOMEM;
5110     }
5111 
5112     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5113     if (!argptr) {
5114         g_free(lurb);
5115         return -TARGET_EFAULT;
5116     }
5117     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5118     unlock_user(argptr, arg, 0);
5119 
5120     lurb->target_urb_adr = arg;
5121     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5122 
5123     /* buffer space used depends on endpoint type so lock the entire buffer */
5124     /* control type urbs should check the buffer contents for true direction */
5125     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5126     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5127         lurb->host_urb.buffer_length, 1);
5128     if (lurb->target_buf_ptr == NULL) {
5129         g_free(lurb);
5130         return -TARGET_EFAULT;
5131     }
5132 
5133     /* update buffer pointer in host copy */
5134     lurb->host_urb.buffer = lurb->target_buf_ptr;
5135 
5136     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5137     if (is_error(ret)) {
5138         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5139         g_free(lurb);
5140     } else {
5141         urb_hashtable_insert(lurb);
5142     }
5143 
5144     return ret;
5145 }
5146 #endif /* CONFIG_USBFS */
5147 
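     /*
      * Device-mapper ioctls carry a variable-sized payload after the fixed
      * struct dm_ioctl header, described by data_start/data_size.  buf_temp
      * is too small for it, so the request is staged in a larger allocation
      * and the per-command payload (target specs, name and version lists,
      * dependency tables) is converted in each direction.
      */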
5148 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5149                             int cmd, abi_long arg)
5150 {
5151     void *argptr;
5152     struct dm_ioctl *host_dm;
5153     abi_long guest_data;
5154     uint32_t guest_data_size;
5155     int target_size;
5156     const argtype *arg_type = ie->arg_type;
5157     abi_long ret;
5158     void *big_buf = NULL;
5159     char *host_data;
5160 
5161     arg_type++;
5162     target_size = thunk_type_size(arg_type, 0);
5163     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5164     if (!argptr) {
5165         ret = -TARGET_EFAULT;
5166         goto out;
5167     }
5168     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5169     unlock_user(argptr, arg, 0);
5170 
5171     /* buf_temp is too small, so fetch things into a bigger buffer */
5172     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5173     memcpy(big_buf, buf_temp, target_size);
5174     buf_temp = big_buf;
5175     host_dm = big_buf;
5176 
5177     guest_data = arg + host_dm->data_start;
5178     if ((guest_data - arg) < 0) {
5179         ret = -TARGET_EINVAL;
5180         goto out;
5181     }
5182     guest_data_size = host_dm->data_size - host_dm->data_start;
5183     host_data = (char*)host_dm + host_dm->data_start;
5184 
5185     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5186     if (!argptr) {
5187         ret = -TARGET_EFAULT;
5188         goto out;
5189     }
5190 
5191     switch (ie->host_cmd) {
5192     case DM_REMOVE_ALL:
5193     case DM_LIST_DEVICES:
5194     case DM_DEV_CREATE:
5195     case DM_DEV_REMOVE:
5196     case DM_DEV_SUSPEND:
5197     case DM_DEV_STATUS:
5198     case DM_DEV_WAIT:
5199     case DM_TABLE_STATUS:
5200     case DM_TABLE_CLEAR:
5201     case DM_TABLE_DEPS:
5202     case DM_LIST_VERSIONS:
5203         /* no input data */
5204         break;
5205     case DM_DEV_RENAME:
5206     case DM_DEV_SET_GEOMETRY:
5207         /* data contains only strings */
5208         memcpy(host_data, argptr, guest_data_size);
5209         break;
5210     case DM_TARGET_MSG:
5211         memcpy(host_data, argptr, guest_data_size);
5212         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5213         break;
5214     case DM_TABLE_LOAD:
5215     {
5216         void *gspec = argptr;
5217         void *cur_data = host_data;
5218         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5219         int spec_size = thunk_type_size(arg_type, 0);
5220         int i;
5221 
5222         for (i = 0; i < host_dm->target_count; i++) {
5223             struct dm_target_spec *spec = cur_data;
5224             uint32_t next;
5225             int slen;
5226 
5227             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5228             slen = strlen((char*)gspec + spec_size) + 1;
5229             next = spec->next;
5230             spec->next = sizeof(*spec) + slen;
5231             strcpy((char*)&spec[1], gspec + spec_size);
5232             gspec += next;
5233             cur_data += spec->next;
5234         }
5235         break;
5236     }
5237     default:
5238         ret = -TARGET_EINVAL;
5239         unlock_user(argptr, guest_data, 0);
5240         goto out;
5241     }
5242     unlock_user(argptr, guest_data, 0);
5243 
5244     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5245     if (!is_error(ret)) {
5246         guest_data = arg + host_dm->data_start;
5247         guest_data_size = host_dm->data_size - host_dm->data_start;
5248         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5249         switch (ie->host_cmd) {
5250         case DM_REMOVE_ALL:
5251         case DM_DEV_CREATE:
5252         case DM_DEV_REMOVE:
5253         case DM_DEV_RENAME:
5254         case DM_DEV_SUSPEND:
5255         case DM_DEV_STATUS:
5256         case DM_TABLE_LOAD:
5257         case DM_TABLE_CLEAR:
5258         case DM_TARGET_MSG:
5259         case DM_DEV_SET_GEOMETRY:
5260             /* no return data */
5261             break;
5262         case DM_LIST_DEVICES:
5263         {
5264             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5265             uint32_t remaining_data = guest_data_size;
5266             void *cur_data = argptr;
5267             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5268             int nl_size = 12; /* can't use thunk_size due to alignment */
5269 
5270             while (1) {
5271                 uint32_t next = nl->next;
5272                 if (next) {
5273                     nl->next = nl_size + (strlen(nl->name) + 1);
5274                 }
5275                 if (remaining_data < nl->next) {
5276                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5277                     break;
5278                 }
5279                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5280                 strcpy(cur_data + nl_size, nl->name);
5281                 cur_data += nl->next;
5282                 remaining_data -= nl->next;
5283                 if (!next) {
5284                     break;
5285                 }
5286                 nl = (void*)nl + next;
5287             }
5288             break;
5289         }
5290         case DM_DEV_WAIT:
5291         case DM_TABLE_STATUS:
5292         {
5293             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5294             void *cur_data = argptr;
5295             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5296             int spec_size = thunk_type_size(arg_type, 0);
5297             int i;
5298 
5299             for (i = 0; i < host_dm->target_count; i++) {
5300                 uint32_t next = spec->next;
5301                 int slen = strlen((char*)&spec[1]) + 1;
5302                 spec->next = (cur_data - argptr) + spec_size + slen;
5303                 if (guest_data_size < spec->next) {
5304                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5305                     break;
5306                 }
5307                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5308                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5309                 cur_data = argptr + spec->next;
5310                 spec = (void*)host_dm + host_dm->data_start + next;
5311             }
5312             break;
5313         }
5314         case DM_TABLE_DEPS:
5315         {
5316             void *hdata = (void*)host_dm + host_dm->data_start;
5317             int count = *(uint32_t*)hdata;
5318             uint64_t *hdev = hdata + 8;
5319             uint64_t *gdev = argptr + 8;
5320             int i;
5321 
5322             *(uint32_t*)argptr = tswap32(count);
5323             for (i = 0; i < count; i++) {
5324                 *gdev = tswap64(*hdev);
5325                 gdev++;
5326                 hdev++;
5327             }
5328             break;
5329         }
5330         case DM_LIST_VERSIONS:
5331         {
5332             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5333             uint32_t remaining_data = guest_data_size;
5334             void *cur_data = argptr;
5335             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5336             int vers_size = thunk_type_size(arg_type, 0);
5337 
5338             while (1) {
5339                 uint32_t next = vers->next;
5340                 if (next) {
5341                     vers->next = vers_size + (strlen(vers->name) + 1);
5342                 }
5343                 if (remaining_data < vers->next) {
5344                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5345                     break;
5346                 }
5347                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5348                 strcpy(cur_data + vers_size, vers->name);
5349                 cur_data += vers->next;
5350                 remaining_data -= vers->next;
5351                 if (!next) {
5352                     break;
5353                 }
5354                 vers = (void*)vers + next;
5355             }
5356             break;
5357         }
5358         default:
5359             unlock_user(argptr, guest_data, 0);
5360             ret = -TARGET_EINVAL;
5361             goto out;
5362         }
5363         unlock_user(argptr, guest_data, guest_data_size);
5364 
5365         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5366         if (!argptr) {
5367             ret = -TARGET_EFAULT;
5368             goto out;
5369         }
5370         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5371         unlock_user(argptr, arg, target_size);
5372     }
5373 out:
5374     g_free(big_buf);
5375     return ret;
5376 }
5377 
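     /*
      * BLKPG: the argument is a struct blkpg_ioctl_arg whose 'data' member
      * points at a struct blkpg_partition.  Both levels are converted here
      * and the data pointer is redirected to a local host copy before the
      * host ioctl is issued.
      */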
5378 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5379                                int cmd, abi_long arg)
5380 {
5381     void *argptr;
5382     int target_size;
5383     const argtype *arg_type = ie->arg_type;
5384     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5385     abi_long ret;
5386 
5387     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5388     struct blkpg_partition host_part;
5389 
5390     /* Read and convert blkpg */
5391     arg_type++;
5392     target_size = thunk_type_size(arg_type, 0);
5393     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5394     if (!argptr) {
5395         ret = -TARGET_EFAULT;
5396         goto out;
5397     }
5398     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5399     unlock_user(argptr, arg, 0);
5400 
5401     switch (host_blkpg->op) {
5402     case BLKPG_ADD_PARTITION:
5403     case BLKPG_DEL_PARTITION:
5404         /* payload is struct blkpg_partition */
5405         break;
5406     default:
5407         /* Unknown opcode */
5408         ret = -TARGET_EINVAL;
5409         goto out;
5410     }
5411 
5412     /* Read and convert blkpg->data */
5413     arg = (abi_long)(uintptr_t)host_blkpg->data;
5414     target_size = thunk_type_size(part_arg_type, 0);
5415     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5416     if (!argptr) {
5417         ret = -TARGET_EFAULT;
5418         goto out;
5419     }
5420     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5421     unlock_user(argptr, arg, 0);
5422 
5423     /* Swizzle the data pointer to our local copy and call! */
5424     host_blkpg->data = &host_part;
5425     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5426 
5427 out:
5428     return ret;
5429 }
5430 
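     /*
      * Routing-table ioctls (e.g. SIOCADDRT/SIOCDELRT): struct rtentry is
      * converted field by field so that rt_dev, which points at a device
      * name string in guest memory, can be locked and passed to the host
      * as a host pointer, then unlocked once the ioctl completes.
      */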
5431 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5432                                 int fd, int cmd, abi_long arg)
5433 {
5434     const argtype *arg_type = ie->arg_type;
5435     const StructEntry *se;
5436     const argtype *field_types;
5437     const int *dst_offsets, *src_offsets;
5438     int target_size;
5439     void *argptr;
5440     abi_ulong *target_rt_dev_ptr = NULL;
5441     unsigned long *host_rt_dev_ptr = NULL;
5442     abi_long ret;
5443     int i;
5444 
5445     assert(ie->access == IOC_W);
5446     assert(*arg_type == TYPE_PTR);
5447     arg_type++;
5448     assert(*arg_type == TYPE_STRUCT);
5449     target_size = thunk_type_size(arg_type, 0);
5450     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5451     if (!argptr) {
5452         return -TARGET_EFAULT;
5453     }
5454     arg_type++;
5455     assert(*arg_type == (int)STRUCT_rtentry);
5456     se = struct_entries + *arg_type++;
5457     assert(se->convert[0] == NULL);
5458     /* convert struct here to be able to catch rt_dev string */
5459     field_types = se->field_types;
5460     dst_offsets = se->field_offsets[THUNK_HOST];
5461     src_offsets = se->field_offsets[THUNK_TARGET];
5462     for (i = 0; i < se->nb_fields; i++) {
5463         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5464             assert(*field_types == TYPE_PTRVOID);
5465             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5466             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5467             if (*target_rt_dev_ptr != 0) {
5468                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5469                                                   tswapal(*target_rt_dev_ptr));
5470                 if (!*host_rt_dev_ptr) {
5471                     unlock_user(argptr, arg, 0);
5472                     return -TARGET_EFAULT;
5473                 }
5474             } else {
5475                 *host_rt_dev_ptr = 0;
5476             }
5477             field_types++;
5478             continue;
5479         }
5480         field_types = thunk_convert(buf_temp + dst_offsets[i],
5481                                     argptr + src_offsets[i],
5482                                     field_types, THUNK_HOST);
5483     }
5484     unlock_user(argptr, arg, 0);
5485 
5486     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5487 
5488     assert(host_rt_dev_ptr != NULL);
5489     assert(target_rt_dev_ptr != NULL);
5490     if (*host_rt_dev_ptr != 0) {
5491         unlock_user((void *)*host_rt_dev_ptr,
5492                     *target_rt_dev_ptr, 0);
5493     }
5494     return ret;
5495 }
5496 
5497 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5498                                      int fd, int cmd, abi_long arg)
5499 {
5500     int sig = target_to_host_signal(arg);
5501     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5502 }
5503 
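     /*
      * SIOCGSTAMP/SIOCGSTAMPNS: the timestamp is fetched with the host's
      * native ioctl and written back in whichever layout the guest asked
      * for: the _OLD commands use the traditional timeval/timespec layout,
      * the newer commands use the 64-bit time variants.
      */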
5504 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5505                                     int fd, int cmd, abi_long arg)
5506 {
5507     struct timeval tv;
5508     abi_long ret;
5509 
5510     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5511     if (is_error(ret)) {
5512         return ret;
5513     }
5514 
5515     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5516         if (copy_to_user_timeval(arg, &tv)) {
5517             return -TARGET_EFAULT;
5518         }
5519     } else {
5520         if (copy_to_user_timeval64(arg, &tv)) {
5521             return -TARGET_EFAULT;
5522         }
5523     }
5524 
5525     return ret;
5526 }
5527 
5528 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5529                                       int fd, int cmd, abi_long arg)
5530 {
5531     struct timespec ts;
5532     abi_long ret;
5533 
5534     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5535     if (is_error(ret)) {
5536         return ret;
5537     }
5538 
5539     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5540         if (host_to_target_timespec(arg, &ts)) {
5541             return -TARGET_EFAULT;
5542         }
5543     } else {
5544         if (host_to_target_timespec64(arg, &ts)) {
5545             return -TARGET_EFAULT;
5546         }
5547     }
5548 
5549     return ret;
5550 }
5551 
5552 #ifdef TIOCGPTPEER
5553 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5554                                      int fd, int cmd, abi_long arg)
5555 {
5556     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5557     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5558 }
5559 #endif
5560 
5561 #ifdef HAVE_DRM_H
5562 
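     /*
      * DRM_IOCTL_VERSION returns three variable-length strings (name, date,
      * desc) through caller-supplied buffers.  The helpers below lock those
      * guest buffers, point the host struct drm_version at them, and copy
      * the lengths back once the ioctl has filled them in.
      */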
5563 static void unlock_drm_version(struct drm_version *host_ver,
5564                                struct target_drm_version *target_ver,
5565                                bool copy)
5566 {
5567     unlock_user(host_ver->name, target_ver->name,
5568                                 copy ? host_ver->name_len : 0);
5569     unlock_user(host_ver->date, target_ver->date,
5570                                 copy ? host_ver->date_len : 0);
5571     unlock_user(host_ver->desc, target_ver->desc,
5572                                 copy ? host_ver->desc_len : 0);
5573 }
5574 
5575 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5576                                           struct target_drm_version *target_ver)
5577 {
5578     memset(host_ver, 0, sizeof(*host_ver));
5579 
5580     __get_user(host_ver->name_len, &target_ver->name_len);
5581     if (host_ver->name_len) {
5582         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5583                                    target_ver->name_len, 0);
5584         if (!host_ver->name) {
5585             return -EFAULT;
5586         }
5587     }
5588 
5589     __get_user(host_ver->date_len, &target_ver->date_len);
5590     if (host_ver->date_len) {
5591         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5592                                    target_ver->date_len, 0);
5593         if (!host_ver->date) {
5594             goto err;
5595         }
5596     }
5597 
5598     __get_user(host_ver->desc_len, &target_ver->desc_len);
5599     if (host_ver->desc_len) {
5600         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5601                                    target_ver->desc_len, 0);
5602         if (!host_ver->desc) {
5603             goto err;
5604         }
5605     }
5606 
5607     return 0;
5608 err:
5609     unlock_drm_version(host_ver, target_ver, false);
5610     return -EFAULT;
5611 }
5612 
5613 static inline void host_to_target_drmversion(
5614                                           struct target_drm_version *target_ver,
5615                                           struct drm_version *host_ver)
5616 {
5617     __put_user(host_ver->version_major, &target_ver->version_major);
5618     __put_user(host_ver->version_minor, &target_ver->version_minor);
5619     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5620     __put_user(host_ver->name_len, &target_ver->name_len);
5621     __put_user(host_ver->date_len, &target_ver->date_len);
5622     __put_user(host_ver->desc_len, &target_ver->desc_len);
5623     unlock_drm_version(host_ver, target_ver, true);
5624 }
5625 
5626 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5627                              int fd, int cmd, abi_long arg)
5628 {
5629     struct drm_version *ver;
5630     struct target_drm_version *target_ver;
5631     abi_long ret;
5632 
5633     switch (ie->host_cmd) {
5634     case DRM_IOCTL_VERSION:
5635         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5636             return -TARGET_EFAULT;
5637         }
5638         ver = (struct drm_version *)buf_temp;
5639         ret = target_to_host_drmversion(ver, target_ver);
5640         if (!is_error(ret)) {
5641             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5642             if (is_error(ret)) {
5643                 unlock_drm_version(ver, target_ver, false);
5644             } else {
5645                 host_to_target_drmversion(target_ver, ver);
5646             }
5647         }
5648         unlock_user_struct(target_ver, arg, 0);
5649         return ret;
5650     }
5651     return -TARGET_ENOSYS;
5652 }
5653 
5654 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5655                                            struct drm_i915_getparam *gparam,
5656                                            int fd, abi_long arg)
5657 {
5658     abi_long ret;
5659     int value;
5660     struct target_drm_i915_getparam *target_gparam;
5661 
5662     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5663         return -TARGET_EFAULT;
5664     }
5665 
5666     __get_user(gparam->param, &target_gparam->param);
5667     gparam->value = &value;
5668     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5669     put_user_s32(value, target_gparam->value);
5670 
5671     unlock_user_struct(target_gparam, arg, 0);
5672     return ret;
5673 }
5674 
5675 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5676                                   int fd, int cmd, abi_long arg)
5677 {
5678     switch (ie->host_cmd) {
5679     case DRM_IOCTL_I915_GETPARAM:
5680         return do_ioctl_drm_i915_getparam(ie,
5681                                           (struct drm_i915_getparam *)buf_temp,
5682                                           fd, arg);
5683     default:
5684         return -TARGET_ENOSYS;
5685     }
5686 }
5687 
5688 #endif
5689 
5690 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5691                                         int fd, int cmd, abi_long arg)
5692 {
5693     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5694     struct tun_filter *target_filter;
5695     char *target_addr;
5696 
5697     assert(ie->access == IOC_W);
5698 
5699     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5700     if (!target_filter) {
5701         return -TARGET_EFAULT;
5702     }
5703     filter->flags = tswap16(target_filter->flags);
5704     filter->count = tswap16(target_filter->count);
5705     unlock_user(target_filter, arg, 0);
5706 
5707     if (filter->count) {
5708         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5709             MAX_STRUCT_SIZE) {
5710             return -TARGET_EFAULT;
5711         }
5712 
5713         target_addr = lock_user(VERIFY_READ,
5714                                 arg + offsetof(struct tun_filter, addr),
5715                                 filter->count * ETH_ALEN, 1);
5716         if (!target_addr) {
5717             return -TARGET_EFAULT;
5718         }
5719         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5720         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5721     }
5722 
5723     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5724 }
5725 
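     /*
      * The ioctl dispatch table is generated from ioctls.h.  Each entry maps
      * a target command to the host command, an access mode and an argument
      * type description; IOCTL_SPECIAL entries additionally name one of the
      * do_ioctl_* helpers above that performs the conversion by hand, while
      * IOCTL_IGNORE entries are recognised but have no host implementation.
      * As an illustration, a plain entry in ioctls.h along the lines of
      *   IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
      * is handled generically by do_ioctl() below.
      */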
5726 IOCTLEntry ioctl_entries[] = {
5727 #define IOCTL(cmd, access, ...) \
5728     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5729 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5730     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5731 #define IOCTL_IGNORE(cmd) \
5732     { TARGET_ ## cmd, 0, #cmd },
5733 #include "ioctls.h"
5734     { 0, 0, },
5735 };
5736 
5737 /* ??? Implement proper locking for ioctls.  */
5738 /* do_ioctl() must return target values and target errnos. */
5739 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5740 {
5741     const IOCTLEntry *ie;
5742     const argtype *arg_type;
5743     abi_long ret;
5744     uint8_t buf_temp[MAX_STRUCT_SIZE];
5745     int target_size;
5746     void *argptr;
5747 
5748     ie = ioctl_entries;
5749     for (;;) {
5750         if (ie->target_cmd == 0) {
5751             qemu_log_mask(
5752                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5753             return -TARGET_ENOSYS;
5754         }
5755         if (ie->target_cmd == cmd)
5756             break;
5757         ie++;
5758     }
5759     arg_type = ie->arg_type;
5760     if (ie->do_ioctl) {
5761         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5762     } else if (!ie->host_cmd) {
5763         /* Some architectures define BSD ioctls in their headers
5764            that are not implemented in Linux.  */
5765         return -TARGET_ENOSYS;
5766     }
5767 
5768     switch (arg_type[0]) {
5769     case TYPE_NULL:
5770         /* no argument */
5771         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5772         break;
5773     case TYPE_PTRVOID:
5774     case TYPE_INT:
5775     case TYPE_LONG:
5776     case TYPE_ULONG:
5777         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5778         break;
5779     case TYPE_PTR:
5780         arg_type++;
5781         target_size = thunk_type_size(arg_type, 0);
5782         switch (ie->access) {
5783         case IOC_R:
5784             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5785             if (!is_error(ret)) {
5786                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5787                 if (!argptr)
5788                     return -TARGET_EFAULT;
5789                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5790                 unlock_user(argptr, arg, target_size);
5791             }
5792             break;
5793         case IOC_W:
5794             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5795             if (!argptr)
5796                 return -TARGET_EFAULT;
5797             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5798             unlock_user(argptr, arg, 0);
5799             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5800             break;
5801         default:
5802         case IOC_RW:
5803             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5804             if (!argptr)
5805                 return -TARGET_EFAULT;
5806             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5807             unlock_user(argptr, arg, 0);
5808             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5809             if (!is_error(ret)) {
5810                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5811                 if (!argptr)
5812                     return -TARGET_EFAULT;
5813                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5814                 unlock_user(argptr, arg, target_size);
5815             }
5816             break;
5817         }
5818         break;
5819     default:
5820         qemu_log_mask(LOG_UNIMP,
5821                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5822                       (long)cmd, arg_type[0]);
5823         ret = -TARGET_ENOSYS;
5824         break;
5825     }
5826     return ret;
5827 }
5828 
5829 static const bitmask_transtbl iflag_tbl[] = {
5830         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5831         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5832         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5833         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5834         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5835         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5836         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5837         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5838         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5839         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5840         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5841         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5842         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5843         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5844         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5845         { 0, 0, 0, 0 }
5846 };
5847 
5848 static const bitmask_transtbl oflag_tbl[] = {
5849 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5850 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5851 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5852 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5853 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5854 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5855 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5856 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5857 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5858 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5859 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5860 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5861 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5862 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5863 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5864 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5865 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5866 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5867 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5868 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5869 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5870 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5871 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5872 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5873 	{ 0, 0, 0, 0 }
5874 };
5875 
5876 static const bitmask_transtbl cflag_tbl[] = {
5877 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5878 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5879 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5880 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5881 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5882 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5883 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5884 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5885 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5886 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5887 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5888 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5889 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5890 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5891 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5892 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5893 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5894 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5895 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5896 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5897 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5898 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5899 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5900 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5901 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5902 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5903 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5904 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5905 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5906 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5907 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5908 	{ 0, 0, 0, 0 }
5909 };
5910 
5911 static const bitmask_transtbl lflag_tbl[] = {
5912   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5913   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5914   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5915   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5916   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5917   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5918   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5919   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5920   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5921   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5922   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5923   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5924   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5925   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5926   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5927   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5928   { 0, 0, 0, 0 }
5929 };
5930 
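     /*
      * termios conversion: each flag word is translated through the
      * bitmask_transtbl tables above, and the c_cc control characters are
      * copied index by index because the TARGET_V* indices need not match
      * the host V* indices.
      */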
5931 static void target_to_host_termios (void *dst, const void *src)
5932 {
5933     struct host_termios *host = dst;
5934     const struct target_termios *target = src;
5935 
5936     host->c_iflag =
5937         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5938     host->c_oflag =
5939         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5940     host->c_cflag =
5941         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5942     host->c_lflag =
5943         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5944     host->c_line = target->c_line;
5945 
5946     memset(host->c_cc, 0, sizeof(host->c_cc));
5947     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5948     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5949     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5950     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5951     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5952     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5953     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5954     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5955     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5956     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5957     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5958     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5959     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5960     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5961     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5962     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5963     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5964 }
5965 
5966 static void host_to_target_termios (void *dst, const void *src)
5967 {
5968     struct target_termios *target = dst;
5969     const struct host_termios *host = src;
5970 
5971     target->c_iflag =
5972         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5973     target->c_oflag =
5974         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5975     target->c_cflag =
5976         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5977     target->c_lflag =
5978         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5979     target->c_line = host->c_line;
5980 
5981     memset(target->c_cc, 0, sizeof(target->c_cc));
5982     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5983     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5984     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5985     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5986     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5987     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5988     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5989     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5990     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5991     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5992     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5993     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5994     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5995     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5996     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5997     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5998     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5999 }
6000 
6001 static const StructEntry struct_termios_def = {
6002     .convert = { host_to_target_termios, target_to_host_termios },
6003     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6004     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6005     .print = print_termios,
6006 };
6007 
6008 static const bitmask_transtbl mmap_flags_tbl[] = {
6009     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6010     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6011     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6012     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6013       MAP_ANONYMOUS, MAP_ANONYMOUS },
6014     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6015       MAP_GROWSDOWN, MAP_GROWSDOWN },
6016     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6017       MAP_DENYWRITE, MAP_DENYWRITE },
6018     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6019       MAP_EXECUTABLE, MAP_EXECUTABLE },
6020     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6021     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6022       MAP_NORESERVE, MAP_NORESERVE },
6023     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6024     /* MAP_STACK had been ignored by the kernel for quite some time.
6025        Recognize it for the target insofar as we do not want to pass
6026        it through to the host.  */
6027     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6028     { 0, 0, 0, 0 }
6029 };
6030 
6031 /*
6032  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6033  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6034  */
6035 #if defined(TARGET_I386)
6036 
6037 /* NOTE: there is really one LDT for all the threads */
6038 static uint8_t *ldt_table;
6039 
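     /*
      * modify_ldt emulation: the guest LDT is kept in guest memory
      * (ldt_table, shared by all threads) rather than being handed to the
      * host kernel.  read_ldt() copies it out, and write_ldt() builds the
      * descriptor words the same way the Linux kernel does before storing
      * them into the table.
      */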
6040 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6041 {
6042     int size;
6043     void *p;
6044 
6045     if (!ldt_table)
6046         return 0;
6047     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6048     if (size > bytecount)
6049         size = bytecount;
6050     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6051     if (!p)
6052         return -TARGET_EFAULT;
6053     /* ??? Should this be byteswapped?  */
6054     memcpy(p, ldt_table, size);
6055     unlock_user(p, ptr, size);
6056     return size;
6057 }
6058 
6059 /* XXX: add locking support */
6060 static abi_long write_ldt(CPUX86State *env,
6061                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6062 {
6063     struct target_modify_ldt_ldt_s ldt_info;
6064     struct target_modify_ldt_ldt_s *target_ldt_info;
6065     int seg_32bit, contents, read_exec_only, limit_in_pages;
6066     int seg_not_present, useable, lm;
6067     uint32_t *lp, entry_1, entry_2;
6068 
6069     if (bytecount != sizeof(ldt_info))
6070         return -TARGET_EINVAL;
6071     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6072         return -TARGET_EFAULT;
6073     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6074     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6075     ldt_info.limit = tswap32(target_ldt_info->limit);
6076     ldt_info.flags = tswap32(target_ldt_info->flags);
6077     unlock_user_struct(target_ldt_info, ptr, 0);
6078 
6079     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6080         return -TARGET_EINVAL;
6081     seg_32bit = ldt_info.flags & 1;
6082     contents = (ldt_info.flags >> 1) & 3;
6083     read_exec_only = (ldt_info.flags >> 3) & 1;
6084     limit_in_pages = (ldt_info.flags >> 4) & 1;
6085     seg_not_present = (ldt_info.flags >> 5) & 1;
6086     useable = (ldt_info.flags >> 6) & 1;
6087 #ifdef TARGET_ABI32
6088     lm = 0;
6089 #else
6090     lm = (ldt_info.flags >> 7) & 1;
6091 #endif
6092     if (contents == 3) {
6093         if (oldmode)
6094             return -TARGET_EINVAL;
6095         if (seg_not_present == 0)
6096             return -TARGET_EINVAL;
6097     }
6098     /* allocate the LDT */
6099     if (!ldt_table) {
6100         env->ldt.base = target_mmap(0,
6101                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6102                                     PROT_READ|PROT_WRITE,
6103                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6104         if (env->ldt.base == -1)
6105             return -TARGET_ENOMEM;
6106         memset(g2h_untagged(env->ldt.base), 0,
6107                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6108         env->ldt.limit = 0xffff;
6109         ldt_table = g2h_untagged(env->ldt.base);
6110     }
6111 
6112     /* NOTE: same code as Linux kernel */
6113     /* Allow LDTs to be cleared by the user. */
6114     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6115         if (oldmode ||
6116             (contents == 0          &&
6117              read_exec_only == 1    &&
6118              seg_32bit == 0         &&
6119              limit_in_pages == 0    &&
6120              seg_not_present == 1   &&
6121              useable == 0)) {
6122             entry_1 = 0;
6123             entry_2 = 0;
6124             goto install;
6125         }
6126     }
6127 
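    /*
     * entry_1/entry_2 follow the standard x86 segment descriptor layout:
     * entry_1 holds limit[15:0] and base[15:0]; entry_2 holds base[23:16],
     * the type/access bits, limit[19:16], AVL, L, D/B, G and base[31:24].
     * The constant 0x7000 sets S=1 (code/data segment) and DPL=3.
     */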
6128     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6129         (ldt_info.limit & 0x0ffff);
6130     entry_2 = (ldt_info.base_addr & 0xff000000) |
6131         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6132         (ldt_info.limit & 0xf0000) |
6133         ((read_exec_only ^ 1) << 9) |
6134         (contents << 10) |
6135         ((seg_not_present ^ 1) << 15) |
6136         (seg_32bit << 22) |
6137         (limit_in_pages << 23) |
6138         (lm << 21) |
6139         0x7000;
6140     if (!oldmode)
6141         entry_2 |= (useable << 20);
6142 
6143     /* Install the new entry ...  */
6144 install:
6145     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6146     lp[0] = tswap32(entry_1);
6147     lp[1] = tswap32(entry_2);
6148     return 0;
6149 }
6150 
6151 /* specific and weird i386 syscalls */
6152 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6153                               unsigned long bytecount)
6154 {
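    /*
     * modify_ldt() function codes: 0 reads the LDT, 1 writes an entry using
     * the legacy ("oldmode") layout, 0x11 writes using the current layout.
     * Code 2 (which the kernel uses for reading the default LDT) is not
     * handled here and falls through to -TARGET_ENOSYS.
     */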
6155     abi_long ret;
6156 
6157     switch (func) {
6158     case 0:
6159         ret = read_ldt(ptr, bytecount);
6160         break;
6161     case 1:
6162         ret = write_ldt(env, ptr, bytecount, 1);
6163         break;
6164     case 0x11:
6165         ret = write_ldt(env, ptr, bytecount, 0);
6166         break;
6167     default:
6168         ret = -TARGET_ENOSYS;
6169         break;
6170     }
6171     return ret;
6172 }
6173 
6174 #if defined(TARGET_ABI32)
6175 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6176 {
6177     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6178     struct target_modify_ldt_ldt_s ldt_info;
6179     struct target_modify_ldt_ldt_s *target_ldt_info;
6180     int seg_32bit, contents, read_exec_only, limit_in_pages;
6181     int seg_not_present, useable, lm;
6182     uint32_t *lp, entry_1, entry_2;
6183     int i;
6184 
6185     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6186     if (!target_ldt_info)
6187         return -TARGET_EFAULT;
6188     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6189     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6190     ldt_info.limit = tswap32(target_ldt_info->limit);
6191     ldt_info.flags = tswap32(target_ldt_info->flags);
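    /*
     * An entry_number of -1 asks us to pick the first free TLS slot;
     * scan the GDT TLS range and report the chosen slot back to the guest.
     */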
6192     if (ldt_info.entry_number == -1) {
6193         for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
6194             if (gdt_table[i] == 0) {
6195                 ldt_info.entry_number = i;
6196                 target_ldt_info->entry_number = tswap32(i);
6197                 break;
6198             }
6199         }
6200     }
6201     unlock_user_struct(target_ldt_info, ptr, 1);
6202 
6203     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6204         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6205         return -TARGET_EINVAL;
6206     seg_32bit = ldt_info.flags & 1;
6207     contents = (ldt_info.flags >> 1) & 3;
6208     read_exec_only = (ldt_info.flags >> 3) & 1;
6209     limit_in_pages = (ldt_info.flags >> 4) & 1;
6210     seg_not_present = (ldt_info.flags >> 5) & 1;
6211     useable = (ldt_info.flags >> 6) & 1;
6212 #ifdef TARGET_ABI32
6213     lm = 0;
6214 #else
6215     lm = (ldt_info.flags >> 7) & 1;
6216 #endif
6217 
6218     if (contents == 3) {
6219         if (seg_not_present == 0)
6220             return -TARGET_EINVAL;
6221     }
6222 
6223     /* NOTE: same code as Linux kernel */
6224     /* Allow LDTs to be cleared by the user. */
6225     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6226         if ((contents == 0             &&
6227              read_exec_only == 1       &&
6228              seg_32bit == 0            &&
6229              limit_in_pages == 0       &&
6230              seg_not_present == 1      &&
6231              useable == 0 )) {
6232             entry_1 = 0;
6233             entry_2 = 0;
6234             goto install;
6235         }
6236     }
6237 
6238     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6239         (ldt_info.limit & 0x0ffff);
6240     entry_2 = (ldt_info.base_addr & 0xff000000) |
6241         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6242         (ldt_info.limit & 0xf0000) |
6243         ((read_exec_only ^ 1) << 9) |
6244         (contents << 10) |
6245         ((seg_not_present ^ 1) << 15) |
6246         (seg_32bit << 22) |
6247         (limit_in_pages << 23) |
6248         (useable << 20) |
6249         (lm << 21) |
6250         0x7000;
6251 
6252     /* Install the new entry ...  */
6253 install:
6254     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6255     lp[0] = tswap32(entry_1);
6256     lp[1] = tswap32(entry_2);
6257     return 0;
6258 }
6259 
6260 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6261 {
6262     struct target_modify_ldt_ldt_s *target_ldt_info;
6263     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6264     uint32_t base_addr, limit, flags;
6265     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6266     int seg_not_present, useable, lm;
6267     uint32_t *lp, entry_1, entry_2;
6268 
6269     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6270     if (!target_ldt_info)
6271         return -TARGET_EFAULT;
6272     idx = tswap32(target_ldt_info->entry_number);
6273     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6274         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6275         unlock_user_struct(target_ldt_info, ptr, 1);
6276         return -TARGET_EINVAL;
6277     }
6278     lp = (uint32_t *)(gdt_table + idx);
6279     entry_1 = tswap32(lp[0]);
6280     entry_2 = tswap32(lp[1]);
6281 
6282     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6283     contents = (entry_2 >> 10) & 3;
6284     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6285     seg_32bit = (entry_2 >> 22) & 1;
6286     limit_in_pages = (entry_2 >> 23) & 1;
6287     useable = (entry_2 >> 20) & 1;
6288 #ifdef TARGET_ABI32
6289     lm = 0;
6290 #else
6291     lm = (entry_2 >> 21) & 1;
6292 #endif
6293     flags = (seg_32bit << 0) | (contents << 1) |
6294         (read_exec_only << 3) | (limit_in_pages << 4) |
6295         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6296     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6297     base_addr = (entry_1 >> 16) |
6298         (entry_2 & 0xff000000) |
6299         ((entry_2 & 0xff) << 16);
6300     target_ldt_info->base_addr = tswapal(base_addr);
6301     target_ldt_info->limit = tswap32(limit);
6302     target_ldt_info->flags = tswap32(flags);
6303     unlock_user_struct(target_ldt_info, ptr, 1);
6304     return 0;
6305 }
6306 
6307 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6308 {
6309     return -TARGET_ENOSYS;
6310 }
6311 #else
6312 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6313 {
6314     abi_long ret = 0;
6315     abi_ulong val;
6316     int idx;
6317 
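    /*
     * ARCH_SET_FS/GS load a null selector and then set the hidden segment
     * base directly; in 64-bit mode only the base matters for fs:/gs:
     * addressing, much as the kernel's arch_prctl does by writing the
     * FS/GS base MSRs.
     */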
6318     switch(code) {
6319     case TARGET_ARCH_SET_GS:
6320     case TARGET_ARCH_SET_FS:
6321         if (code == TARGET_ARCH_SET_GS)
6322             idx = R_GS;
6323         else
6324             idx = R_FS;
6325         cpu_x86_load_seg(env, idx, 0);
6326         env->segs[idx].base = addr;
6327         break;
6328     case TARGET_ARCH_GET_GS:
6329     case TARGET_ARCH_GET_FS:
6330         if (code == TARGET_ARCH_GET_GS)
6331             idx = R_GS;
6332         else
6333             idx = R_FS;
6334         val = env->segs[idx].base;
6335         if (put_user(val, addr, abi_ulong))
6336             ret = -TARGET_EFAULT;
6337         break;
6338     default:
6339         ret = -TARGET_EINVAL;
6340         break;
6341     }
6342     return ret;
6343 }
6344 #endif /* defined(TARGET_ABI32) */
6345 #endif /* defined(TARGET_I386) */
6346 
6347 /*
6348  * These constants are generic.  Supply any that are missing from the host.
6349  */
6350 #ifndef PR_SET_NAME
6351 # define PR_SET_NAME    15
6352 # define PR_GET_NAME    16
6353 #endif
6354 #ifndef PR_SET_FP_MODE
6355 # define PR_SET_FP_MODE 45
6356 # define PR_GET_FP_MODE 46
6357 # define PR_FP_MODE_FR   (1 << 0)
6358 # define PR_FP_MODE_FRE  (1 << 1)
6359 #endif
6360 #ifndef PR_SVE_SET_VL
6361 # define PR_SVE_SET_VL  50
6362 # define PR_SVE_GET_VL  51
6363 # define PR_SVE_VL_LEN_MASK  0xffff
6364 # define PR_SVE_VL_INHERIT   (1 << 17)
6365 #endif
6366 #ifndef PR_PAC_RESET_KEYS
6367 # define PR_PAC_RESET_KEYS  54
6368 # define PR_PAC_APIAKEY   (1 << 0)
6369 # define PR_PAC_APIBKEY   (1 << 1)
6370 # define PR_PAC_APDAKEY   (1 << 2)
6371 # define PR_PAC_APDBKEY   (1 << 3)
6372 # define PR_PAC_APGAKEY   (1 << 4)
6373 #endif
6374 #ifndef PR_SET_TAGGED_ADDR_CTRL
6375 # define PR_SET_TAGGED_ADDR_CTRL 55
6376 # define PR_GET_TAGGED_ADDR_CTRL 56
6377 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6378 #endif
6379 #ifndef PR_MTE_TCF_SHIFT
6380 # define PR_MTE_TCF_SHIFT       1
6381 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6382 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6383 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6384 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6385 # define PR_MTE_TAG_SHIFT       3
6386 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6387 #endif
6388 #ifndef PR_SET_IO_FLUSHER
6389 # define PR_SET_IO_FLUSHER 57
6390 # define PR_GET_IO_FLUSHER 58
6391 #endif
6392 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6393 # define PR_SET_SYSCALL_USER_DISPATCH 59
6394 #endif
6395 
6396 #include "target_prctl.h"
6397 
6398 static abi_long do_prctl_inval0(CPUArchState *env)
6399 {
6400     return -TARGET_EINVAL;
6401 }
6402 
6403 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6404 {
6405     return -TARGET_EINVAL;
6406 }
6407 
6408 #ifndef do_prctl_get_fp_mode
6409 #define do_prctl_get_fp_mode do_prctl_inval0
6410 #endif
6411 #ifndef do_prctl_set_fp_mode
6412 #define do_prctl_set_fp_mode do_prctl_inval1
6413 #endif
6414 #ifndef do_prctl_get_vl
6415 #define do_prctl_get_vl do_prctl_inval0
6416 #endif
6417 #ifndef do_prctl_set_vl
6418 #define do_prctl_set_vl do_prctl_inval1
6419 #endif
6420 #ifndef do_prctl_reset_keys
6421 #define do_prctl_reset_keys do_prctl_inval1
6422 #endif
6423 #ifndef do_prctl_set_tagged_addr_ctrl
6424 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6425 #endif
6426 #ifndef do_prctl_get_tagged_addr_ctrl
6427 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6428 #endif
6429 #ifndef do_prctl_get_unalign
6430 #define do_prctl_get_unalign do_prctl_inval1
6431 #endif
6432 #ifndef do_prctl_set_unalign
6433 #define do_prctl_set_unalign do_prctl_inval1
6434 #endif
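/*
 * Each target may provide its own do_prctl_* handlers in target_prctl.h;
 * any handler it does not define falls back to one of the -TARGET_EINVAL
 * stubs above.
 */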
6435 
6436 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6437                          abi_long arg3, abi_long arg4, abi_long arg5)
6438 {
6439     abi_long ret;
6440 
6441     switch (option) {
6442     case PR_GET_PDEATHSIG:
6443         {
6444             int deathsig;
6445             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6446                                   arg3, arg4, arg5));
6447             if (!is_error(ret) &&
6448                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6449                 return -TARGET_EFAULT;
6450             }
6451             return ret;
6452         }
6453     case PR_SET_PDEATHSIG:
6454         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6455                                arg3, arg4, arg5));
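    /*
     * PR_GET_NAME/PR_SET_NAME operate on a 16-byte buffer (TASK_COMM_LEN,
     * including the trailing NUL), hence the fixed-size lock_user()
     * windows below.
     */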
6456     case PR_GET_NAME:
6457         {
6458             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6459             if (!name) {
6460                 return -TARGET_EFAULT;
6461             }
6462             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6463                                   arg3, arg4, arg5));
6464             unlock_user(name, arg2, 16);
6465             return ret;
6466         }
6467     case PR_SET_NAME:
6468         {
6469             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6470             if (!name) {
6471                 return -TARGET_EFAULT;
6472             }
6473             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6474                                   arg3, arg4, arg5));
6475             unlock_user(name, arg2, 0);
6476             return ret;
6477         }
6478     case PR_GET_FP_MODE:
6479         return do_prctl_get_fp_mode(env);
6480     case PR_SET_FP_MODE:
6481         return do_prctl_set_fp_mode(env, arg2);
6482     case PR_SVE_GET_VL:
6483         return do_prctl_get_vl(env);
6484     case PR_SVE_SET_VL:
6485         return do_prctl_set_vl(env, arg2);
6486     case PR_PAC_RESET_KEYS:
6487         if (arg3 || arg4 || arg5) {
6488             return -TARGET_EINVAL;
6489         }
6490         return do_prctl_reset_keys(env, arg2);
6491     case PR_SET_TAGGED_ADDR_CTRL:
6492         if (arg3 || arg4 || arg5) {
6493             return -TARGET_EINVAL;
6494         }
6495         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6496     case PR_GET_TAGGED_ADDR_CTRL:
6497         if (arg2 || arg3 || arg4 || arg5) {
6498             return -TARGET_EINVAL;
6499         }
6500         return do_prctl_get_tagged_addr_ctrl(env);
6501 
6502     case PR_GET_UNALIGN:
6503         return do_prctl_get_unalign(env, arg2);
6504     case PR_SET_UNALIGN:
6505         return do_prctl_set_unalign(env, arg2);
6506 
6507     case PR_CAP_AMBIENT:
6508     case PR_CAPBSET_READ:
6509     case PR_CAPBSET_DROP:
6510     case PR_GET_DUMPABLE:
6511     case PR_SET_DUMPABLE:
6512     case PR_GET_KEEPCAPS:
6513     case PR_SET_KEEPCAPS:
6514     case PR_GET_SECUREBITS:
6515     case PR_SET_SECUREBITS:
6516     case PR_GET_TIMING:
6517     case PR_SET_TIMING:
6518     case PR_GET_TIMERSLACK:
6519     case PR_SET_TIMERSLACK:
6520     case PR_MCE_KILL:
6521     case PR_MCE_KILL_GET:
6522     case PR_GET_NO_NEW_PRIVS:
6523     case PR_SET_NO_NEW_PRIVS:
6524     case PR_GET_IO_FLUSHER:
6525     case PR_SET_IO_FLUSHER:
6526         /* Some prctl options have no pointer arguments and we can pass them on. */
6527         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6528 
6529     case PR_GET_CHILD_SUBREAPER:
6530     case PR_SET_CHILD_SUBREAPER:
6531     case PR_GET_SPECULATION_CTRL:
6532     case PR_SET_SPECULATION_CTRL:
6533     case PR_GET_TID_ADDRESS:
6534         /* TODO */
6535         return -TARGET_EINVAL;
6536 
6537     case PR_GET_FPEXC:
6538     case PR_SET_FPEXC:
6539         /* Was used for SPE on PowerPC. */
6540         return -TARGET_EINVAL;
6541 
6542     case PR_GET_ENDIAN:
6543     case PR_SET_ENDIAN:
6544     case PR_GET_FPEMU:
6545     case PR_SET_FPEMU:
6546     case PR_SET_MM:
6547     case PR_GET_SECCOMP:
6548     case PR_SET_SECCOMP:
6549     case PR_SET_SYSCALL_USER_DISPATCH:
6550     case PR_GET_THP_DISABLE:
6551     case PR_SET_THP_DISABLE:
6552     case PR_GET_TSC:
6553     case PR_SET_TSC:
6554         /* Refused, to prevent the target disabling features that we need. */
6555         return -TARGET_EINVAL;
6556 
6557     default:
6558         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6559                       option);
6560         return -TARGET_EINVAL;
6561     }
6562 }
6563 
6564 #define NEW_STACK_SIZE 0x40000
6565 
6566 
6567 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6568 typedef struct {
6569     CPUArchState *env;
6570     pthread_mutex_t mutex;
6571     pthread_cond_t cond;
6572     pthread_t thread;
6573     uint32_t tid;
6574     abi_ulong child_tidptr;
6575     abi_ulong parent_tidptr;
6576     sigset_t sigmask;
6577 } new_thread_info;
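/*
 * Thread-creation handshake: the parent holds clone_lock and info.mutex
 * while it sets up the new CPU state, then waits on info.cond for the
 * child to publish its TID; the child signals the condition and then
 * briefly takes clone_lock so it cannot enter cpu_loop() before the
 * parent has finished initializing.
 */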
6578 
6579 static void *clone_func(void *arg)
6580 {
6581     new_thread_info *info = arg;
6582     CPUArchState *env;
6583     CPUState *cpu;
6584     TaskState *ts;
6585 
6586     rcu_register_thread();
6587     tcg_register_thread();
6588     env = info->env;
6589     cpu = env_cpu(env);
6590     thread_cpu = cpu;
6591     ts = (TaskState *)cpu->opaque;
6592     info->tid = sys_gettid();
6593     task_settid(ts);
6594     if (info->child_tidptr)
6595         put_user_u32(info->tid, info->child_tidptr);
6596     if (info->parent_tidptr)
6597         put_user_u32(info->tid, info->parent_tidptr);
6598     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6599     /* Enable signals.  */
6600     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6601     /* Signal to the parent that we're ready.  */
6602     pthread_mutex_lock(&info->mutex);
6603     pthread_cond_broadcast(&info->cond);
6604     pthread_mutex_unlock(&info->mutex);
6605     /* Wait until the parent has finished initializing the tls state.  */
6606     pthread_mutex_lock(&clone_lock);
6607     pthread_mutex_unlock(&clone_lock);
6608     cpu_loop(env);
6609     /* never exits */
6610     return NULL;
6611 }
6612 
6613 /* do_fork() must return host values and target errnos (unlike most
6614    do_*() functions). */
6615 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6616                    abi_ulong parent_tidptr, target_ulong newtls,
6617                    abi_ulong child_tidptr)
6618 {
6619     CPUState *cpu = env_cpu(env);
6620     int ret;
6621     TaskState *ts;
6622     CPUState *new_cpu;
6623     CPUArchState *new_env;
6624     sigset_t sigmask;
6625 
6626     flags &= ~CLONE_IGNORED_FLAGS;
6627 
6628     /* Emulate vfork() with fork() */
6629     if (flags & CLONE_VFORK)
6630         flags &= ~(CLONE_VFORK | CLONE_VM);
6631 
6632     if (flags & CLONE_VM) {
6633         TaskState *parent_ts = (TaskState *)cpu->opaque;
6634         new_thread_info info;
6635         pthread_attr_t attr;
6636 
6637         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6638             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6639             return -TARGET_EINVAL;
6640         }
6641 
6642         ts = g_new0(TaskState, 1);
6643         init_task_state(ts);
6644 
6645         /* Grab a mutex so that thread setup appears atomic.  */
6646         pthread_mutex_lock(&clone_lock);
6647 
6648         /*
6649          * If this is our first additional thread, we need to ensure we
6650          * generate code for parallel execution and flush old translations.
6651          * Do this now so that the copy gets CF_PARALLEL too.
6652          */
6653         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6654             cpu->tcg_cflags |= CF_PARALLEL;
6655             tb_flush(cpu);
6656         }
6657 
6658         /* Create a new CPU instance. */
6659         new_env = cpu_copy(env);
6660         /* Init regs that differ from the parent.  */
6661         cpu_clone_regs_child(new_env, newsp, flags);
6662         cpu_clone_regs_parent(env, flags);
6663         new_cpu = env_cpu(new_env);
6664         new_cpu->opaque = ts;
6665         ts->bprm = parent_ts->bprm;
6666         ts->info = parent_ts->info;
6667         ts->signal_mask = parent_ts->signal_mask;
6668 
6669         if (flags & CLONE_CHILD_CLEARTID) {
6670             ts->child_tidptr = child_tidptr;
6671         }
6672 
6673         if (flags & CLONE_SETTLS) {
6674             cpu_set_tls (new_env, newtls);
6675         }
6676 
6677         memset(&info, 0, sizeof(info));
6678         pthread_mutex_init(&info.mutex, NULL);
6679         pthread_mutex_lock(&info.mutex);
6680         pthread_cond_init(&info.cond, NULL);
6681         info.env = new_env;
6682         if (flags & CLONE_CHILD_SETTID) {
6683             info.child_tidptr = child_tidptr;
6684         }
6685         if (flags & CLONE_PARENT_SETTID) {
6686             info.parent_tidptr = parent_tidptr;
6687         }
6688 
6689         ret = pthread_attr_init(&attr);
6690         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6691         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6692         /* It is not safe to deliver signals until the child has finished
6693            initializing, so temporarily block all signals.  */
6694         sigfillset(&sigmask);
6695         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6696         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6697 
6698         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6699         /* TODO: Free new CPU state if thread creation failed.  */
6700 
6701         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6702         pthread_attr_destroy(&attr);
6703         if (ret == 0) {
6704             /* Wait for the child to initialize.  */
6705             pthread_cond_wait(&info.cond, &info.mutex);
6706             ret = info.tid;
6707         } else {
6708             ret = -1;
6709         }
6710         pthread_mutex_unlock(&info.mutex);
6711         pthread_cond_destroy(&info.cond);
6712         pthread_mutex_destroy(&info.mutex);
6713         pthread_mutex_unlock(&clone_lock);
6714     } else {
6715         /* Without CLONE_VM, we treat this as a plain fork. */
6716         if (flags & CLONE_INVALID_FORK_FLAGS) {
6717             return -TARGET_EINVAL;
6718         }
6719 
6720         /* We can't support custom termination signals */
6721         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6722             return -TARGET_EINVAL;
6723         }
6724 
6725         if (block_signals()) {
6726             return -QEMU_ERESTARTSYS;
6727         }
6728 
6729         fork_start();
6730         ret = fork();
6731         if (ret == 0) {
6732             /* Child Process.  */
6733             cpu_clone_regs_child(env, newsp, flags);
6734             fork_end(1);
6735             /* There is a race condition here.  The parent process could
6736                theoretically read the TID in the child process before the
6737                child tid is set.  Avoiding that would require either ptrace
6738                (not implemented) or making *_tidptr point at a shared memory
6739                mapping.  We can't repeat the spinlock hack used above because
6740                the child process gets its own copy of the lock.  */
6741             if (flags & CLONE_CHILD_SETTID)
6742                 put_user_u32(sys_gettid(), child_tidptr);
6743             if (flags & CLONE_PARENT_SETTID)
6744                 put_user_u32(sys_gettid(), parent_tidptr);
6745             ts = (TaskState *)cpu->opaque;
6746             if (flags & CLONE_SETTLS)
6747                 cpu_set_tls (env, newtls);
6748             if (flags & CLONE_CHILD_CLEARTID)
6749                 ts->child_tidptr = child_tidptr;
6750         } else {
6751             cpu_clone_regs_parent(env, flags);
6752             fork_end(0);
6753         }
6754     }
6755     return ret;
6756 }
6757 
6758 /* Warning: doesn't handle Linux-specific flags... */
6759 static int target_to_host_fcntl_cmd(int cmd)
6760 {
6761     int ret;
6762 
6763     switch(cmd) {
6764     case TARGET_F_DUPFD:
6765     case TARGET_F_GETFD:
6766     case TARGET_F_SETFD:
6767     case TARGET_F_GETFL:
6768     case TARGET_F_SETFL:
6769     case TARGET_F_OFD_GETLK:
6770     case TARGET_F_OFD_SETLK:
6771     case TARGET_F_OFD_SETLKW:
6772         ret = cmd;
6773         break;
6774     case TARGET_F_GETLK:
6775         ret = F_GETLK64;
6776         break;
6777     case TARGET_F_SETLK:
6778         ret = F_SETLK64;
6779         break;
6780     case TARGET_F_SETLKW:
6781         ret = F_SETLKW64;
6782         break;
6783     case TARGET_F_GETOWN:
6784         ret = F_GETOWN;
6785         break;
6786     case TARGET_F_SETOWN:
6787         ret = F_SETOWN;
6788         break;
6789     case TARGET_F_GETSIG:
6790         ret = F_GETSIG;
6791         break;
6792     case TARGET_F_SETSIG:
6793         ret = F_SETSIG;
6794         break;
6795 #if TARGET_ABI_BITS == 32
6796     case TARGET_F_GETLK64:
6797         ret = F_GETLK64;
6798         break;
6799     case TARGET_F_SETLK64:
6800         ret = F_SETLK64;
6801         break;
6802     case TARGET_F_SETLKW64:
6803         ret = F_SETLKW64;
6804         break;
6805 #endif
6806     case TARGET_F_SETLEASE:
6807         ret = F_SETLEASE;
6808         break;
6809     case TARGET_F_GETLEASE:
6810         ret = F_GETLEASE;
6811         break;
6812 #ifdef F_DUPFD_CLOEXEC
6813     case TARGET_F_DUPFD_CLOEXEC:
6814         ret = F_DUPFD_CLOEXEC;
6815         break;
6816 #endif
6817     case TARGET_F_NOTIFY:
6818         ret = F_NOTIFY;
6819         break;
6820 #ifdef F_GETOWN_EX
6821     case TARGET_F_GETOWN_EX:
6822         ret = F_GETOWN_EX;
6823         break;
6824 #endif
6825 #ifdef F_SETOWN_EX
6826     case TARGET_F_SETOWN_EX:
6827         ret = F_SETOWN_EX;
6828         break;
6829 #endif
6830 #ifdef F_SETPIPE_SZ
6831     case TARGET_F_SETPIPE_SZ:
6832         ret = F_SETPIPE_SZ;
6833         break;
6834     case TARGET_F_GETPIPE_SZ:
6835         ret = F_GETPIPE_SZ;
6836         break;
6837 #endif
6838 #ifdef F_ADD_SEALS
6839     case TARGET_F_ADD_SEALS:
6840         ret = F_ADD_SEALS;
6841         break;
6842     case TARGET_F_GET_SEALS:
6843         ret = F_GET_SEALS;
6844         break;
6845 #endif
6846     default:
6847         ret = -TARGET_EINVAL;
6848         break;
6849     }
6850 
6851 #if defined(__powerpc64__)
6852     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6853      * are not supported by the kernel.  The glibc fcntl wrapper adjusts
6854      * them to 5, 6 and 7 before making the syscall.  Since we make the
6855      * syscall directly, adjust to what the kernel supports.
6856      */
6857     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6858         ret -= F_GETLK64 - 5;
6859     }
6860 #endif
6861 
6862     return ret;
6863 }
6864 
6865 #define FLOCK_TRANSTBL \
6866     switch (type) { \
6867     TRANSTBL_CONVERT(F_RDLCK); \
6868     TRANSTBL_CONVERT(F_WRLCK); \
6869     TRANSTBL_CONVERT(F_UNLCK); \
6870     }
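/*
 * FLOCK_TRANSTBL is expanded twice with different TRANSTBL_CONVERT
 * definitions, generating both the target-to-host and host-to-target
 * lock-type conversions from a single list.
 */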
6871 
6872 static int target_to_host_flock(int type)
6873 {
6874 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6875     FLOCK_TRANSTBL
6876 #undef  TRANSTBL_CONVERT
6877     return -TARGET_EINVAL;
6878 }
6879 
6880 static int host_to_target_flock(int type)
6881 {
6882 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6883     FLOCK_TRANSTBL
6884 #undef  TRANSTBL_CONVERT
6885     /* If we don't know how to convert the value coming from
6886      * the host, copy it to the target field as-is.
6887      */
6888     return type;
6889 }
6890 
6891 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6892                                             abi_ulong target_flock_addr)
6893 {
6894     struct target_flock *target_fl;
6895     int l_type;
6896 
6897     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6898         return -TARGET_EFAULT;
6899     }
6900 
6901     __get_user(l_type, &target_fl->l_type);
6902     l_type = target_to_host_flock(l_type);
6903     if (l_type < 0) {
6904         return l_type;
6905     }
6906     fl->l_type = l_type;
6907     __get_user(fl->l_whence, &target_fl->l_whence);
6908     __get_user(fl->l_start, &target_fl->l_start);
6909     __get_user(fl->l_len, &target_fl->l_len);
6910     __get_user(fl->l_pid, &target_fl->l_pid);
6911     unlock_user_struct(target_fl, target_flock_addr, 0);
6912     return 0;
6913 }
6914 
6915 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6916                                           const struct flock64 *fl)
6917 {
6918     struct target_flock *target_fl;
6919     short l_type;
6920 
6921     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6922         return -TARGET_EFAULT;
6923     }
6924 
6925     l_type = host_to_target_flock(fl->l_type);
6926     __put_user(l_type, &target_fl->l_type);
6927     __put_user(fl->l_whence, &target_fl->l_whence);
6928     __put_user(fl->l_start, &target_fl->l_start);
6929     __put_user(fl->l_len, &target_fl->l_len);
6930     __put_user(fl->l_pid, &target_fl->l_pid);
6931     unlock_user_struct(target_fl, target_flock_addr, 1);
6932     return 0;
6933 }
6934 
6935 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6936 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6937 
6938 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6939 struct target_oabi_flock64 {
6940     abi_short l_type;
6941     abi_short l_whence;
6942     abi_llong l_start;
6943     abi_llong l_len;
6944     abi_int   l_pid;
6945 } QEMU_PACKED;
6946 
6947 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6948                                                    abi_ulong target_flock_addr)
6949 {
6950     struct target_oabi_flock64 *target_fl;
6951     int l_type;
6952 
6953     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6954         return -TARGET_EFAULT;
6955     }
6956 
6957     __get_user(l_type, &target_fl->l_type);
6958     l_type = target_to_host_flock(l_type);
6959     if (l_type < 0) {
6960         return l_type;
6961     }
6962     fl->l_type = l_type;
6963     __get_user(fl->l_whence, &target_fl->l_whence);
6964     __get_user(fl->l_start, &target_fl->l_start);
6965     __get_user(fl->l_len, &target_fl->l_len);
6966     __get_user(fl->l_pid, &target_fl->l_pid);
6967     unlock_user_struct(target_fl, target_flock_addr, 0);
6968     return 0;
6969 }
6970 
6971 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6972                                                  const struct flock64 *fl)
6973 {
6974     struct target_oabi_flock64 *target_fl;
6975     short l_type;
6976 
6977     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6978         return -TARGET_EFAULT;
6979     }
6980 
6981     l_type = host_to_target_flock(fl->l_type);
6982     __put_user(l_type, &target_fl->l_type);
6983     __put_user(fl->l_whence, &target_fl->l_whence);
6984     __put_user(fl->l_start, &target_fl->l_start);
6985     __put_user(fl->l_len, &target_fl->l_len);
6986     __put_user(fl->l_pid, &target_fl->l_pid);
6987     unlock_user_struct(target_fl, target_flock_addr, 1);
6988     return 0;
6989 }
6990 #endif
6991 
6992 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6993                                               abi_ulong target_flock_addr)
6994 {
6995     struct target_flock64 *target_fl;
6996     int l_type;
6997 
6998     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6999         return -TARGET_EFAULT;
7000     }
7001 
7002     __get_user(l_type, &target_fl->l_type);
7003     l_type = target_to_host_flock(l_type);
7004     if (l_type < 0) {
7005         return l_type;
7006     }
7007     fl->l_type = l_type;
7008     __get_user(fl->l_whence, &target_fl->l_whence);
7009     __get_user(fl->l_start, &target_fl->l_start);
7010     __get_user(fl->l_len, &target_fl->l_len);
7011     __get_user(fl->l_pid, &target_fl->l_pid);
7012     unlock_user_struct(target_fl, target_flock_addr, 0);
7013     return 0;
7014 }
7015 
7016 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7017                                             const struct flock64 *fl)
7018 {
7019     struct target_flock64 *target_fl;
7020     short l_type;
7021 
7022     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7023         return -TARGET_EFAULT;
7024     }
7025 
7026     l_type = host_to_target_flock(fl->l_type);
7027     __put_user(l_type, &target_fl->l_type);
7028     __put_user(fl->l_whence, &target_fl->l_whence);
7029     __put_user(fl->l_start, &target_fl->l_start);
7030     __put_user(fl->l_len, &target_fl->l_len);
7031     __put_user(fl->l_pid, &target_fl->l_pid);
7032     unlock_user_struct(target_fl, target_flock_addr, 1);
7033     return 0;
7034 }
7035 
7036 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7037 {
7038     struct flock64 fl64;
7039 #ifdef F_GETOWN_EX
7040     struct f_owner_ex fox;
7041     struct target_f_owner_ex *target_fox;
7042 #endif
7043     abi_long ret;
7044     int host_cmd = target_to_host_fcntl_cmd(cmd);
7045 
7046     if (host_cmd == -TARGET_EINVAL)
7047         return host_cmd;
7048 
7049     switch(cmd) {
7050     case TARGET_F_GETLK:
7051         ret = copy_from_user_flock(&fl64, arg);
7052         if (ret) {
7053             return ret;
7054         }
7055         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7056         if (ret == 0) {
7057             ret = copy_to_user_flock(arg, &fl64);
7058         }
7059         break;
7060 
7061     case TARGET_F_SETLK:
7062     case TARGET_F_SETLKW:
7063         ret = copy_from_user_flock(&fl64, arg);
7064         if (ret) {
7065             return ret;
7066         }
7067         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7068         break;
7069 
7070     case TARGET_F_GETLK64:
7071     case TARGET_F_OFD_GETLK:
7072         ret = copy_from_user_flock64(&fl64, arg);
7073         if (ret) {
7074             return ret;
7075         }
7076         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7077         if (ret == 0) {
7078             ret = copy_to_user_flock64(arg, &fl64);
7079         }
7080         break;
7081     case TARGET_F_SETLK64:
7082     case TARGET_F_SETLKW64:
7083     case TARGET_F_OFD_SETLK:
7084     case TARGET_F_OFD_SETLKW:
7085         ret = copy_from_user_flock64(&fl64, arg);
7086         if (ret) {
7087             return ret;
7088         }
7089         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7090         break;
7091 
7092     case TARGET_F_GETFL:
7093         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7094         if (ret >= 0) {
7095             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7096         }
7097         break;
7098 
7099     case TARGET_F_SETFL:
7100         ret = get_errno(safe_fcntl(fd, host_cmd,
7101                                    target_to_host_bitmask(arg,
7102                                                           fcntl_flags_tbl)));
7103         break;
7104 
7105 #ifdef F_GETOWN_EX
7106     case TARGET_F_GETOWN_EX:
7107         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7108         if (ret >= 0) {
7109             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7110                 return -TARGET_EFAULT;
7111             target_fox->type = tswap32(fox.type);
7112             target_fox->pid = tswap32(fox.pid);
7113             unlock_user_struct(target_fox, arg, 1);
7114         }
7115         break;
7116 #endif
7117 
7118 #ifdef F_SETOWN_EX
7119     case TARGET_F_SETOWN_EX:
7120         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7121             return -TARGET_EFAULT;
7122         fox.type = tswap32(target_fox->type);
7123         fox.pid = tswap32(target_fox->pid);
7124         unlock_user_struct(target_fox, arg, 0);
7125         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7126         break;
7127 #endif
7128 
7129     case TARGET_F_SETSIG:
7130         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7131         break;
7132 
7133     case TARGET_F_GETSIG:
7134         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7135         break;
7136 
7137     case TARGET_F_SETOWN:
7138     case TARGET_F_GETOWN:
7139     case TARGET_F_SETLEASE:
7140     case TARGET_F_GETLEASE:
7141     case TARGET_F_SETPIPE_SZ:
7142     case TARGET_F_GETPIPE_SZ:
7143     case TARGET_F_ADD_SEALS:
7144     case TARGET_F_GET_SEALS:
7145         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7146         break;
7147 
7148     default:
7149         ret = get_errno(safe_fcntl(fd, cmd, arg));
7150         break;
7151     }
7152     return ret;
7153 }
7154 
7155 #ifdef USE_UID16
7156 
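/*
 * For targets with 16-bit uid_t/gid_t: ids above 65535 are clamped to the
 * overflow id 65534, while the 16-bit -1 ("no change") sentinel is widened
 * so that the host syscall still sees -1.
 */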
7157 static inline int high2lowuid(int uid)
7158 {
7159     if (uid > 65535)
7160         return 65534;
7161     else
7162         return uid;
7163 }
7164 
7165 static inline int high2lowgid(int gid)
7166 {
7167     if (gid > 65535)
7168         return 65534;
7169     else
7170         return gid;
7171 }
7172 
7173 static inline int low2highuid(int uid)
7174 {
7175     if ((int16_t)uid == -1)
7176         return -1;
7177     else
7178         return uid;
7179 }
7180 
7181 static inline int low2highgid(int gid)
7182 {
7183     if ((int16_t)gid == -1)
7184         return -1;
7185     else
7186         return gid;
7187 }
7188 static inline int tswapid(int id)
7189 {
7190     return tswap16(id);
7191 }
7192 
7193 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7194 
7195 #else /* !USE_UID16 */
7196 static inline int high2lowuid(int uid)
7197 {
7198     return uid;
7199 }
7200 static inline int high2lowgid(int gid)
7201 {
7202     return gid;
7203 }
7204 static inline int low2highuid(int uid)
7205 {
7206     return uid;
7207 }
7208 static inline int low2highgid(int gid)
7209 {
7210     return gid;
7211 }
7212 static inline int tswapid(int id)
7213 {
7214     return tswap32(id);
7215 }
7216 
7217 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7218 
7219 #endif /* USE_UID16 */
7220 
7221 /* We must do direct syscalls for setting UID/GID, because we want to
7222  * implement the Linux system call semantics of "change only for this thread",
7223  * not the libc/POSIX semantics of "change for all threads in process".
7224  * (See http://ewontfix.com/17/ for more details.)
7225  * We use the 32-bit version of the syscalls if present; if it is not
7226  * then either the host architecture supports 32-bit UIDs natively with
7227  * the standard syscall, or the 16-bit UID is the best we can do.
7228  */
7229 #ifdef __NR_setuid32
7230 #define __NR_sys_setuid __NR_setuid32
7231 #else
7232 #define __NR_sys_setuid __NR_setuid
7233 #endif
7234 #ifdef __NR_setgid32
7235 #define __NR_sys_setgid __NR_setgid32
7236 #else
7237 #define __NR_sys_setgid __NR_setgid
7238 #endif
7239 #ifdef __NR_setresuid32
7240 #define __NR_sys_setresuid __NR_setresuid32
7241 #else
7242 #define __NR_sys_setresuid __NR_setresuid
7243 #endif
7244 #ifdef __NR_setresgid32
7245 #define __NR_sys_setresgid __NR_setresgid32
7246 #else
7247 #define __NR_sys_setresgid __NR_setresgid
7248 #endif
7249 
7250 _syscall1(int, sys_setuid, uid_t, uid)
7251 _syscall1(int, sys_setgid, gid_t, gid)
7252 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7253 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7254 
7255 void syscall_init(void)
7256 {
7257     IOCTLEntry *ie;
7258     const argtype *arg_type;
7259     int size;
7260 
7261     thunk_init(STRUCT_MAX);
7262 
7263 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7264 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7265 #include "syscall_types.h"
7266 #undef STRUCT
7267 #undef STRUCT_SPECIAL
7268 
7269     /* We patch the ioctl size if necessary.  We rely on the fact that
7270        no ioctl has all the bits set to '1' in the size field. */
7271     ie = ioctl_entries;
7272     while (ie->target_cmd != 0) {
7273         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7274             TARGET_IOC_SIZEMASK) {
7275             arg_type = ie->arg_type;
7276             if (arg_type[0] != TYPE_PTR) {
7277                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7278                         ie->target_cmd);
7279                 exit(1);
7280             }
7281             arg_type++;
7282             size = thunk_type_size(arg_type, 0);
7283             ie->target_cmd = (ie->target_cmd &
7284                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7285                 (size << TARGET_IOC_SIZESHIFT);
7286         }
7287 
7288         /* automatic consistency check if same arch */
7289 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7290     (defined(__x86_64__) && defined(TARGET_X86_64))
7291         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7292             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7293                     ie->name, ie->target_cmd, ie->host_cmd);
7294         }
7295 #endif
7296         ie++;
7297     }
7298 }
7299 
7300 #ifdef TARGET_NR_truncate64
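/*
 * On 32-bit ABIs where 64-bit syscall arguments must start in an even
 * register pair (regpairs_aligned()), a padding argument precedes the
 * offset, so its two halves arrive one slot later.
 */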
7301 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7302                                          abi_long arg2,
7303                                          abi_long arg3,
7304                                          abi_long arg4)
7305 {
7306     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7307         arg2 = arg3;
7308         arg3 = arg4;
7309     }
7310     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7311 }
7312 #endif
7313 
7314 #ifdef TARGET_NR_ftruncate64
7315 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7316                                           abi_long arg2,
7317                                           abi_long arg3,
7318                                           abi_long arg4)
7319 {
7320     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7321         arg2 = arg3;
7322         arg3 = arg4;
7323     }
7324     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7325 }
7326 #endif
7327 
7328 #if defined(TARGET_NR_timer_settime) || \
7329     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7330 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7331                                                  abi_ulong target_addr)
7332 {
7333     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7334                                 offsetof(struct target_itimerspec,
7335                                          it_interval)) ||
7336         target_to_host_timespec(&host_its->it_value, target_addr +
7337                                 offsetof(struct target_itimerspec,
7338                                          it_value))) {
7339         return -TARGET_EFAULT;
7340     }
7341 
7342     return 0;
7343 }
7344 #endif
7345 
7346 #if defined(TARGET_NR_timer_settime64) || \
7347     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7348 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7349                                                    abi_ulong target_addr)
7350 {
7351     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7352                                   offsetof(struct target__kernel_itimerspec,
7353                                            it_interval)) ||
7354         target_to_host_timespec64(&host_its->it_value, target_addr +
7355                                   offsetof(struct target__kernel_itimerspec,
7356                                            it_value))) {
7357         return -TARGET_EFAULT;
7358     }
7359 
7360     return 0;
7361 }
7362 #endif
7363 
7364 #if ((defined(TARGET_NR_timerfd_gettime) || \
7365       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7366       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7367 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7368                                                  struct itimerspec *host_its)
7369 {
7370     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7371                                                        it_interval),
7372                                 &host_its->it_interval) ||
7373         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7374                                                        it_value),
7375                                 &host_its->it_value)) {
7376         return -TARGET_EFAULT;
7377     }
7378     return 0;
7379 }
7380 #endif
7381 
7382 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7383       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7384       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7385 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7386                                                    struct itimerspec *host_its)
7387 {
7388     if (host_to_target_timespec64(target_addr +
7389                                   offsetof(struct target__kernel_itimerspec,
7390                                            it_interval),
7391                                   &host_its->it_interval) ||
7392         host_to_target_timespec64(target_addr +
7393                                   offsetof(struct target__kernel_itimerspec,
7394                                            it_value),
7395                                   &host_its->it_value)) {
7396         return -TARGET_EFAULT;
7397     }
7398     return 0;
7399 }
7400 #endif
7401 
7402 #if defined(TARGET_NR_adjtimex) || \
7403     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7404 static inline abi_long target_to_host_timex(struct timex *host_tx,
7405                                             abi_long target_addr)
7406 {
7407     struct target_timex *target_tx;
7408 
7409     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7410         return -TARGET_EFAULT;
7411     }
7412 
7413     __get_user(host_tx->modes, &target_tx->modes);
7414     __get_user(host_tx->offset, &target_tx->offset);
7415     __get_user(host_tx->freq, &target_tx->freq);
7416     __get_user(host_tx->maxerror, &target_tx->maxerror);
7417     __get_user(host_tx->esterror, &target_tx->esterror);
7418     __get_user(host_tx->status, &target_tx->status);
7419     __get_user(host_tx->constant, &target_tx->constant);
7420     __get_user(host_tx->precision, &target_tx->precision);
7421     __get_user(host_tx->tolerance, &target_tx->tolerance);
7422     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7423     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7424     __get_user(host_tx->tick, &target_tx->tick);
7425     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7426     __get_user(host_tx->jitter, &target_tx->jitter);
7427     __get_user(host_tx->shift, &target_tx->shift);
7428     __get_user(host_tx->stabil, &target_tx->stabil);
7429     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7430     __get_user(host_tx->calcnt, &target_tx->calcnt);
7431     __get_user(host_tx->errcnt, &target_tx->errcnt);
7432     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7433     __get_user(host_tx->tai, &target_tx->tai);
7434 
7435     unlock_user_struct(target_tx, target_addr, 0);
7436     return 0;
7437 }
7438 
7439 static inline abi_long host_to_target_timex(abi_long target_addr,
7440                                             struct timex *host_tx)
7441 {
7442     struct target_timex *target_tx;
7443 
7444     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7445         return -TARGET_EFAULT;
7446     }
7447 
7448     __put_user(host_tx->modes, &target_tx->modes);
7449     __put_user(host_tx->offset, &target_tx->offset);
7450     __put_user(host_tx->freq, &target_tx->freq);
7451     __put_user(host_tx->maxerror, &target_tx->maxerror);
7452     __put_user(host_tx->esterror, &target_tx->esterror);
7453     __put_user(host_tx->status, &target_tx->status);
7454     __put_user(host_tx->constant, &target_tx->constant);
7455     __put_user(host_tx->precision, &target_tx->precision);
7456     __put_user(host_tx->tolerance, &target_tx->tolerance);
7457     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7458     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7459     __put_user(host_tx->tick, &target_tx->tick);
7460     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7461     __put_user(host_tx->jitter, &target_tx->jitter);
7462     __put_user(host_tx->shift, &target_tx->shift);
7463     __put_user(host_tx->stabil, &target_tx->stabil);
7464     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7465     __put_user(host_tx->calcnt, &target_tx->calcnt);
7466     __put_user(host_tx->errcnt, &target_tx->errcnt);
7467     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7468     __put_user(host_tx->tai, &target_tx->tai);
7469 
7470     unlock_user_struct(target_tx, target_addr, 1);
7471     return 0;
7472 }
7473 #endif
7474 
7475 
7476 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7477 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7478                                               abi_long target_addr)
7479 {
7480     struct target__kernel_timex *target_tx;
7481 
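    /*
     * The time member of the 64-bit timex layout is converted separately
     * because its seconds field is 64 bits wide even on 32-bit targets.
     */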
7482     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7483                                  offsetof(struct target__kernel_timex,
7484                                           time))) {
7485         return -TARGET_EFAULT;
7486     }
7487 
7488     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7489         return -TARGET_EFAULT;
7490     }
7491 
7492     __get_user(host_tx->modes, &target_tx->modes);
7493     __get_user(host_tx->offset, &target_tx->offset);
7494     __get_user(host_tx->freq, &target_tx->freq);
7495     __get_user(host_tx->maxerror, &target_tx->maxerror);
7496     __get_user(host_tx->esterror, &target_tx->esterror);
7497     __get_user(host_tx->status, &target_tx->status);
7498     __get_user(host_tx->constant, &target_tx->constant);
7499     __get_user(host_tx->precision, &target_tx->precision);
7500     __get_user(host_tx->tolerance, &target_tx->tolerance);
7501     __get_user(host_tx->tick, &target_tx->tick);
7502     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7503     __get_user(host_tx->jitter, &target_tx->jitter);
7504     __get_user(host_tx->shift, &target_tx->shift);
7505     __get_user(host_tx->stabil, &target_tx->stabil);
7506     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7507     __get_user(host_tx->calcnt, &target_tx->calcnt);
7508     __get_user(host_tx->errcnt, &target_tx->errcnt);
7509     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7510     __get_user(host_tx->tai, &target_tx->tai);
7511 
7512     unlock_user_struct(target_tx, target_addr, 0);
7513     return 0;
7514 }
7515 
7516 static inline abi_long host_to_target_timex64(abi_long target_addr,
7517                                               struct timex *host_tx)
7518 {
7519     struct target__kernel_timex *target_tx;
7520 
7521     if (copy_to_user_timeval64(target_addr +
7522                                offsetof(struct target__kernel_timex, time),
7523                                &host_tx->time)) {
7524         return -TARGET_EFAULT;
7525     }
7526 
7527     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7528         return -TARGET_EFAULT;
7529     }
7530 
7531     __put_user(host_tx->modes, &target_tx->modes);
7532     __put_user(host_tx->offset, &target_tx->offset);
7533     __put_user(host_tx->freq, &target_tx->freq);
7534     __put_user(host_tx->maxerror, &target_tx->maxerror);
7535     __put_user(host_tx->esterror, &target_tx->esterror);
7536     __put_user(host_tx->status, &target_tx->status);
7537     __put_user(host_tx->constant, &target_tx->constant);
7538     __put_user(host_tx->precision, &target_tx->precision);
7539     __put_user(host_tx->tolerance, &target_tx->tolerance);
7540     __put_user(host_tx->tick, &target_tx->tick);
7541     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7542     __put_user(host_tx->jitter, &target_tx->jitter);
7543     __put_user(host_tx->shift, &target_tx->shift);
7544     __put_user(host_tx->stabil, &target_tx->stabil);
7545     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7546     __put_user(host_tx->calcnt, &target_tx->calcnt);
7547     __put_user(host_tx->errcnt, &target_tx->errcnt);
7548     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7549     __put_user(host_tx->tai, &target_tx->tai);
7550 
7551     unlock_user_struct(target_tx, target_addr, 1);
7552     return 0;
7553 }
7554 #endif
7555 
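/*
 * Fall back to the libc-internal union member when the libc headers do
 * not expose sigev_notify_thread_id directly.
 */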
7556 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7557 #define sigev_notify_thread_id _sigev_un._tid
7558 #endif
7559 
7560 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7561                                                abi_ulong target_addr)
7562 {
7563     struct target_sigevent *target_sevp;
7564 
7565     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7566         return -TARGET_EFAULT;
7567     }
7568 
7569     /* This union is awkward on 64 bit systems because it has a 32 bit
7570      * integer and a pointer in it; we follow the conversion approach
7571      * used for handling sigval types in signal.c so the guest should get
7572      * the correct value back even if we did a 64 bit byteswap and it's
7573      * using the 32 bit integer.
7574      */
7575     host_sevp->sigev_value.sival_ptr =
7576         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7577     host_sevp->sigev_signo =
7578         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7579     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7580     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7581 
7582     unlock_user_struct(target_sevp, target_addr, 1);
7583     return 0;
7584 }
7585 
7586 #if defined(TARGET_NR_mlockall)
7587 static inline int target_to_host_mlockall_arg(int arg)
7588 {
7589     int result = 0;
7590 
7591     if (arg & TARGET_MCL_CURRENT) {
7592         result |= MCL_CURRENT;
7593     }
7594     if (arg & TARGET_MCL_FUTURE) {
7595         result |= MCL_FUTURE;
7596     }
7597 #ifdef MCL_ONFAULT
7598     if (arg & TARGET_MCL_ONFAULT) {
7599         result |= MCL_ONFAULT;
7600     }
7601 #endif
7602 
7603     return result;
7604 }
7605 #endif
7606 
7607 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7608      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7609      defined(TARGET_NR_newfstatat))
7610 static inline abi_long host_to_target_stat64(void *cpu_env,
7611                                              abi_ulong target_addr,
7612                                              struct stat *host_st)
7613 {
7614 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7615     if (((CPUARMState *)cpu_env)->eabi) {
7616         struct target_eabi_stat64 *target_st;
7617 
7618         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7619             return -TARGET_EFAULT;
7620         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7621         __put_user(host_st->st_dev, &target_st->st_dev);
7622         __put_user(host_st->st_ino, &target_st->st_ino);
7623 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7624         __put_user(host_st->st_ino, &target_st->__st_ino);
7625 #endif
7626         __put_user(host_st->st_mode, &target_st->st_mode);
7627         __put_user(host_st->st_nlink, &target_st->st_nlink);
7628         __put_user(host_st->st_uid, &target_st->st_uid);
7629         __put_user(host_st->st_gid, &target_st->st_gid);
7630         __put_user(host_st->st_rdev, &target_st->st_rdev);
7631         __put_user(host_st->st_size, &target_st->st_size);
7632         __put_user(host_st->st_blksize, &target_st->st_blksize);
7633         __put_user(host_st->st_blocks, &target_st->st_blocks);
7634         __put_user(host_st->st_atime, &target_st->target_st_atime);
7635         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7636         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7637 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7638         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7639         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7640         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7641 #endif
7642         unlock_user_struct(target_st, target_addr, 1);
7643     } else
7644 #endif
7645     {
7646 #if defined(TARGET_HAS_STRUCT_STAT64)
7647         struct target_stat64 *target_st;
7648 #else
7649         struct target_stat *target_st;
7650 #endif
7651 
7652         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7653             return -TARGET_EFAULT;
7654         memset(target_st, 0, sizeof(*target_st));
7655         __put_user(host_st->st_dev, &target_st->st_dev);
7656         __put_user(host_st->st_ino, &target_st->st_ino);
7657 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7658         __put_user(host_st->st_ino, &target_st->__st_ino);
7659 #endif
7660         __put_user(host_st->st_mode, &target_st->st_mode);
7661         __put_user(host_st->st_nlink, &target_st->st_nlink);
7662         __put_user(host_st->st_uid, &target_st->st_uid);
7663         __put_user(host_st->st_gid, &target_st->st_gid);
7664         __put_user(host_st->st_rdev, &target_st->st_rdev);
7665         /* XXX: better use of kernel struct */
7666         __put_user(host_st->st_size, &target_st->st_size);
7667         __put_user(host_st->st_blksize, &target_st->st_blksize);
7668         __put_user(host_st->st_blocks, &target_st->st_blocks);
7669         __put_user(host_st->st_atime, &target_st->target_st_atime);
7670         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7671         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7672 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7673         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7674         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7675         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7676 #endif
7677         unlock_user_struct(target_st, target_addr, 1);
7678     }
7679 
7680     return 0;
7681 }
7682 #endif
7683 
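/*
 * Copy a statx result (held in a host-native struct target_statx) out to
 * guest memory, byte-swapping each field via __put_user().
 */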
7684 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7685 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7686                                             abi_ulong target_addr)
7687 {
7688     struct target_statx *target_stx;
7689 
7690     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7691         return -TARGET_EFAULT;
7692     }
7693     memset(target_stx, 0, sizeof(*target_stx));
7694 
7695     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7696     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7697     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7698     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7699     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7700     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7701     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7702     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7703     __put_user(host_stx->stx_size, &target_stx->stx_size);
7704     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7705     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7706     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7707     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7708     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7709     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7710     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7711     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7712     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7713     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7714     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7715     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7716     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7717     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7718 
7719     unlock_user_struct(target_stx, target_addr, 1);
7720 
7721     return 0;
7722 }
7723 #endif
7724 
7725 static int do_sys_futex(int *uaddr, int op, int val,
7726                          const struct timespec *timeout, int *uaddr2,
7727                          int val3)
7728 {
7729 #if HOST_LONG_BITS == 64
7730 #if defined(__NR_futex)
7731     /* A 64-bit host has a 64-bit time_t; no _time64 variant is defined. */
7732     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7733 
7734 #endif
7735 #else /* HOST_LONG_BITS == 64 */
7736 #if defined(__NR_futex_time64)
7737     if (sizeof(timeout->tv_sec) == 8) {
7738         /* _time64 function on 32bit arch */
7739         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7740     }
7741 #endif
7742 #if defined(__NR_futex)
7743     /* old function on 32bit arch */
7744     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7745 #endif
7746 #endif /* HOST_LONG_BITS == 64 */
7747     g_assert_not_reached();
7748 }
7749 
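/*
 * Like do_sys_futex(), but via the safe_futex()/safe_futex_time64()
 * wrappers, so that guest signals arriving around the blocking call are
 * handled correctly; returns -TARGET_ENOSYS if the host provides neither
 * futex syscall.
 */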
7750 static int do_safe_futex(int *uaddr, int op, int val,
7751                          const struct timespec *timeout, int *uaddr2,
7752                          int val3)
7753 {
7754 #if HOST_LONG_BITS == 64
7755 #if defined(__NR_futex)
7756     /* A 64-bit host has a 64-bit time_t; no _time64 variant is defined. */
7757     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7758 #endif
7759 #else /* HOST_LONG_BITS == 64 */
7760 #if defined(__NR_futex_time64)
7761     if (sizeof(timeout->tv_sec) == 8) {
7762         /* _time64 function on 32bit arch */
7763         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7764                                            val3));
7765     }
7766 #endif
7767 #if defined(__NR_futex)
7768     /* old function on 32bit arch */
7769     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7770 #endif
7771 #endif /* HOST_LONG_BITS == 64 */
7772     return -TARGET_ENOSYS;
7773 }
7774 
7775 /* ??? Using host futex calls even when target atomic operations
7776    are not really atomic probably breaks things.  However, implementing
7777    futexes locally would make futexes shared between multiple processes
7778    tricky; such shared futexes are probably useless anyway, because guest
7779    atomic operations would not work across processes either.  */
7780 #if defined(TARGET_NR_futex)
7781 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7782                     target_ulong timeout, target_ulong uaddr2, int val3)
7783 {
7784     struct timespec ts, *pts;
7785     int base_op;
7786 
7787     /* ??? We assume FUTEX_* constants are the same on both host
7788        and target.  */
7789 #ifdef FUTEX_CMD_MASK
7790     base_op = op & FUTEX_CMD_MASK;
7791 #else
7792     base_op = op;
7793 #endif
7794     switch (base_op) {
7795     case FUTEX_WAIT:
7796     case FUTEX_WAIT_BITSET:
7797         if (timeout) {
7798             pts = &ts;
7799             if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
7800         } else {
7801             pts = NULL;
7802         }
7803         return do_safe_futex(g2h(cpu, uaddr),
7804                              op, tswap32(val), pts, NULL, val3);
7805     case FUTEX_WAKE:
7806         return do_safe_futex(g2h(cpu, uaddr),
7807                              op, val, NULL, NULL, 0);
7808     case FUTEX_FD:
7809         return do_safe_futex(g2h(cpu, uaddr),
7810                              op, val, NULL, NULL, 0);
7811     case FUTEX_REQUEUE:
7812     case FUTEX_CMP_REQUEUE:
7813     case FUTEX_WAKE_OP:
7814         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7815            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7816            But the prototype takes a `struct timespec *'; insert casts
7817            to satisfy the compiler.  We do not need to tswap TIMEOUT
7818            since it's not compared to guest memory.  */
7819         pts = (struct timespec *)(uintptr_t) timeout;
7820         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7821                              (base_op == FUTEX_CMP_REQUEUE
7822                               ? tswap32(val3) : val3));
7823     default:
7824         return -TARGET_ENOSYS;
7825     }
7826 }
7827 #endif
7828 
7829 #if defined(TARGET_NR_futex_time64)
7830 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7831                            int val, target_ulong timeout,
7832                            target_ulong uaddr2, int val3)
7833 {
7834     struct timespec ts, *pts;
7835     int base_op;
7836 
7837     /* ??? We assume FUTEX_* constants are the same on both host
7838        and target.  */
7839 #ifdef FUTEX_CMD_MASK
7840     base_op = op & FUTEX_CMD_MASK;
7841 #else
7842     base_op = op;
7843 #endif
7844     switch (base_op) {
7845     case FUTEX_WAIT:
7846     case FUTEX_WAIT_BITSET:
7847         if (timeout) {
7848             pts = &ts;
7849             if (target_to_host_timespec64(pts, timeout)) {
7850                 return -TARGET_EFAULT;
7851             }
7852         } else {
7853             pts = NULL;
7854         }
7855         return do_safe_futex(g2h(cpu, uaddr), op,
7856                              tswap32(val), pts, NULL, val3);
7857     case FUTEX_WAKE:
7858         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7859     case FUTEX_FD:
7860         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7861     case FUTEX_REQUEUE:
7862     case FUTEX_CMP_REQUEUE:
7863     case FUTEX_WAKE_OP:
7864         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7865            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7866            But the prototype takes a `struct timespec *'; insert casts
7867            to satisfy the compiler.  We do not need to tswap TIMEOUT
7868            since it's not compared to guest memory.  */
7869         pts = (struct timespec *)(uintptr_t) timeout;
7870         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7871                              (base_op == FUTEX_CMP_REQUEUE
7872                               ? tswap32(val3) : val3));
7873     default:
7874         return -TARGET_ENOSYS;
7875     }
7876 }
7877 #endif
7878 
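/*
 * name_to_handle_at()/open_by_handle_at(): the file_handle structure is
 * copied between guest and host with handle_bytes and handle_type
 * byte-swapped; the handle payload itself is opaque and copied verbatim.
 */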
7879 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7880 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7881                                      abi_long handle, abi_long mount_id,
7882                                      abi_long flags)
7883 {
7884     struct file_handle *target_fh;
7885     struct file_handle *fh;
7886     int mid = 0;
7887     abi_long ret;
7888     char *name;
7889     unsigned int size, total_size;
7890 
7891     if (get_user_s32(size, handle)) {
7892         return -TARGET_EFAULT;
7893     }
7894 
7895     name = lock_user_string(pathname);
7896     if (!name) {
7897         return -TARGET_EFAULT;
7898     }
7899 
7900     total_size = sizeof(struct file_handle) + size;
7901     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7902     if (!target_fh) {
7903         unlock_user(name, pathname, 0);
7904         return -TARGET_EFAULT;
7905     }
7906 
7907     fh = g_malloc0(total_size);
7908     fh->handle_bytes = size;
7909 
7910     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7911     unlock_user(name, pathname, 0);
7912 
7913     /* man name_to_handle_at(2):
7914      * Other than the use of the handle_bytes field, the caller should treat
7915      * the file_handle structure as an opaque data type
7916      */
7917 
7918     memcpy(target_fh, fh, total_size);
7919     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7920     target_fh->handle_type = tswap32(fh->handle_type);
7921     g_free(fh);
7922     unlock_user(target_fh, handle, total_size);
7923 
7924     if (put_user_s32(mid, mount_id)) {
7925         return -TARGET_EFAULT;
7926     }
7927 
7928     return ret;
7929 
7930 }
7931 #endif
7932 
7933 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7934 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7935                                      abi_long flags)
7936 {
7937     struct file_handle *target_fh;
7938     struct file_handle *fh;
7939     unsigned int size, total_size;
7940     abi_long ret;
7941 
7942     if (get_user_s32(size, handle)) {
7943         return -TARGET_EFAULT;
7944     }
7945 
7946     total_size = sizeof(struct file_handle) + size;
7947     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7948     if (!target_fh) {
7949         return -TARGET_EFAULT;
7950     }
7951 
7952     fh = g_memdup(target_fh, total_size);
7953     fh->handle_bytes = size;
7954     fh->handle_type = tswap32(target_fh->handle_type);
7955 
7956     ret = get_errno(open_by_handle_at(mount_fd, fh,
7957                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7958 
7959     g_free(fh);
7960 
7961     unlock_user(target_fh, handle, total_size);
7962 
7963     return ret;
7964 }
7965 #endif
7966 
7967 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7968 
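/*
 * signalfd()/signalfd4(): convert the guest sigset and flags, then
 * register target_signalfd_trans on the new descriptor so that the
 * signalfd_siginfo records read from it are converted into the guest's
 * layout.
 */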
7969 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7970 {
7971     int host_flags;
7972     target_sigset_t *target_mask;
7973     sigset_t host_mask;
7974     abi_long ret;
7975 
7976     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7977         return -TARGET_EINVAL;
7978     }
7979     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7980         return -TARGET_EFAULT;
7981     }
7982 
7983     target_to_host_sigset(&host_mask, target_mask);
7984 
7985     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7986 
7987     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7988     if (ret >= 0) {
7989         fd_trans_register(ret, &target_signalfd_trans);
7990     }
7991 
7992     unlock_user_struct(target_mask, mask, 0);
7993 
7994     return ret;
7995 }
7996 #endif
7997 
7998 /* Map host to target signal numbers for the wait family of syscalls.
7999    Assume all other status bits are the same.  */
8000 int host_to_target_waitstatus(int status)
8001 {
8002     if (WIFSIGNALED(status)) {
8003         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8004     }
8005     if (WIFSTOPPED(status)) {
8006         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8007                | (status & 0xff);
8008     }
8009     return status;
8010 }
8011 
8012 static int open_self_cmdline(void *cpu_env, int fd)
8013 {
8014     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8015     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8016     int i;
8017 
8018     for (i = 0; i < bprm->argc; i++) {
8019         size_t len = strlen(bprm->argv[i]) + 1;
8020 
8021         if (write(fd, bprm->argv[i], len) != len) {
8022             return -1;
8023         }
8024     }
8025 
8026     return 0;
8027 }
8028 
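/*
 * Synthesize /proc/self/maps for the guest.  Walk the host mappings from
 * read_self_maps() and report only the ranges that map back into the
 * guest address space, with addresses and permission flags translated to
 * the guest's view of memory.
 */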
8029 static int open_self_maps(void *cpu_env, int fd)
8030 {
8031     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8032     TaskState *ts = cpu->opaque;
8033     GSList *map_info = read_self_maps();
8034     GSList *s;
8035     int count;
8036 
8037     for (s = map_info; s; s = g_slist_next(s)) {
8038         MapInfo *e = (MapInfo *) s->data;
8039 
8040         if (h2g_valid(e->start)) {
8041             unsigned long min = e->start;
8042             unsigned long max = e->end;
8043             int flags = page_get_flags(h2g(min));
8044             const char *path;
8045 
8046             max = h2g_valid(max - 1) ?
8047                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8048 
8049             if (page_check_range(h2g(min), max - min, flags) == -1) {
8050                 continue;
8051             }
8052 
8053             if (h2g(min) == ts->info->stack_limit) {
8054                 path = "[stack]";
8055             } else {
8056                 path = e->path;
8057             }
8058 
8059             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8060                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8061                             h2g(min), h2g(max - 1) + 1,
8062                             (flags & PAGE_READ) ? 'r' : '-',
8063                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8064                             (flags & PAGE_EXEC) ? 'x' : '-',
8065                             e->is_priv ? 'p' : 's',
8066                             (uint64_t) e->offset, e->dev, e->inode);
8067             if (path) {
8068                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8069             } else {
8070                 dprintf(fd, "\n");
8071             }
8072         }
8073     }
8074 
8075     free_self_maps(map_info);
8076 
8077 #ifdef TARGET_VSYSCALL_PAGE
8078     /*
8079      * We only support execution from the vsyscall page.
8080      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8081      */
8082     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8083                     " --xp 00000000 00:00 0",
8084                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8085     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8086 #endif
8087 
8088     return 0;
8089 }
8090 
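/*
 * Synthesize a minimal /proc/self/stat for the guest: only the pid,
 * executable name, ppid and stack start fields are filled in; every
 * other field is reported as 0.
 */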
8091 static int open_self_stat(void *cpu_env, int fd)
8092 {
8093     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8094     TaskState *ts = cpu->opaque;
8095     g_autoptr(GString) buf = g_string_new(NULL);
8096     int i;
8097 
8098     for (i = 0; i < 44; i++) {
8099         if (i == 0) {
8100             /* pid */
8101             g_string_printf(buf, FMT_pid " ", getpid());
8102         } else if (i == 1) {
8103             /* app name */
8104             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8105             bin = bin ? bin + 1 : ts->bprm->argv[0];
8106             g_string_printf(buf, "(%.15s) ", bin);
8107         } else if (i == 3) {
8108             /* ppid */
8109             g_string_printf(buf, FMT_pid " ", getppid());
8110         } else if (i == 27) {
8111             /* stack bottom */
8112             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8113         } else {
8114             /* all other fields are not emulated and read as 0 */
8115             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8116         }
8117 
8118         if (write(fd, buf->str, buf->len) != buf->len) {
8119             return -1;
8120         }
8121     }
8122 
8123     return 0;
8124 }
8125 
8126 static int open_self_auxv(void *cpu_env, int fd)
8127 {
8128     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8129     TaskState *ts = cpu->opaque;
8130     abi_ulong auxv = ts->info->saved_auxv;
8131     abi_ulong len = ts->info->auxv_len;
8132     char *ptr;
8133 
8134     /*
8135      * The auxiliary vector is stored on the target process stack.
8136      * Read the whole auxv vector and copy it to the file.
8137      */
8138     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8139     if (ptr != NULL) {
8140         while (len > 0) {
8141             ssize_t r;
8142             r = write(fd, ptr, len);
8143             if (r <= 0) {
8144                 break;
8145             }
8146             len -= r;
8147             ptr += r;
8148         }
8149         lseek(fd, 0, SEEK_SET);
8150         unlock_user(ptr, auxv, len);
8151     }
8152 
8153     return 0;
8154 }
8155 
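/*
 * Return non-zero if filename refers to the given entry of this
 * process's own /proc directory, i.e. "/proc/self/<entry>" or
 * "/proc/<pid>/<entry>" where <pid> is our own pid.
 */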
8156 static int is_proc_myself(const char *filename, const char *entry)
8157 {
8158     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8159         filename += strlen("/proc/");
8160         if (!strncmp(filename, "self/", strlen("self/"))) {
8161             filename += strlen("self/");
8162         } else if (*filename >= '1' && *filename <= '9') {
8163             char myself[80];
8164             snprintf(myself, sizeof(myself), "%d/", getpid());
8165             if (!strncmp(filename, myself, strlen(myself))) {
8166                 filename += strlen(myself);
8167             } else {
8168                 return 0;
8169             }
8170         } else {
8171             return 0;
8172         }
8173         if (!strcmp(filename, entry)) {
8174             return 1;
8175         }
8176     }
8177     return 0;
8178 }
8179 
8180 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8181     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8182 static int is_proc(const char *filename, const char *entry)
8183 {
8184     return strcmp(filename, entry) == 0;
8185 }
8186 #endif
8187 
8188 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8189 static int open_net_route(void *cpu_env, int fd)
8190 {
8191     FILE *fp;
8192     char *line = NULL;
8193     size_t len = 0;
8194     ssize_t read;
8195 
8196     fp = fopen("/proc/net/route", "r");
8197     if (fp == NULL) {
8198         return -1;
8199     }
8200 
8201     /* read header */
8202 
8203     read = getline(&line, &len, fp);
8204     dprintf(fd, "%s", line);
8205 
8206     /* read routes */
8207 
8208     while ((read = getline(&line, &len, fp)) != -1) {
8209         char iface[16];
8210         uint32_t dest, gw, mask;
8211         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8212         int fields;
8213 
8214         fields = sscanf(line,
8215                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8216                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8217                         &mask, &mtu, &window, &irtt);
8218         if (fields != 11) {
8219             continue;
8220         }
8221         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8222                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8223                 metric, tswap32(mask), mtu, window, irtt);
8224     }
8225 
8226     free(line);
8227     fclose(fp);
8228 
8229     return 0;
8230 }
8231 #endif
8232 
8233 #if defined(TARGET_SPARC)
8234 static int open_cpuinfo(void *cpu_env, int fd)
8235 {
8236     dprintf(fd, "type\t\t: sun4u\n");
8237     return 0;
8238 }
8239 #endif
8240 
8241 #if defined(TARGET_HPPA)
8242 static int open_cpuinfo(void *cpu_env, int fd)
8243 {
8244     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8245     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8246     dprintf(fd, "capabilities\t: os32\n");
8247     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8248     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8249     return 0;
8250 }
8251 #endif
8252 
8253 #if defined(TARGET_M68K)
8254 static int open_hardware(void *cpu_env, int fd)
8255 {
8256     dprintf(fd, "Model:\t\tqemu-m68k\n");
8257     return 0;
8258 }
8259 #endif
8260 
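/*
 * openat() with emulation of selected /proc files.  When the guest opens
 * one of the paths in the fakes[] table, the contents are generated into
 * an unlinked temporary file and that descriptor is returned; all other
 * paths are forwarded to safe_openat().
 */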
8261 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8262 {
8263     struct fake_open {
8264         const char *filename;
8265         int (*fill)(void *cpu_env, int fd);
8266         int (*cmp)(const char *s1, const char *s2);
8267     };
8268     const struct fake_open *fake_open;
8269     static const struct fake_open fakes[] = {
8270         { "maps", open_self_maps, is_proc_myself },
8271         { "stat", open_self_stat, is_proc_myself },
8272         { "auxv", open_self_auxv, is_proc_myself },
8273         { "cmdline", open_self_cmdline, is_proc_myself },
8274 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8275         { "/proc/net/route", open_net_route, is_proc },
8276 #endif
8277 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8278         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8279 #endif
8280 #if defined(TARGET_M68K)
8281         { "/proc/hardware", open_hardware, is_proc },
8282 #endif
8283         { NULL, NULL, NULL }
8284     };
8285 
8286     if (is_proc_myself(pathname, "exe")) {
8287         int execfd = qemu_getauxval(AT_EXECFD);
8288         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8289     }
8290 
8291     for (fake_open = fakes; fake_open->filename; fake_open++) {
8292         if (fake_open->cmp(pathname, fake_open->filename)) {
8293             break;
8294         }
8295     }
8296 
8297     if (fake_open->filename) {
8298         const char *tmpdir;
8299         char filename[PATH_MAX];
8300         int fd, r;
8301 
8302         /* create a temporary file to hold the generated contents */
8303         tmpdir = getenv("TMPDIR");
8304         if (!tmpdir)
8305             tmpdir = "/tmp";
8306         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8307         fd = mkstemp(filename);
8308         if (fd < 0) {
8309             return fd;
8310         }
8311         unlink(filename);
8312 
8313         if ((r = fake_open->fill(cpu_env, fd))) {
8314             int e = errno;
8315             close(fd);
8316             errno = e;
8317             return r;
8318         }
8319         lseek(fd, 0, SEEK_SET);
8320 
8321         return fd;
8322     }
8323 
8324     return safe_openat(dirfd, path(pathname), flags, mode);
8325 }
8326 
8327 #define TIMER_MAGIC 0x0caf0000
8328 #define TIMER_MAGIC_MASK 0xffff0000
8329 
8330 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8331 static target_timer_t get_timer_id(abi_long arg)
8332 {
8333     target_timer_t timerid = arg;
8334 
8335     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8336         return -TARGET_EINVAL;
8337     }
8338 
8339     timerid &= 0xffff;
8340 
8341     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8342         return -TARGET_EINVAL;
8343     }
8344 
8345     return timerid;
8346 }
8347 
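/*
 * Convert a CPU affinity bitmap between the target's abi_ulong-based
 * layout and the host's unsigned long layout, one bit at a time, so that
 * differences in word size and byte order are handled correctly.
 */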
8348 static int target_to_host_cpu_mask(unsigned long *host_mask,
8349                                    size_t host_size,
8350                                    abi_ulong target_addr,
8351                                    size_t target_size)
8352 {
8353     unsigned target_bits = sizeof(abi_ulong) * 8;
8354     unsigned host_bits = sizeof(*host_mask) * 8;
8355     abi_ulong *target_mask;
8356     unsigned i, j;
8357 
8358     assert(host_size >= target_size);
8359 
8360     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8361     if (!target_mask) {
8362         return -TARGET_EFAULT;
8363     }
8364     memset(host_mask, 0, host_size);
8365 
8366     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8367         unsigned bit = i * target_bits;
8368         abi_ulong val;
8369 
8370         __get_user(val, &target_mask[i]);
8371         for (j = 0; j < target_bits; j++, bit++) {
8372             if (val & (1UL << j)) {
8373                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8374             }
8375         }
8376     }
8377 
8378     unlock_user(target_mask, target_addr, 0);
8379     return 0;
8380 }
8381 
8382 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8383                                    size_t host_size,
8384                                    abi_ulong target_addr,
8385                                    size_t target_size)
8386 {
8387     unsigned target_bits = sizeof(abi_ulong) * 8;
8388     unsigned host_bits = sizeof(*host_mask) * 8;
8389     abi_ulong *target_mask;
8390     unsigned i, j;
8391 
8392     assert(host_size >= target_size);
8393 
8394     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8395     if (!target_mask) {
8396         return -TARGET_EFAULT;
8397     }
8398 
8399     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8400         unsigned bit = i * target_bits;
8401         abi_ulong val = 0;
8402 
8403         for (j = 0; j < target_bits; j++, bit++) {
8404             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8405                 val |= 1UL << j;
8406             }
8407         }
8408         __put_user(val, &target_mask[i]);
8409     }
8410 
8411     unlock_user(target_mask, target_addr, target_size);
8412     return 0;
8413 }
8414 
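/*
 * getdents()/getdents64() emulation: read the host directory entries into
 * a scratch buffer, then repack each record into the target's dirent
 * layout in guest memory.  If the repacked records overflow the guest
 * buffer, seek back so the unreported entries are picked up by the next
 * call.
 */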
8415 #ifdef TARGET_NR_getdents
8416 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8417 {
8418     g_autofree void *hdirp = NULL;
8419     void *tdirp;
8420     int hlen, hoff, toff;
8421     int hreclen, treclen;
8422     off64_t prev_diroff = 0;
8423 
8424     hdirp = g_try_malloc(count);
8425     if (!hdirp) {
8426         return -TARGET_ENOMEM;
8427     }
8428 
8429 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8430     hlen = sys_getdents(dirfd, hdirp, count);
8431 #else
8432     hlen = sys_getdents64(dirfd, hdirp, count);
8433 #endif
8434 
8435     hlen = get_errno(hlen);
8436     if (is_error(hlen)) {
8437         return hlen;
8438     }
8439 
8440     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8441     if (!tdirp) {
8442         return -TARGET_EFAULT;
8443     }
8444 
8445     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8446 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8447         struct linux_dirent *hde = hdirp + hoff;
8448 #else
8449         struct linux_dirent64 *hde = hdirp + hoff;
8450 #endif
8451         struct target_dirent *tde = tdirp + toff;
8452         int namelen;
8453         uint8_t type;
8454 
8455         namelen = strlen(hde->d_name);
8456         hreclen = hde->d_reclen;
8457         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8458         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8459 
8460         if (toff + treclen > count) {
8461             /*
8462              * If the host struct is smaller than the target struct, or
8463              * requires less alignment and thus packs into less space,
8464              * then the host can return more entries than we can pass
8465              * on to the guest.
8466              */
8467             if (toff == 0) {
8468                 toff = -TARGET_EINVAL; /* result buffer is too small */
8469                 break;
8470             }
8471             /*
8472              * Return what we have, resetting the file pointer to the
8473              * location of the first record not returned.
8474              */
8475             lseek64(dirfd, prev_diroff, SEEK_SET);
8476             break;
8477         }
8478 
8479         prev_diroff = hde->d_off;
8480         tde->d_ino = tswapal(hde->d_ino);
8481         tde->d_off = tswapal(hde->d_off);
8482         tde->d_reclen = tswap16(treclen);
8483         memcpy(tde->d_name, hde->d_name, namelen + 1);
8484 
8485         /*
8486          * The getdents type is in what was formerly a padding byte at the
8487          * end of the structure.
8488          */
8489 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8490         type = *((uint8_t *)hde + hreclen - 1);
8491 #else
8492         type = hde->d_type;
8493 #endif
8494         *((uint8_t *)tde + treclen - 1) = type;
8495     }
8496 
8497     unlock_user(tdirp, arg2, toff);
8498     return toff;
8499 }
8500 #endif /* TARGET_NR_getdents */
8501 
8502 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8503 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8504 {
8505     g_autofree void *hdirp = NULL;
8506     void *tdirp;
8507     int hlen, hoff, toff;
8508     int hreclen, treclen;
8509     off64_t prev_diroff = 0;
8510 
8511     hdirp = g_try_malloc(count);
8512     if (!hdirp) {
8513         return -TARGET_ENOMEM;
8514     }
8515 
8516     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8517     if (is_error(hlen)) {
8518         return hlen;
8519     }
8520 
8521     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8522     if (!tdirp) {
8523         return -TARGET_EFAULT;
8524     }
8525 
8526     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8527         struct linux_dirent64 *hde = hdirp + hoff;
8528         struct target_dirent64 *tde = tdirp + toff;
8529         int namelen;
8530 
8531         namelen = strlen(hde->d_name) + 1;
8532         hreclen = hde->d_reclen;
8533         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8534         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8535 
8536         if (toff + treclen > count) {
8537             /*
8538              * If the host struct is smaller than the target struct, or
8539              * requires less alignment and thus packs into less space,
8540              * then the host can return more entries than we can pass
8541              * on to the guest.
8542              */
8543             if (toff == 0) {
8544                 toff = -TARGET_EINVAL; /* result buffer is too small */
8545                 break;
8546             }
8547             /*
8548              * Return what we have, resetting the file pointer to the
8549              * location of the first record not returned.
8550              */
8551             lseek64(dirfd, prev_diroff, SEEK_SET);
8552             break;
8553         }
8554 
8555         prev_diroff = hde->d_off;
8556         tde->d_ino = tswap64(hde->d_ino);
8557         tde->d_off = tswap64(hde->d_off);
8558         tde->d_reclen = tswap16(treclen);
8559         tde->d_type = hde->d_type;
8560         memcpy(tde->d_name, hde->d_name, namelen);
8561     }
8562 
8563     unlock_user(tdirp, arg2, toff);
8564     return toff;
8565 }
8566 #endif /* TARGET_NR_getdents64 */
8567 
8568 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8569 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8570 #endif
8571 
8572 /* This is an internal helper for do_syscall so that it has a single
8573  * return point, allowing actions such as logging of syscall results
8574  * to be performed.
8575  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8576  */
8577 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8578                             abi_long arg2, abi_long arg3, abi_long arg4,
8579                             abi_long arg5, abi_long arg6, abi_long arg7,
8580                             abi_long arg8)
8581 {
8582     CPUState *cpu = env_cpu(cpu_env);
8583     abi_long ret;
8584 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8585     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8586     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8587     || defined(TARGET_NR_statx)
8588     struct stat st;
8589 #endif
8590 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8591     || defined(TARGET_NR_fstatfs)
8592     struct statfs stfs;
8593 #endif
8594     void *p;
8595 
8596     switch(num) {
8597     case TARGET_NR_exit:
8598         /* In old applications this may be used to implement _exit(2).
8599            However, in threaded applications it is used for thread termination,
8600            and _exit_group is used for application termination.
8601            Do thread termination if we have more than one thread.  */
8602 
8603         if (block_signals()) {
8604             return -QEMU_ERESTARTSYS;
8605         }
8606 
8607         pthread_mutex_lock(&clone_lock);
8608 
8609         if (CPU_NEXT(first_cpu)) {
8610             TaskState *ts = cpu->opaque;
8611 
8612             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8613             object_unref(OBJECT(cpu));
8614             /*
8615              * At this point the CPU should be unrealized and removed
8616              * from cpu lists. We can clean-up the rest of the thread
8617              * data without the lock held.
8618              */
8619 
8620             pthread_mutex_unlock(&clone_lock);
8621 
8622             if (ts->child_tidptr) {
8623                 put_user_u32(0, ts->child_tidptr);
8624                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8625                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8626             }
8627             thread_cpu = NULL;
8628             g_free(ts);
8629             rcu_unregister_thread();
8630             pthread_exit(NULL);
8631         }
8632 
8633         pthread_mutex_unlock(&clone_lock);
8634         preexit_cleanup(cpu_env, arg1);
8635         _exit(arg1);
8636         return 0; /* avoid warning */
8637     case TARGET_NR_read:
8638         if (arg2 == 0 && arg3 == 0) {
8639             return get_errno(safe_read(arg1, 0, 0));
8640         } else {
8641             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8642                 return -TARGET_EFAULT;
8643             ret = get_errno(safe_read(arg1, p, arg3));
8644             if (ret >= 0 &&
8645                 fd_trans_host_to_target_data(arg1)) {
8646                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8647             }
8648             unlock_user(p, arg2, ret);
8649         }
8650         return ret;
8651     case TARGET_NR_write:
8652         if (arg2 == 0 && arg3 == 0) {
8653             return get_errno(safe_write(arg1, 0, 0));
8654         }
8655         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8656             return -TARGET_EFAULT;
8657         if (fd_trans_target_to_host_data(arg1)) {
8658             void *copy = g_malloc(arg3);
8659             memcpy(copy, p, arg3);
8660             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8661             if (ret >= 0) {
8662                 ret = get_errno(safe_write(arg1, copy, ret));
8663             }
8664             g_free(copy);
8665         } else {
8666             ret = get_errno(safe_write(arg1, p, arg3));
8667         }
8668         unlock_user(p, arg2, 0);
8669         return ret;
8670 
8671 #ifdef TARGET_NR_open
8672     case TARGET_NR_open:
8673         if (!(p = lock_user_string(arg1)))
8674             return -TARGET_EFAULT;
8675         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8676                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8677                                   arg3));
8678         fd_trans_unregister(ret);
8679         unlock_user(p, arg1, 0);
8680         return ret;
8681 #endif
8682     case TARGET_NR_openat:
8683         if (!(p = lock_user_string(arg2)))
8684             return -TARGET_EFAULT;
8685         ret = get_errno(do_openat(cpu_env, arg1, p,
8686                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8687                                   arg4));
8688         fd_trans_unregister(ret);
8689         unlock_user(p, arg2, 0);
8690         return ret;
8691 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8692     case TARGET_NR_name_to_handle_at:
8693         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8694         return ret;
8695 #endif
8696 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8697     case TARGET_NR_open_by_handle_at:
8698         ret = do_open_by_handle_at(arg1, arg2, arg3);
8699         fd_trans_unregister(ret);
8700         return ret;
8701 #endif
8702     case TARGET_NR_close:
8703         fd_trans_unregister(arg1);
8704         return get_errno(close(arg1));
8705 
8706     case TARGET_NR_brk:
8707         return do_brk(arg1);
8708 #ifdef TARGET_NR_fork
8709     case TARGET_NR_fork:
8710         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8711 #endif
8712 #ifdef TARGET_NR_waitpid
8713     case TARGET_NR_waitpid:
8714         {
8715             int status;
8716             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8717             if (!is_error(ret) && arg2 && ret
8718                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8719                 return -TARGET_EFAULT;
8720         }
8721         return ret;
8722 #endif
8723 #ifdef TARGET_NR_waitid
8724     case TARGET_NR_waitid:
8725         {
8726             siginfo_t info;
8727             info.si_pid = 0;
8728             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8729             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8730                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8731                     return -TARGET_EFAULT;
8732                 host_to_target_siginfo(p, &info);
8733                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8734             }
8735         }
8736         return ret;
8737 #endif
8738 #ifdef TARGET_NR_creat /* not on alpha */
8739     case TARGET_NR_creat:
8740         if (!(p = lock_user_string(arg1)))
8741             return -TARGET_EFAULT;
8742         ret = get_errno(creat(p, arg2));
8743         fd_trans_unregister(ret);
8744         unlock_user(p, arg1, 0);
8745         return ret;
8746 #endif
8747 #ifdef TARGET_NR_link
8748     case TARGET_NR_link:
8749         {
8750             void * p2;
8751             p = lock_user_string(arg1);
8752             p2 = lock_user_string(arg2);
8753             if (!p || !p2)
8754                 ret = -TARGET_EFAULT;
8755             else
8756                 ret = get_errno(link(p, p2));
8757             unlock_user(p2, arg2, 0);
8758             unlock_user(p, arg1, 0);
8759         }
8760         return ret;
8761 #endif
8762 #if defined(TARGET_NR_linkat)
8763     case TARGET_NR_linkat:
8764         {
8765             void * p2 = NULL;
8766             if (!arg2 || !arg4)
8767                 return -TARGET_EFAULT;
8768             p  = lock_user_string(arg2);
8769             p2 = lock_user_string(arg4);
8770             if (!p || !p2)
8771                 ret = -TARGET_EFAULT;
8772             else
8773                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8774             unlock_user(p, arg2, 0);
8775             unlock_user(p2, arg4, 0);
8776         }
8777         return ret;
8778 #endif
8779 #ifdef TARGET_NR_unlink
8780     case TARGET_NR_unlink:
8781         if (!(p = lock_user_string(arg1)))
8782             return -TARGET_EFAULT;
8783         ret = get_errno(unlink(p));
8784         unlock_user(p, arg1, 0);
8785         return ret;
8786 #endif
8787 #if defined(TARGET_NR_unlinkat)
8788     case TARGET_NR_unlinkat:
8789         if (!(p = lock_user_string(arg2)))
8790             return -TARGET_EFAULT;
8791         ret = get_errno(unlinkat(arg1, p, arg3));
8792         unlock_user(p, arg2, 0);
8793         return ret;
8794 #endif
8795     case TARGET_NR_execve:
8796         {
8797             char **argp, **envp;
8798             int argc, envc;
8799             abi_ulong gp;
8800             abi_ulong guest_argp;
8801             abi_ulong guest_envp;
8802             abi_ulong addr;
8803             char **q;
8804 
8805             argc = 0;
8806             guest_argp = arg2;
8807             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8808                 if (get_user_ual(addr, gp))
8809                     return -TARGET_EFAULT;
8810                 if (!addr)
8811                     break;
8812                 argc++;
8813             }
8814             envc = 0;
8815             guest_envp = arg3;
8816             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8817                 if (get_user_ual(addr, gp))
8818                     return -TARGET_EFAULT;
8819                 if (!addr)
8820                     break;
8821                 envc++;
8822             }
8823 
8824             argp = g_new0(char *, argc + 1);
8825             envp = g_new0(char *, envc + 1);
8826 
8827             for (gp = guest_argp, q = argp; gp;
8828                   gp += sizeof(abi_ulong), q++) {
8829                 if (get_user_ual(addr, gp))
8830                     goto execve_efault;
8831                 if (!addr)
8832                     break;
8833                 if (!(*q = lock_user_string(addr)))
8834                     goto execve_efault;
8835             }
8836             *q = NULL;
8837 
8838             for (gp = guest_envp, q = envp; gp;
8839                   gp += sizeof(abi_ulong), q++) {
8840                 if (get_user_ual(addr, gp))
8841                     goto execve_efault;
8842                 if (!addr)
8843                     break;
8844                 if (!(*q = lock_user_string(addr)))
8845                     goto execve_efault;
8846             }
8847             *q = NULL;
8848 
8849             if (!(p = lock_user_string(arg1)))
8850                 goto execve_efault;
8851             /* Although execve() is not an interruptible syscall it is
8852              * a special case where we must use the safe_syscall wrapper:
8853              * if we allow a signal to happen before we make the host
8854              * syscall then we will 'lose' it, because at the point of
8855              * execve the process leaves QEMU's control. So we use the
8856              * safe syscall wrapper to ensure that we either take the
8857              * signal as a guest signal, or else it does not happen
8858              * before the execve completes and makes it the other
8859              * program's problem.
8860              */
8861             ret = get_errno(safe_execve(p, argp, envp));
8862             unlock_user(p, arg1, 0);
8863 
8864             goto execve_end;
8865 
8866         execve_efault:
8867             ret = -TARGET_EFAULT;
8868 
8869         execve_end:
8870             for (gp = guest_argp, q = argp; *q;
8871                   gp += sizeof(abi_ulong), q++) {
8872                 if (get_user_ual(addr, gp)
8873                     || !addr)
8874                     break;
8875                 unlock_user(*q, addr, 0);
8876             }
8877             for (gp = guest_envp, q = envp; *q;
8878                   gp += sizeof(abi_ulong), q++) {
8879                 if (get_user_ual(addr, gp)
8880                     || !addr)
8881                     break;
8882                 unlock_user(*q, addr, 0);
8883             }
8884 
8885             g_free(argp);
8886             g_free(envp);
8887         }
8888         return ret;
8889     case TARGET_NR_chdir:
8890         if (!(p = lock_user_string(arg1)))
8891             return -TARGET_EFAULT;
8892         ret = get_errno(chdir(p));
8893         unlock_user(p, arg1, 0);
8894         return ret;
8895 #ifdef TARGET_NR_time
8896     case TARGET_NR_time:
8897         {
8898             time_t host_time;
8899             ret = get_errno(time(&host_time));
8900             if (!is_error(ret)
8901                 && arg1
8902                 && put_user_sal(host_time, arg1))
8903                 return -TARGET_EFAULT;
8904         }
8905         return ret;
8906 #endif
8907 #ifdef TARGET_NR_mknod
8908     case TARGET_NR_mknod:
8909         if (!(p = lock_user_string(arg1)))
8910             return -TARGET_EFAULT;
8911         ret = get_errno(mknod(p, arg2, arg3));
8912         unlock_user(p, arg1, 0);
8913         return ret;
8914 #endif
8915 #if defined(TARGET_NR_mknodat)
8916     case TARGET_NR_mknodat:
8917         if (!(p = lock_user_string(arg2)))
8918             return -TARGET_EFAULT;
8919         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8920         unlock_user(p, arg2, 0);
8921         return ret;
8922 #endif
8923 #ifdef TARGET_NR_chmod
8924     case TARGET_NR_chmod:
8925         if (!(p = lock_user_string(arg1)))
8926             return -TARGET_EFAULT;
8927         ret = get_errno(chmod(p, arg2));
8928         unlock_user(p, arg1, 0);
8929         return ret;
8930 #endif
8931 #ifdef TARGET_NR_lseek
8932     case TARGET_NR_lseek:
8933         return get_errno(lseek(arg1, arg2, arg3));
8934 #endif
8935 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8936     /* Alpha specific */
8937     case TARGET_NR_getxpid:
8938         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8939         return get_errno(getpid());
8940 #endif
8941 #ifdef TARGET_NR_getpid
8942     case TARGET_NR_getpid:
8943         return get_errno(getpid());
8944 #endif
8945     case TARGET_NR_mount:
8946         {
8947             /* need to look at the data field */
8948             void *p2, *p3;
8949 
8950             if (arg1) {
8951                 p = lock_user_string(arg1);
8952                 if (!p) {
8953                     return -TARGET_EFAULT;
8954                 }
8955             } else {
8956                 p = NULL;
8957             }
8958 
8959             p2 = lock_user_string(arg2);
8960             if (!p2) {
8961                 if (arg1) {
8962                     unlock_user(p, arg1, 0);
8963                 }
8964                 return -TARGET_EFAULT;
8965             }
8966 
8967             if (arg3) {
8968                 p3 = lock_user_string(arg3);
8969                 if (!p3) {
8970                     if (arg1) {
8971                         unlock_user(p, arg1, 0);
8972                     }
8973                     unlock_user(p2, arg2, 0);
8974                     return -TARGET_EFAULT;
8975                 }
8976             } else {
8977                 p3 = NULL;
8978             }
8979 
8980             /* FIXME - arg5 should be locked, but it isn't clear how to
8981              * do that since it's not guaranteed to be a NULL-terminated
8982              * string.
8983              */
8984             if (!arg5) {
8985                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8986             } else {
8987                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8988             }
8989             ret = get_errno(ret);
8990 
8991             if (arg1) {
8992                 unlock_user(p, arg1, 0);
8993             }
8994             unlock_user(p2, arg2, 0);
8995             if (arg3) {
8996                 unlock_user(p3, arg3, 0);
8997             }
8998         }
8999         return ret;
9000 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9001 #if defined(TARGET_NR_umount)
9002     case TARGET_NR_umount:
9003 #endif
9004 #if defined(TARGET_NR_oldumount)
9005     case TARGET_NR_oldumount:
9006 #endif
9007         if (!(p = lock_user_string(arg1)))
9008             return -TARGET_EFAULT;
9009         ret = get_errno(umount(p));
9010         unlock_user(p, arg1, 0);
9011         return ret;
9012 #endif
9013 #ifdef TARGET_NR_stime /* not on alpha */
9014     case TARGET_NR_stime:
9015         {
9016             struct timespec ts;
9017             ts.tv_nsec = 0;
9018             if (get_user_sal(ts.tv_sec, arg1)) {
9019                 return -TARGET_EFAULT;
9020             }
9021             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9022         }
9023 #endif
9024 #ifdef TARGET_NR_alarm /* not on alpha */
9025     case TARGET_NR_alarm:
9026         return alarm(arg1);
9027 #endif
9028 #ifdef TARGET_NR_pause /* not on alpha */
9029     case TARGET_NR_pause:
9030         if (!block_signals()) {
9031             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9032         }
9033         return -TARGET_EINTR;
9034 #endif
9035 #ifdef TARGET_NR_utime
9036     case TARGET_NR_utime:
9037         {
9038             struct utimbuf tbuf, *host_tbuf;
9039             struct target_utimbuf *target_tbuf;
9040             if (arg2) {
9041                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9042                     return -TARGET_EFAULT;
9043                 tbuf.actime = tswapal(target_tbuf->actime);
9044                 tbuf.modtime = tswapal(target_tbuf->modtime);
9045                 unlock_user_struct(target_tbuf, arg2, 0);
9046                 host_tbuf = &tbuf;
9047             } else {
9048                 host_tbuf = NULL;
9049             }
9050             if (!(p = lock_user_string(arg1)))
9051                 return -TARGET_EFAULT;
9052             ret = get_errno(utime(p, host_tbuf));
9053             unlock_user(p, arg1, 0);
9054         }
9055         return ret;
9056 #endif
9057 #ifdef TARGET_NR_utimes
9058     case TARGET_NR_utimes:
9059         {
9060             struct timeval *tvp, tv[2];
9061             if (arg2) {
9062                 if (copy_from_user_timeval(&tv[0], arg2)
9063                     || copy_from_user_timeval(&tv[1],
9064                                               arg2 + sizeof(struct target_timeval)))
9065                     return -TARGET_EFAULT;
9066                 tvp = tv;
9067             } else {
9068                 tvp = NULL;
9069             }
9070             if (!(p = lock_user_string(arg1)))
9071                 return -TARGET_EFAULT;
9072             ret = get_errno(utimes(p, tvp));
9073             unlock_user(p, arg1, 0);
9074         }
9075         return ret;
9076 #endif
9077 #if defined(TARGET_NR_futimesat)
9078     case TARGET_NR_futimesat:
9079         {
9080             struct timeval *tvp, tv[2];
9081             if (arg3) {
9082                 if (copy_from_user_timeval(&tv[0], arg3)
9083                     || copy_from_user_timeval(&tv[1],
9084                                               arg3 + sizeof(struct target_timeval)))
9085                     return -TARGET_EFAULT;
9086                 tvp = tv;
9087             } else {
9088                 tvp = NULL;
9089             }
9090             if (!(p = lock_user_string(arg2))) {
9091                 return -TARGET_EFAULT;
9092             }
9093             ret = get_errno(futimesat(arg1, path(p), tvp));
9094             unlock_user(p, arg2, 0);
9095         }
9096         return ret;
9097 #endif
9098 #ifdef TARGET_NR_access
9099     case TARGET_NR_access:
9100         if (!(p = lock_user_string(arg1))) {
9101             return -TARGET_EFAULT;
9102         }
9103         ret = get_errno(access(path(p), arg2));
9104         unlock_user(p, arg1, 0);
9105         return ret;
9106 #endif
9107 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9108     case TARGET_NR_faccessat:
9109         if (!(p = lock_user_string(arg2))) {
9110             return -TARGET_EFAULT;
9111         }
9112         ret = get_errno(faccessat(arg1, p, arg3, 0));
9113         unlock_user(p, arg2, 0);
9114         return ret;
9115 #endif
9116 #ifdef TARGET_NR_nice /* not on alpha */
9117     case TARGET_NR_nice:
9118         return get_errno(nice(arg1));
9119 #endif
9120     case TARGET_NR_sync:
9121         sync();
9122         return 0;
9123 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9124     case TARGET_NR_syncfs:
9125         return get_errno(syncfs(arg1));
9126 #endif
9127     case TARGET_NR_kill:
9128         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9129 #ifdef TARGET_NR_rename
9130     case TARGET_NR_rename:
9131         {
9132             void *p2;
9133             p = lock_user_string(arg1);
9134             p2 = lock_user_string(arg2);
9135             if (!p || !p2)
9136                 ret = -TARGET_EFAULT;
9137             else
9138                 ret = get_errno(rename(p, p2));
9139             unlock_user(p2, arg2, 0);
9140             unlock_user(p, arg1, 0);
9141         }
9142         return ret;
9143 #endif
9144 #if defined(TARGET_NR_renameat)
9145     case TARGET_NR_renameat:
9146         {
9147             void *p2;
9148             p  = lock_user_string(arg2);
9149             p2 = lock_user_string(arg4);
9150             if (!p || !p2)
9151                 ret = -TARGET_EFAULT;
9152             else
9153                 ret = get_errno(renameat(arg1, p, arg3, p2));
9154             unlock_user(p2, arg4, 0);
9155             unlock_user(p, arg2, 0);
9156         }
9157         return ret;
9158 #endif
9159 #if defined(TARGET_NR_renameat2)
9160     case TARGET_NR_renameat2:
9161         {
9162             void *p2;
9163             p  = lock_user_string(arg2);
9164             p2 = lock_user_string(arg4);
9165             if (!p || !p2) {
9166                 ret = -TARGET_EFAULT;
9167             } else {
9168                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9169             }
9170             unlock_user(p2, arg4, 0);
9171             unlock_user(p, arg2, 0);
9172         }
9173         return ret;
9174 #endif
9175 #ifdef TARGET_NR_mkdir
9176     case TARGET_NR_mkdir:
9177         if (!(p = lock_user_string(arg1)))
9178             return -TARGET_EFAULT;
9179         ret = get_errno(mkdir(p, arg2));
9180         unlock_user(p, arg1, 0);
9181         return ret;
9182 #endif
9183 #if defined(TARGET_NR_mkdirat)
9184     case TARGET_NR_mkdirat:
9185         if (!(p = lock_user_string(arg2)))
9186             return -TARGET_EFAULT;
9187         ret = get_errno(mkdirat(arg1, p, arg3));
9188         unlock_user(p, arg2, 0);
9189         return ret;
9190 #endif
9191 #ifdef TARGET_NR_rmdir
9192     case TARGET_NR_rmdir:
9193         if (!(p = lock_user_string(arg1)))
9194             return -TARGET_EFAULT;
9195         ret = get_errno(rmdir(p));
9196         unlock_user(p, arg1, 0);
9197         return ret;
9198 #endif
9199     case TARGET_NR_dup:
9200         ret = get_errno(dup(arg1));
9201         if (ret >= 0) {
9202             fd_trans_dup(arg1, ret);
9203         }
9204         return ret;
9205 #ifdef TARGET_NR_pipe
9206     case TARGET_NR_pipe:
9207         return do_pipe(cpu_env, arg1, 0, 0);
9208 #endif
9209 #ifdef TARGET_NR_pipe2
9210     case TARGET_NR_pipe2:
9211         return do_pipe(cpu_env, arg1,
9212                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9213 #endif
9214     case TARGET_NR_times:
9215         {
9216             struct target_tms *tmsp;
9217             struct tms tms;
9218             ret = get_errno(times(&tms));
9219             if (arg1) {
9220                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9221                 if (!tmsp)
9222                     return -TARGET_EFAULT;
9223                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9224                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9225                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9226                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9227             }
9228             if (!is_error(ret))
9229                 ret = host_to_target_clock_t(ret);
9230         }
9231         return ret;
9232     case TARGET_NR_acct:
9233         if (arg1 == 0) {
9234             ret = get_errno(acct(NULL));
9235         } else {
9236             if (!(p = lock_user_string(arg1))) {
9237                 return -TARGET_EFAULT;
9238             }
9239             ret = get_errno(acct(path(p)));
9240             unlock_user(p, arg1, 0);
9241         }
9242         return ret;
9243 #ifdef TARGET_NR_umount2
9244     case TARGET_NR_umount2:
9245         if (!(p = lock_user_string(arg1)))
9246             return -TARGET_EFAULT;
9247         ret = get_errno(umount2(p, arg2));
9248         unlock_user(p, arg1, 0);
9249         return ret;
9250 #endif
9251     case TARGET_NR_ioctl:
9252         return do_ioctl(arg1, arg2, arg3);
9253 #ifdef TARGET_NR_fcntl
9254     case TARGET_NR_fcntl:
9255         return do_fcntl(arg1, arg2, arg3);
9256 #endif
9257     case TARGET_NR_setpgid:
9258         return get_errno(setpgid(arg1, arg2));
9259     case TARGET_NR_umask:
9260         return get_errno(umask(arg1));
9261     case TARGET_NR_chroot:
9262         if (!(p = lock_user_string(arg1)))
9263             return -TARGET_EFAULT;
9264         ret = get_errno(chroot(p));
9265         unlock_user(p, arg1, 0);
9266         return ret;
9267 #ifdef TARGET_NR_dup2
9268     case TARGET_NR_dup2:
9269         ret = get_errno(dup2(arg1, arg2));
9270         if (ret >= 0) {
9271             fd_trans_dup(arg1, arg2);
9272         }
9273         return ret;
9274 #endif
9275 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9276     case TARGET_NR_dup3:
9277     {
9278         int host_flags;
9279 
9280         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9281             return -EINVAL;
9282         }
9283         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9284         ret = get_errno(dup3(arg1, arg2, host_flags));
9285         if (ret >= 0) {
9286             fd_trans_dup(arg1, arg2);
9287         }
9288         return ret;
9289     }
9290 #endif
9291 #ifdef TARGET_NR_getppid /* not on alpha */
9292     case TARGET_NR_getppid:
9293         return get_errno(getppid());
9294 #endif
9295 #ifdef TARGET_NR_getpgrp
9296     case TARGET_NR_getpgrp:
9297         return get_errno(getpgrp());
9298 #endif
9299     case TARGET_NR_setsid:
9300         return get_errno(setsid());
9301 #ifdef TARGET_NR_sigaction
9302     case TARGET_NR_sigaction:
9303         {
9304 #if defined(TARGET_MIPS)
9305             struct target_sigaction act, oact, *pact, *old_act;
9306 
9307             if (arg2) {
9308                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9309                     return -TARGET_EFAULT;
9310                 act._sa_handler = old_act->_sa_handler;
9311                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9312                 act.sa_flags = old_act->sa_flags;
9313                 unlock_user_struct(old_act, arg2, 0);
9314                 pact = &act;
9315             } else {
9316                 pact = NULL;
9317             }
9318 
9319             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9320 
9321             if (!is_error(ret) && arg3) {
9322                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9323                     return -TARGET_EFAULT;
9324                 old_act->_sa_handler = oact._sa_handler;
9325                 old_act->sa_flags = oact.sa_flags;
9326                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9327                 old_act->sa_mask.sig[1] = 0;
9328                 old_act->sa_mask.sig[2] = 0;
9329                 old_act->sa_mask.sig[3] = 0;
9330                 unlock_user_struct(old_act, arg3, 1);
9331             }
9332 #else
9333             struct target_old_sigaction *old_act;
9334             struct target_sigaction act, oact, *pact;
9335             if (arg2) {
9336                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9337                     return -TARGET_EFAULT;
9338                 act._sa_handler = old_act->_sa_handler;
9339                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9340                 act.sa_flags = old_act->sa_flags;
9341 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9342                 act.sa_restorer = old_act->sa_restorer;
9343 #endif
9344                 unlock_user_struct(old_act, arg2, 0);
9345                 pact = &act;
9346             } else {
9347                 pact = NULL;
9348             }
9349             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9350             if (!is_error(ret) && arg3) {
9351                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9352                     return -TARGET_EFAULT;
9353                 old_act->_sa_handler = oact._sa_handler;
9354                 old_act->sa_mask = oact.sa_mask.sig[0];
9355                 old_act->sa_flags = oact.sa_flags;
9356 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9357                 old_act->sa_restorer = oact.sa_restorer;
9358 #endif
9359                 unlock_user_struct(old_act, arg3, 1);
9360             }
9361 #endif
9362         }
9363         return ret;
9364 #endif
9365     case TARGET_NR_rt_sigaction:
9366         {
9367             /*
9368              * For Alpha and SPARC this is a 5 argument syscall, with
9369              * a 'restorer' parameter which must be copied into the
9370              * sa_restorer field of the sigaction struct.
9371              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9372              * and arg5 is the sigsetsize.
9373              */
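            /*
             * For illustration, the guest argument layouts implied by the
             * per-target definitions below (a sketch derived from the
             * comment above, not an independent ABI reference):
             *   generic: rt_sigaction(sig, act, oact, sigsetsize)
             *   Alpha:   rt_sigaction(sig, act, oact, sigsetsize, restorer)
             *   SPARC:   rt_sigaction(sig, act, oact, restorer, sigsetsize)
             */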
9374 #if defined(TARGET_ALPHA)
9375             target_ulong sigsetsize = arg4;
9376             target_ulong restorer = arg5;
9377 #elif defined(TARGET_SPARC)
9378             target_ulong restorer = arg4;
9379             target_ulong sigsetsize = arg5;
9380 #else
9381             target_ulong sigsetsize = arg4;
9382             target_ulong restorer = 0;
9383 #endif
9384             struct target_sigaction *act = NULL;
9385             struct target_sigaction *oact = NULL;
9386 
9387             if (sigsetsize != sizeof(target_sigset_t)) {
9388                 return -TARGET_EINVAL;
9389             }
9390             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9391                 return -TARGET_EFAULT;
9392             }
9393             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9394                 ret = -TARGET_EFAULT;
9395             } else {
9396                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9397                 if (oact) {
9398                     unlock_user_struct(oact, arg3, 1);
9399                 }
9400             }
9401             if (act) {
9402                 unlock_user_struct(act, arg2, 0);
9403             }
9404         }
9405         return ret;
9406 #ifdef TARGET_NR_sgetmask /* not on alpha */
9407     case TARGET_NR_sgetmask:
9408         {
9409             sigset_t cur_set;
9410             abi_ulong target_set;
9411             ret = do_sigprocmask(0, NULL, &cur_set);
9412             if (!ret) {
9413                 host_to_target_old_sigset(&target_set, &cur_set);
9414                 ret = target_set;
9415             }
9416         }
9417         return ret;
9418 #endif
9419 #ifdef TARGET_NR_ssetmask /* not on alpha */
9420     case TARGET_NR_ssetmask:
9421         {
9422             sigset_t set, oset;
9423             abi_ulong target_set = arg1;
9424             target_to_host_old_sigset(&set, &target_set);
9425             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9426             if (!ret) {
9427                 host_to_target_old_sigset(&target_set, &oset);
9428                 ret = target_set;
9429             }
9430         }
9431         return ret;
9432 #endif
9433 #ifdef TARGET_NR_sigprocmask
9434     case TARGET_NR_sigprocmask:
9435         {
9436 #if defined(TARGET_ALPHA)
9437             sigset_t set, oldset;
9438             abi_ulong mask;
9439             int how;
9440 
9441             switch (arg1) {
9442             case TARGET_SIG_BLOCK:
9443                 how = SIG_BLOCK;
9444                 break;
9445             case TARGET_SIG_UNBLOCK:
9446                 how = SIG_UNBLOCK;
9447                 break;
9448             case TARGET_SIG_SETMASK:
9449                 how = SIG_SETMASK;
9450                 break;
9451             default:
9452                 return -TARGET_EINVAL;
9453             }
9454             mask = arg2;
9455             target_to_host_old_sigset(&set, &mask);
9456 
9457             ret = do_sigprocmask(how, &set, &oldset);
9458             if (!is_error(ret)) {
9459                 host_to_target_old_sigset(&mask, &oldset);
9460                 ret = mask;
9461                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9462             }
9463 #else
9464             sigset_t set, oldset, *set_ptr;
9465             int how;
9466 
9467             if (arg2) {
9468                 switch (arg1) {
9469                 case TARGET_SIG_BLOCK:
9470                     how = SIG_BLOCK;
9471                     break;
9472                 case TARGET_SIG_UNBLOCK:
9473                     how = SIG_UNBLOCK;
9474                     break;
9475                 case TARGET_SIG_SETMASK:
9476                     how = SIG_SETMASK;
9477                     break;
9478                 default:
9479                     return -TARGET_EINVAL;
9480                 }
9481                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9482                     return -TARGET_EFAULT;
9483                 target_to_host_old_sigset(&set, p);
9484                 unlock_user(p, arg2, 0);
9485                 set_ptr = &set;
9486             } else {
9487                 how = 0;
9488                 set_ptr = NULL;
9489             }
9490             ret = do_sigprocmask(how, set_ptr, &oldset);
9491             if (!is_error(ret) && arg3) {
9492                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9493                     return -TARGET_EFAULT;
9494                 host_to_target_old_sigset(p, &oldset);
9495                 unlock_user(p, arg3, sizeof(target_sigset_t));
9496             }
9497 #endif
9498         }
9499         return ret;
9500 #endif
9501     case TARGET_NR_rt_sigprocmask:
9502         {
9503             int how = arg1;
9504             sigset_t set, oldset, *set_ptr;
9505 
9506             if (arg4 != sizeof(target_sigset_t)) {
9507                 return -TARGET_EINVAL;
9508             }
9509 
9510             if (arg2) {
9511                 switch(how) {
9512                 case TARGET_SIG_BLOCK:
9513                     how = SIG_BLOCK;
9514                     break;
9515                 case TARGET_SIG_UNBLOCK:
9516                     how = SIG_UNBLOCK;
9517                     break;
9518                 case TARGET_SIG_SETMASK:
9519                     how = SIG_SETMASK;
9520                     break;
9521                 default:
9522                     return -TARGET_EINVAL;
9523                 }
9524                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9525                     return -TARGET_EFAULT;
9526                 target_to_host_sigset(&set, p);
9527                 unlock_user(p, arg2, 0);
9528                 set_ptr = &set;
9529             } else {
9530                 how = 0;
9531                 set_ptr = NULL;
9532             }
9533             ret = do_sigprocmask(how, set_ptr, &oldset);
9534             if (!is_error(ret) && arg3) {
9535                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9536                     return -TARGET_EFAULT;
9537                 host_to_target_sigset(p, &oldset);
9538                 unlock_user(p, arg3, sizeof(target_sigset_t));
9539             }
9540         }
9541         return ret;
9542 #ifdef TARGET_NR_sigpending
9543     case TARGET_NR_sigpending:
9544         {
9545             sigset_t set;
9546             ret = get_errno(sigpending(&set));
9547             if (!is_error(ret)) {
9548                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9549                     return -TARGET_EFAULT;
9550                 host_to_target_old_sigset(p, &set);
9551                 unlock_user(p, arg1, sizeof(target_sigset_t));
9552             }
9553         }
9554         return ret;
9555 #endif
9556     case TARGET_NR_rt_sigpending:
9557         {
9558             sigset_t set;
9559 
9560             /* Yes, this check is >, not != like most. We follow the kernel's
9561              * logic and it does it like this because it implements
9562              * NR_sigpending through the same code path, and in that case
9563              * the old_sigset_t is smaller in size.
9564              */
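            /*
             * For example, on a typical 32-bit guest old_sigset_t is 4 bytes
             * while target_sigset_t is 8, so a smaller-than-sizeof arg2 must
             * still be accepted here (sizes are illustrative and vary by
             * target).
             */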
9565             if (arg2 > sizeof(target_sigset_t)) {
9566                 return -TARGET_EINVAL;
9567             }
9568 
9569             ret = get_errno(sigpending(&set));
9570             if (!is_error(ret)) {
9571                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9572                     return -TARGET_EFAULT;
9573                 host_to_target_sigset(p, &set);
9574                 unlock_user(p, arg1, sizeof(target_sigset_t));
9575             }
9576         }
9577         return ret;
9578 #ifdef TARGET_NR_sigsuspend
9579     case TARGET_NR_sigsuspend:
9580         {
9581             TaskState *ts = cpu->opaque;
9582 #if defined(TARGET_ALPHA)
9583             abi_ulong mask = arg1;
9584             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9585 #else
9586             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9587                 return -TARGET_EFAULT;
9588             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9589             unlock_user(p, arg1, 0);
9590 #endif
9591             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9592                                                SIGSET_T_SIZE));
9593             if (ret != -QEMU_ERESTARTSYS) {
9594                 ts->in_sigsuspend = 1;
9595             }
9596         }
9597         return ret;
9598 #endif
9599     case TARGET_NR_rt_sigsuspend:
9600         {
9601             TaskState *ts = cpu->opaque;
9602 
9603             if (arg2 != sizeof(target_sigset_t)) {
9604                 return -TARGET_EINVAL;
9605             }
9606             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9607                 return -TARGET_EFAULT;
9608             target_to_host_sigset(&ts->sigsuspend_mask, p);
9609             unlock_user(p, arg1, 0);
9610             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9611                                                SIGSET_T_SIZE));
9612             if (ret != -QEMU_ERESTARTSYS) {
9613                 ts->in_sigsuspend = 1;
9614             }
9615         }
9616         return ret;
9617 #ifdef TARGET_NR_rt_sigtimedwait
9618     case TARGET_NR_rt_sigtimedwait:
9619         {
9620             sigset_t set;
9621             struct timespec uts, *puts;
9622             siginfo_t uinfo;
9623 
9624             if (arg4 != sizeof(target_sigset_t)) {
9625                 return -TARGET_EINVAL;
9626             }
9627 
9628             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9629                 return -TARGET_EFAULT;
9630             target_to_host_sigset(&set, p);
9631             unlock_user(p, arg1, 0);
9632             if (arg3) {
9633                 puts = &uts;
9634                 if (target_to_host_timespec(puts, arg3)) {
9635                     return -TARGET_EFAULT;
9636                 }
9637             } else {
9638                 puts = NULL;
9639             }
9640             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9641                                                  SIGSET_T_SIZE));
9642             if (!is_error(ret)) {
9643                 if (arg2) {
9644                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9645                                   0);
9646                     if (!p) {
9647                         return -TARGET_EFAULT;
9648                     }
9649                     host_to_target_siginfo(p, &uinfo);
9650                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9651                 }
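                /*
                 * The syscall returns a host signal number; map it back to
                 * the guest's signal numbering before returning it.
                 */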
9652                 ret = host_to_target_signal(ret);
9653             }
9654         }
9655         return ret;
9656 #endif
9657 #ifdef TARGET_NR_rt_sigtimedwait_time64
9658     case TARGET_NR_rt_sigtimedwait_time64:
9659         {
9660             sigset_t set;
9661             struct timespec uts, *puts;
9662             siginfo_t uinfo;
9663 
9664             if (arg4 != sizeof(target_sigset_t)) {
9665                 return -TARGET_EINVAL;
9666             }
9667 
9668             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9669             if (!p) {
9670                 return -TARGET_EFAULT;
9671             }
9672             target_to_host_sigset(&set, p);
9673             unlock_user(p, arg1, 0);
9674             if (arg3) {
9675                 puts = &uts;
9676                 if (target_to_host_timespec64(puts, arg3)) {
9677                     return -TARGET_EFAULT;
9678                 }
9679             } else {
9680                 puts = NULL;
9681             }
9682             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9683                                                  SIGSET_T_SIZE));
9684             if (!is_error(ret)) {
9685                 if (arg2) {
9686                     p = lock_user(VERIFY_WRITE, arg2,
9687                                   sizeof(target_siginfo_t), 0);
9688                     if (!p) {
9689                         return -TARGET_EFAULT;
9690                     }
9691                     host_to_target_siginfo(p, &uinfo);
9692                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9693                 }
9694                 ret = host_to_target_signal(ret);
9695             }
9696         }
9697         return ret;
9698 #endif
9699     case TARGET_NR_rt_sigqueueinfo:
9700         {
9701             siginfo_t uinfo;
9702 
9703             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9704             if (!p) {
9705                 return -TARGET_EFAULT;
9706             }
9707             target_to_host_siginfo(&uinfo, p);
9708             unlock_user(p, arg3, 0);
9709             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9710         }
9711         return ret;
9712     case TARGET_NR_rt_tgsigqueueinfo:
9713         {
9714             siginfo_t uinfo;
9715 
9716             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9717             if (!p) {
9718                 return -TARGET_EFAULT;
9719             }
9720             target_to_host_siginfo(&uinfo, p);
9721             unlock_user(p, arg4, 0);
9722             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9723         }
9724         return ret;
9725 #ifdef TARGET_NR_sigreturn
9726     case TARGET_NR_sigreturn:
9727         if (block_signals()) {
9728             return -QEMU_ERESTARTSYS;
9729         }
9730         return do_sigreturn(cpu_env);
9731 #endif
9732     case TARGET_NR_rt_sigreturn:
9733         if (block_signals()) {
9734             return -QEMU_ERESTARTSYS;
9735         }
9736         return do_rt_sigreturn(cpu_env);
9737     case TARGET_NR_sethostname:
9738         if (!(p = lock_user_string(arg1)))
9739             return -TARGET_EFAULT;
9740         ret = get_errno(sethostname(p, arg2));
9741         unlock_user(p, arg1, 0);
9742         return ret;
9743 #ifdef TARGET_NR_setrlimit
9744     case TARGET_NR_setrlimit:
9745         {
9746             int resource = target_to_host_resource(arg1);
9747             struct target_rlimit *target_rlim;
9748             struct rlimit rlim;
9749             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9750                 return -TARGET_EFAULT;
9751             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9752             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9753             unlock_user_struct(target_rlim, arg2, 0);
9754             /*
9755              * If we just passed through resource limit settings for memory then
9756              * they would also apply to QEMU's own allocations, and QEMU will
9757              * crash or hang or die if its allocations fail. Ideally we would
9758              * track the guest allocations in QEMU and apply the limits ourselves.
9759              * For now, just tell the guest the call succeeded but don't actually
9760              * limit anything.
9761              */
9762             if (resource != RLIMIT_AS &&
9763                 resource != RLIMIT_DATA &&
9764                 resource != RLIMIT_STACK) {
9765                 return get_errno(setrlimit(resource, &rlim));
9766             } else {
9767                 return 0;
9768             }
9769         }
9770 #endif
9771 #ifdef TARGET_NR_getrlimit
9772     case TARGET_NR_getrlimit:
9773         {
9774             int resource = target_to_host_resource(arg1);
9775             struct target_rlimit *target_rlim;
9776             struct rlimit rlim;
9777 
9778             ret = get_errno(getrlimit(resource, &rlim));
9779             if (!is_error(ret)) {
9780                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9781                     return -TARGET_EFAULT;
9782                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9783                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9784                 unlock_user_struct(target_rlim, arg2, 1);
9785             }
9786         }
9787         return ret;
9788 #endif
9789     case TARGET_NR_getrusage:
9790         {
9791             struct rusage rusage;
9792             ret = get_errno(getrusage(arg1, &rusage));
9793             if (!is_error(ret)) {
9794                 ret = host_to_target_rusage(arg2, &rusage);
9795             }
9796         }
9797         return ret;
9798 #if defined(TARGET_NR_gettimeofday)
9799     case TARGET_NR_gettimeofday:
9800         {
9801             struct timeval tv;
9802             struct timezone tz;
9803 
9804             ret = get_errno(gettimeofday(&tv, &tz));
9805             if (!is_error(ret)) {
9806                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9807                     return -TARGET_EFAULT;
9808                 }
9809                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9810                     return -TARGET_EFAULT;
9811                 }
9812             }
9813         }
9814         return ret;
9815 #endif
9816 #if defined(TARGET_NR_settimeofday)
9817     case TARGET_NR_settimeofday:
9818         {
9819             struct timeval tv, *ptv = NULL;
9820             struct timezone tz, *ptz = NULL;
9821 
9822             if (arg1) {
9823                 if (copy_from_user_timeval(&tv, arg1)) {
9824                     return -TARGET_EFAULT;
9825                 }
9826                 ptv = &tv;
9827             }
9828 
9829             if (arg2) {
9830                 if (copy_from_user_timezone(&tz, arg2)) {
9831                     return -TARGET_EFAULT;
9832                 }
9833                 ptz = &tz;
9834             }
9835 
9836             return get_errno(settimeofday(ptv, ptz));
9837         }
9838 #endif
9839 #if defined(TARGET_NR_select)
9840     case TARGET_NR_select:
9841 #if defined(TARGET_WANT_NI_OLD_SELECT)
9842         /* Some architectures used to have old_select here
9843          * but now return ENOSYS for it.
9844          */
9845         ret = -TARGET_ENOSYS;
9846 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9847         ret = do_old_select(arg1);
9848 #else
9849         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9850 #endif
9851         return ret;
9852 #endif
9853 #ifdef TARGET_NR_pselect6
9854     case TARGET_NR_pselect6:
9855         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9856 #endif
9857 #ifdef TARGET_NR_pselect6_time64
9858     case TARGET_NR_pselect6_time64:
9859         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9860 #endif
9861 #ifdef TARGET_NR_symlink
9862     case TARGET_NR_symlink:
9863         {
9864             void *p2;
9865             p = lock_user_string(arg1);
9866             p2 = lock_user_string(arg2);
9867             if (!p || !p2)
9868                 ret = -TARGET_EFAULT;
9869             else
9870                 ret = get_errno(symlink(p, p2));
9871             unlock_user(p2, arg2, 0);
9872             unlock_user(p, arg1, 0);
9873         }
9874         return ret;
9875 #endif
9876 #if defined(TARGET_NR_symlinkat)
9877     case TARGET_NR_symlinkat:
9878         {
9879             void *p2;
9880             p  = lock_user_string(arg1);
9881             p2 = lock_user_string(arg3);
9882             if (!p || !p2)
9883                 ret = -TARGET_EFAULT;
9884             else
9885                 ret = get_errno(symlinkat(p, arg2, p2));
9886             unlock_user(p2, arg3, 0);
9887             unlock_user(p, arg1, 0);
9888         }
9889         return ret;
9890 #endif
9891 #ifdef TARGET_NR_readlink
9892     case TARGET_NR_readlink:
9893         {
9894             void *p2;
9895             p = lock_user_string(arg1);
9896             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9897             if (!p || !p2) {
9898                 ret = -TARGET_EFAULT;
9899             } else if (!arg3) {
9900                 /* Short circuit this for the magic exe check. */
9901                 ret = -TARGET_EINVAL;
9902             } else if (is_proc_myself((const char *)p, "exe")) {
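                /*
                 * A guest readlink("/proc/self/exe", ...) should see the
                 * path of the emulated binary (exec_path), not the path of
                 * the QEMU executable that is actually running it.
                 */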
9903                 char real[PATH_MAX], *temp;
9904                 temp = realpath(exec_path, real);
9905                 /* Return value is # of bytes that we wrote to the buffer. */
9906                 if (temp == NULL) {
9907                     ret = get_errno(-1);
9908                 } else {
9909                     /* Don't worry about sign mismatch as earlier mapping
9910                      * logic would have thrown a bad address error. */
9911                     ret = MIN(strlen(real), arg3);
9912                     /* We cannot NUL terminate the string. */
9913                     memcpy(p2, real, ret);
9914                 }
9915             } else {
9916                 ret = get_errno(readlink(path(p), p2, arg3));
9917             }
9918             unlock_user(p2, arg2, ret);
9919             unlock_user(p, arg1, 0);
9920         }
9921         return ret;
9922 #endif
9923 #if defined(TARGET_NR_readlinkat)
9924     case TARGET_NR_readlinkat:
9925         {
9926             void *p2;
9927             p  = lock_user_string(arg2);
9928             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9929             if (!p || !p2) {
9930                 ret = -TARGET_EFAULT;
9931             } else if (is_proc_myself((const char *)p, "exe")) {
9932                 char real[PATH_MAX], *temp;
9933                 temp = realpath(exec_path, real);
9934                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9935                 snprintf((char *)p2, arg4, "%s", real);
9936             } else {
9937                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9938             }
9939             unlock_user(p2, arg3, ret);
9940             unlock_user(p, arg2, 0);
9941         }
9942         return ret;
9943 #endif
9944 #ifdef TARGET_NR_swapon
9945     case TARGET_NR_swapon:
9946         if (!(p = lock_user_string(arg1)))
9947             return -TARGET_EFAULT;
9948         ret = get_errno(swapon(p, arg2));
9949         unlock_user(p, arg1, 0);
9950         return ret;
9951 #endif
9952     case TARGET_NR_reboot:
9953         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9954             /* arg4 is only used with RESTART2; ignored in all other cases */
9955             p = lock_user_string(arg4);
9956             if (!p) {
9957                 return -TARGET_EFAULT;
9958             }
9959             ret = get_errno(reboot(arg1, arg2, arg3, p));
9960             unlock_user(p, arg4, 0);
9961         } else {
9962             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9963         }
9964         return ret;
9965 #ifdef TARGET_NR_mmap
9966     case TARGET_NR_mmap:
9967 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9968     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9969     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9970     || defined(TARGET_S390X)
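        /*
         * On these targets the (old-style) mmap syscall takes a single
         * guest pointer to a block of six abi_ulong values, unpacked below
         * as { addr, len, prot, flags, fd, offset } (a sketch of the layout
         * implied by the v1..v6 usage, not a separate ABI description).
         */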
9971         {
9972             abi_ulong *v;
9973             abi_ulong v1, v2, v3, v4, v5, v6;
9974             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9975                 return -TARGET_EFAULT;
9976             v1 = tswapal(v[0]);
9977             v2 = tswapal(v[1]);
9978             v3 = tswapal(v[2]);
9979             v4 = tswapal(v[3]);
9980             v5 = tswapal(v[4]);
9981             v6 = tswapal(v[5]);
9982             unlock_user(v, arg1, 0);
9983             ret = get_errno(target_mmap(v1, v2, v3,
9984                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9985                                         v5, v6));
9986         }
9987 #else
9988         /* mmap pointers are always untagged */
9989         ret = get_errno(target_mmap(arg1, arg2, arg3,
9990                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9991                                     arg5,
9992                                     arg6));
9993 #endif
9994         return ret;
9995 #endif
9996 #ifdef TARGET_NR_mmap2
9997     case TARGET_NR_mmap2:
9998 #ifndef MMAP_SHIFT
9999 #define MMAP_SHIFT 12
10000 #endif
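        /*
         * mmap2 passes the file offset in units of (1 << MMAP_SHIFT) bytes,
         * i.e. normally 4096, so e.g. arg6 == 3 means a byte offset of 12288.
         * Shifting here lets 32-bit guests express offsets beyond 4 GiB.
         */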
10001         ret = target_mmap(arg1, arg2, arg3,
10002                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10003                           arg5, arg6 << MMAP_SHIFT);
10004         return get_errno(ret);
10005 #endif
10006     case TARGET_NR_munmap:
10007         arg1 = cpu_untagged_addr(cpu, arg1);
10008         return get_errno(target_munmap(arg1, arg2));
10009     case TARGET_NR_mprotect:
10010         arg1 = cpu_untagged_addr(cpu, arg1);
10011         {
10012             TaskState *ts = cpu->opaque;
10013             /* Special hack to detect libc making the stack executable.  */
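            /*
             * If the guest requests PROT_GROWSDOWN on a page inside the
             * initial stack region, widen the request to start at the
             * stack limit and clear the flag before calling
             * target_mprotect() below.
             */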
10014             if ((arg3 & PROT_GROWSDOWN)
10015                 && arg1 >= ts->info->stack_limit
10016                 && arg1 <= ts->info->start_stack) {
10017                 arg3 &= ~PROT_GROWSDOWN;
10018                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10019                 arg1 = ts->info->stack_limit;
10020             }
10021         }
10022         return get_errno(target_mprotect(arg1, arg2, arg3));
10023 #ifdef TARGET_NR_mremap
10024     case TARGET_NR_mremap:
10025         arg1 = cpu_untagged_addr(cpu, arg1);
10026         /* mremap new_addr (arg5) is always untagged */
10027         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10028 #endif
10029         /* ??? msync/mlock/munlock are broken for softmmu.  */
10030 #ifdef TARGET_NR_msync
10031     case TARGET_NR_msync:
10032         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10033 #endif
10034 #ifdef TARGET_NR_mlock
10035     case TARGET_NR_mlock:
10036         return get_errno(mlock(g2h(cpu, arg1), arg2));
10037 #endif
10038 #ifdef TARGET_NR_munlock
10039     case TARGET_NR_munlock:
10040         return get_errno(munlock(g2h(cpu, arg1), arg2));
10041 #endif
10042 #ifdef TARGET_NR_mlockall
10043     case TARGET_NR_mlockall:
10044         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10045 #endif
10046 #ifdef TARGET_NR_munlockall
10047     case TARGET_NR_munlockall:
10048         return get_errno(munlockall());
10049 #endif
10050 #ifdef TARGET_NR_truncate
10051     case TARGET_NR_truncate:
10052         if (!(p = lock_user_string(arg1)))
10053             return -TARGET_EFAULT;
10054         ret = get_errno(truncate(p, arg2));
10055         unlock_user(p, arg1, 0);
10056         return ret;
10057 #endif
10058 #ifdef TARGET_NR_ftruncate
10059     case TARGET_NR_ftruncate:
10060         return get_errno(ftruncate(arg1, arg2));
10061 #endif
10062     case TARGET_NR_fchmod:
10063         return get_errno(fchmod(arg1, arg2));
10064 #if defined(TARGET_NR_fchmodat)
10065     case TARGET_NR_fchmodat:
10066         if (!(p = lock_user_string(arg2)))
10067             return -TARGET_EFAULT;
10068         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10069         unlock_user(p, arg2, 0);
10070         return ret;
10071 #endif
10072     case TARGET_NR_getpriority:
10073         /* Note that negative values are valid for getpriority, so we must
10074            differentiate based on errno settings.  */
10075         errno = 0;
10076         ret = getpriority(arg1, arg2);
10077         if (ret == -1 && errno != 0) {
10078             return -host_to_target_errno(errno);
10079         }
10080 #ifdef TARGET_ALPHA
10081         /* Return value is the unbiased priority.  Signal no error.  */
10082         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
10083 #else
10084         /* Return value is a biased priority to avoid negative numbers.  */
10085         ret = 20 - ret;
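        /*
         * For example, a nice value of -20 becomes 40, 0 becomes 20, and
         * +19 becomes 1, matching the kernel's biased, non-negative
         * return convention for getpriority().
         */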
10086 #endif
10087         return ret;
10088     case TARGET_NR_setpriority:
10089         return get_errno(setpriority(arg1, arg2, arg3));
10090 #ifdef TARGET_NR_statfs
10091     case TARGET_NR_statfs:
10092         if (!(p = lock_user_string(arg1))) {
10093             return -TARGET_EFAULT;
10094         }
10095         ret = get_errno(statfs(path(p), &stfs));
10096         unlock_user(p, arg1, 0);
10097     convert_statfs:
10098         if (!is_error(ret)) {
10099             struct target_statfs *target_stfs;
10100 
10101             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10102                 return -TARGET_EFAULT;
10103             __put_user(stfs.f_type, &target_stfs->f_type);
10104             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10105             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10106             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10107             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10108             __put_user(stfs.f_files, &target_stfs->f_files);
10109             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10110             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10111             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10112             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10113             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10114 #ifdef _STATFS_F_FLAGS
10115             __put_user(stfs.f_flags, &target_stfs->f_flags);
10116 #else
10117             __put_user(0, &target_stfs->f_flags);
10118 #endif
10119             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10120             unlock_user_struct(target_stfs, arg2, 1);
10121         }
10122         return ret;
10123 #endif
10124 #ifdef TARGET_NR_fstatfs
10125     case TARGET_NR_fstatfs:
10126         ret = get_errno(fstatfs(arg1, &stfs));
10127         goto convert_statfs;
10128 #endif
10129 #ifdef TARGET_NR_statfs64
10130     case TARGET_NR_statfs64:
10131         if (!(p = lock_user_string(arg1))) {
10132             return -TARGET_EFAULT;
10133         }
10134         ret = get_errno(statfs(path(p), &stfs));
10135         unlock_user(p, arg1, 0);
10136     convert_statfs64:
10137         if (!is_error(ret)) {
10138             struct target_statfs64 *target_stfs;
10139 
10140             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10141                 return -TARGET_EFAULT;
10142             __put_user(stfs.f_type, &target_stfs->f_type);
10143             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10144             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10145             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10146             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10147             __put_user(stfs.f_files, &target_stfs->f_files);
10148             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10149             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10150             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10151             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10152             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10153 #ifdef _STATFS_F_FLAGS
10154             __put_user(stfs.f_flags, &target_stfs->f_flags);
10155 #else
10156             __put_user(0, &target_stfs->f_flags);
10157 #endif
10158             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10159             unlock_user_struct(target_stfs, arg3, 1);
10160         }
10161         return ret;
10162     case TARGET_NR_fstatfs64:
10163         ret = get_errno(fstatfs(arg1, &stfs));
10164         goto convert_statfs64;
10165 #endif
10166 #ifdef TARGET_NR_socketcall
10167     case TARGET_NR_socketcall:
10168         return do_socketcall(arg1, arg2);
10169 #endif
10170 #ifdef TARGET_NR_accept
10171     case TARGET_NR_accept:
10172         return do_accept4(arg1, arg2, arg3, 0);
10173 #endif
10174 #ifdef TARGET_NR_accept4
10175     case TARGET_NR_accept4:
10176         return do_accept4(arg1, arg2, arg3, arg4);
10177 #endif
10178 #ifdef TARGET_NR_bind
10179     case TARGET_NR_bind:
10180         return do_bind(arg1, arg2, arg3);
10181 #endif
10182 #ifdef TARGET_NR_connect
10183     case TARGET_NR_connect:
10184         return do_connect(arg1, arg2, arg3);
10185 #endif
10186 #ifdef TARGET_NR_getpeername
10187     case TARGET_NR_getpeername:
10188         return do_getpeername(arg1, arg2, arg3);
10189 #endif
10190 #ifdef TARGET_NR_getsockname
10191     case TARGET_NR_getsockname:
10192         return do_getsockname(arg1, arg2, arg3);
10193 #endif
10194 #ifdef TARGET_NR_getsockopt
10195     case TARGET_NR_getsockopt:
10196         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10197 #endif
10198 #ifdef TARGET_NR_listen
10199     case TARGET_NR_listen:
10200         return get_errno(listen(arg1, arg2));
10201 #endif
10202 #ifdef TARGET_NR_recv
10203     case TARGET_NR_recv:
10204         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10205 #endif
10206 #ifdef TARGET_NR_recvfrom
10207     case TARGET_NR_recvfrom:
10208         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10209 #endif
10210 #ifdef TARGET_NR_recvmsg
10211     case TARGET_NR_recvmsg:
10212         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10213 #endif
10214 #ifdef TARGET_NR_send
10215     case TARGET_NR_send:
10216         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10217 #endif
10218 #ifdef TARGET_NR_sendmsg
10219     case TARGET_NR_sendmsg:
10220         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10221 #endif
10222 #ifdef TARGET_NR_sendmmsg
10223     case TARGET_NR_sendmmsg:
10224         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10225 #endif
10226 #ifdef TARGET_NR_recvmmsg
10227     case TARGET_NR_recvmmsg:
10228         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10229 #endif
10230 #ifdef TARGET_NR_sendto
10231     case TARGET_NR_sendto:
10232         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10233 #endif
10234 #ifdef TARGET_NR_shutdown
10235     case TARGET_NR_shutdown:
10236         return get_errno(shutdown(arg1, arg2));
10237 #endif
10238 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10239     case TARGET_NR_getrandom:
10240         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10241         if (!p) {
10242             return -TARGET_EFAULT;
10243         }
10244         ret = get_errno(getrandom(p, arg2, arg3));
10245         unlock_user(p, arg1, ret);
10246         return ret;
10247 #endif
10248 #ifdef TARGET_NR_socket
10249     case TARGET_NR_socket:
10250         return do_socket(arg1, arg2, arg3);
10251 #endif
10252 #ifdef TARGET_NR_socketpair
10253     case TARGET_NR_socketpair:
10254         return do_socketpair(arg1, arg2, arg3, arg4);
10255 #endif
10256 #ifdef TARGET_NR_setsockopt
10257     case TARGET_NR_setsockopt:
10258         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10259 #endif
10260 #if defined(TARGET_NR_syslog)
10261     case TARGET_NR_syslog:
10262         {
10263             int len = arg2;
10264 
10265             switch (arg1) {
10266             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10267             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10268             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10269             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10270             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10271             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10272             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10273             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10274                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10275             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10276             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10277             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10278                 {
10279                     if (len < 0) {
10280                         return -TARGET_EINVAL;
10281                     }
10282                     if (len == 0) {
10283                         return 0;
10284                     }
10285                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10286                     if (!p) {
10287                         return -TARGET_EFAULT;
10288                     }
10289                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10290                     unlock_user(p, arg2, arg3);
10291                 }
10292                 return ret;
10293             default:
10294                 return -TARGET_EINVAL;
10295             }
10296         }
10297         break;
10298 #endif
10299     case TARGET_NR_setitimer:
10300         {
10301             struct itimerval value, ovalue, *pvalue;
10302 
10303             if (arg2) {
10304                 pvalue = &value;
10305                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10306                     || copy_from_user_timeval(&pvalue->it_value,
10307                                               arg2 + sizeof(struct target_timeval)))
10308                     return -TARGET_EFAULT;
10309             } else {
10310                 pvalue = NULL;
10311             }
10312             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10313             if (!is_error(ret) && arg3) {
10314                 if (copy_to_user_timeval(arg3,
10315                                          &ovalue.it_interval)
10316                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10317                                             &ovalue.it_value))
10318                     return -TARGET_EFAULT;
10319             }
10320         }
10321         return ret;
10322     case TARGET_NR_getitimer:
10323         {
10324             struct itimerval value;
10325 
10326             ret = get_errno(getitimer(arg1, &value));
10327             if (!is_error(ret) && arg2) {
10328                 if (copy_to_user_timeval(arg2,
10329                                          &value.it_interval)
10330                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10331                                             &value.it_value))
10332                     return -TARGET_EFAULT;
10333             }
10334         }
10335         return ret;
10336 #ifdef TARGET_NR_stat
10337     case TARGET_NR_stat:
10338         if (!(p = lock_user_string(arg1))) {
10339             return -TARGET_EFAULT;
10340         }
10341         ret = get_errno(stat(path(p), &st));
10342         unlock_user(p, arg1, 0);
10343         goto do_stat;
10344 #endif
10345 #ifdef TARGET_NR_lstat
10346     case TARGET_NR_lstat:
10347         if (!(p = lock_user_string(arg1))) {
10348             return -TARGET_EFAULT;
10349         }
10350         ret = get_errno(lstat(path(p), &st));
10351         unlock_user(p, arg1, 0);
10352         goto do_stat;
10353 #endif
10354 #ifdef TARGET_NR_fstat
10355     case TARGET_NR_fstat:
10356         {
10357             ret = get_errno(fstat(arg1, &st));
10358 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10359         do_stat:
10360 #endif
10361             if (!is_error(ret)) {
10362                 struct target_stat *target_st;
10363 
10364                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10365                     return -TARGET_EFAULT;
10366                 memset(target_st, 0, sizeof(*target_st));
10367                 __put_user(st.st_dev, &target_st->st_dev);
10368                 __put_user(st.st_ino, &target_st->st_ino);
10369                 __put_user(st.st_mode, &target_st->st_mode);
10370                 __put_user(st.st_uid, &target_st->st_uid);
10371                 __put_user(st.st_gid, &target_st->st_gid);
10372                 __put_user(st.st_nlink, &target_st->st_nlink);
10373                 __put_user(st.st_rdev, &target_st->st_rdev);
10374                 __put_user(st.st_size, &target_st->st_size);
10375                 __put_user(st.st_blksize, &target_st->st_blksize);
10376                 __put_user(st.st_blocks, &target_st->st_blocks);
10377                 __put_user(st.st_atime, &target_st->target_st_atime);
10378                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10379                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10380 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10381                 __put_user(st.st_atim.tv_nsec,
10382                            &target_st->target_st_atime_nsec);
10383                 __put_user(st.st_mtim.tv_nsec,
10384                            &target_st->target_st_mtime_nsec);
10385                 __put_user(st.st_ctim.tv_nsec,
10386                            &target_st->target_st_ctime_nsec);
10387 #endif
10388                 unlock_user_struct(target_st, arg2, 1);
10389             }
10390         }
10391         return ret;
10392 #endif
10393     case TARGET_NR_vhangup:
10394         return get_errno(vhangup());
10395 #ifdef TARGET_NR_syscall
10396     case TARGET_NR_syscall:
10397         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10398                           arg6, arg7, arg8, 0);
10399 #endif
10400 #if defined(TARGET_NR_wait4)
10401     case TARGET_NR_wait4:
10402         {
10403             int status;
10404             abi_long status_ptr = arg2;
10405             struct rusage rusage, *rusage_ptr;
10406             abi_ulong target_rusage = arg4;
10407             abi_long rusage_err;
10408             if (target_rusage)
10409                 rusage_ptr = &rusage;
10410             else
10411                 rusage_ptr = NULL;
10412             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10413             if (!is_error(ret)) {
10414                 if (status_ptr && ret) {
10415                     status = host_to_target_waitstatus(status);
10416                     if (put_user_s32(status, status_ptr))
10417                         return -TARGET_EFAULT;
10418                 }
10419                 if (target_rusage) {
10420                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10421                     if (rusage_err) {
10422                         ret = rusage_err;
10423                     }
10424                 }
10425             }
10426         }
10427         return ret;
10428 #endif
10429 #ifdef TARGET_NR_swapoff
10430     case TARGET_NR_swapoff:
10431         if (!(p = lock_user_string(arg1)))
10432             return -TARGET_EFAULT;
10433         ret = get_errno(swapoff(p));
10434         unlock_user(p, arg1, 0);
10435         return ret;
10436 #endif
10437     case TARGET_NR_sysinfo:
10438         {
10439             struct target_sysinfo *target_value;
10440             struct sysinfo value;
10441             ret = get_errno(sysinfo(&value));
10442             if (!is_error(ret) && arg1)
10443             {
10444                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10445                     return -TARGET_EFAULT;
10446                 __put_user(value.uptime, &target_value->uptime);
10447                 __put_user(value.loads[0], &target_value->loads[0]);
10448                 __put_user(value.loads[1], &target_value->loads[1]);
10449                 __put_user(value.loads[2], &target_value->loads[2]);
10450                 __put_user(value.totalram, &target_value->totalram);
10451                 __put_user(value.freeram, &target_value->freeram);
10452                 __put_user(value.sharedram, &target_value->sharedram);
10453                 __put_user(value.bufferram, &target_value->bufferram);
10454                 __put_user(value.totalswap, &target_value->totalswap);
10455                 __put_user(value.freeswap, &target_value->freeswap);
10456                 __put_user(value.procs, &target_value->procs);
10457                 __put_user(value.totalhigh, &target_value->totalhigh);
10458                 __put_user(value.freehigh, &target_value->freehigh);
10459                 __put_user(value.mem_unit, &target_value->mem_unit);
10460                 unlock_user_struct(target_value, arg1, 1);
10461             }
10462         }
10463         return ret;
10464 #ifdef TARGET_NR_ipc
10465     case TARGET_NR_ipc:
10466         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10467 #endif
10468 #ifdef TARGET_NR_semget
10469     case TARGET_NR_semget:
10470         return get_errno(semget(arg1, arg2, arg3));
10471 #endif
10472 #ifdef TARGET_NR_semop
10473     case TARGET_NR_semop:
10474         return do_semtimedop(arg1, arg2, arg3, 0, false);
10475 #endif
10476 #ifdef TARGET_NR_semtimedop
10477     case TARGET_NR_semtimedop:
10478         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10479 #endif
10480 #ifdef TARGET_NR_semtimedop_time64
10481     case TARGET_NR_semtimedop_time64:
10482         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10483 #endif
10484 #ifdef TARGET_NR_semctl
10485     case TARGET_NR_semctl:
10486         return do_semctl(arg1, arg2, arg3, arg4);
10487 #endif
10488 #ifdef TARGET_NR_msgctl
10489     case TARGET_NR_msgctl:
10490         return do_msgctl(arg1, arg2, arg3);
10491 #endif
10492 #ifdef TARGET_NR_msgget
10493     case TARGET_NR_msgget:
10494         return get_errno(msgget(arg1, arg2));
10495 #endif
10496 #ifdef TARGET_NR_msgrcv
10497     case TARGET_NR_msgrcv:
10498         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10499 #endif
10500 #ifdef TARGET_NR_msgsnd
10501     case TARGET_NR_msgsnd:
10502         return do_msgsnd(arg1, arg2, arg3, arg4);
10503 #endif
10504 #ifdef TARGET_NR_shmget
10505     case TARGET_NR_shmget:
10506         return get_errno(shmget(arg1, arg2, arg3));
10507 #endif
10508 #ifdef TARGET_NR_shmctl
10509     case TARGET_NR_shmctl:
10510         return do_shmctl(arg1, arg2, arg3);
10511 #endif
10512 #ifdef TARGET_NR_shmat
10513     case TARGET_NR_shmat:
10514         return do_shmat(cpu_env, arg1, arg2, arg3);
10515 #endif
10516 #ifdef TARGET_NR_shmdt
10517     case TARGET_NR_shmdt:
10518         return do_shmdt(arg1);
10519 #endif
10520     case TARGET_NR_fsync:
10521         return get_errno(fsync(arg1));
10522     case TARGET_NR_clone:
10523         /* Linux manages to have three different orderings for its
10524          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10525          * match the kernel's CONFIG_CLONE_* settings.
10526          * Microblaze is further special in that it uses a sixth
10527          * implicit argument to clone for the TLS pointer.
10528          */
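        /*
         * As a rough sketch of the guest-side argument orders handled below
         * (based on the corresponding kernel CONFIG_CLONE_* options):
         *   default:    clone(flags, newsp, parent_tidptr, child_tidptr, tls)
         *   BACKWARDS:  clone(flags, newsp, parent_tidptr, tls, child_tidptr)
         *   BACKWARDS2: clone(newsp, flags, parent_tidptr, child_tidptr, tls)
         */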
10529 #if defined(TARGET_MICROBLAZE)
10530         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10531 #elif defined(TARGET_CLONE_BACKWARDS)
10532         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10533 #elif defined(TARGET_CLONE_BACKWARDS2)
10534         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10535 #else
10536         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10537 #endif
10538         return ret;
10539 #ifdef __NR_exit_group
10540         /* new thread calls */
10541     case TARGET_NR_exit_group:
10542         preexit_cleanup(cpu_env, arg1);
10543         return get_errno(exit_group(arg1));
10544 #endif
10545     case TARGET_NR_setdomainname:
10546         if (!(p = lock_user_string(arg1)))
10547             return -TARGET_EFAULT;
10548         ret = get_errno(setdomainname(p, arg2));
10549         unlock_user(p, arg1, 0);
10550         return ret;
10551     case TARGET_NR_uname:
10552         /* no need to transcode because we use the Linux syscall */
10553         {
10554             struct new_utsname * buf;
10555 
10556             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10557                 return -TARGET_EFAULT;
10558             ret = get_errno(sys_uname(buf));
10559             if (!is_error(ret)) {
10560                 /* Overwrite the native machine name with whatever is being
10561                    emulated. */
10562                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10563                           sizeof(buf->machine));
10564                 /* Allow the user to override the reported release.  */
10565                 if (qemu_uname_release && *qemu_uname_release) {
10566                     g_strlcpy(buf->release, qemu_uname_release,
10567                               sizeof(buf->release));
10568                 }
10569             }
10570             unlock_user_struct(buf, arg1, 1);
10571         }
10572         return ret;
10573 #ifdef TARGET_I386
10574     case TARGET_NR_modify_ldt:
10575         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10576 #if !defined(TARGET_X86_64)
10577     case TARGET_NR_vm86:
10578         return do_vm86(cpu_env, arg1, arg2);
10579 #endif
10580 #endif
10581 #if defined(TARGET_NR_adjtimex)
10582     case TARGET_NR_adjtimex:
10583         {
10584             struct timex host_buf;
10585 
10586             if (target_to_host_timex(&host_buf, arg1) != 0) {
10587                 return -TARGET_EFAULT;
10588             }
10589             ret = get_errno(adjtimex(&host_buf));
10590             if (!is_error(ret)) {
10591                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10592                     return -TARGET_EFAULT;
10593                 }
10594             }
10595         }
10596         return ret;
10597 #endif
10598 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10599     case TARGET_NR_clock_adjtime:
10600         {
10601             struct timex htx, *phtx = &htx;
10602 
10603             if (target_to_host_timex(phtx, arg2) != 0) {
10604                 return -TARGET_EFAULT;
10605             }
10606             ret = get_errno(clock_adjtime(arg1, phtx));
10607             if (!is_error(ret) && phtx) {
10608                 if (host_to_target_timex(arg2, phtx) != 0) {
10609                     return -TARGET_EFAULT;
10610                 }
10611             }
10612         }
10613         return ret;
10614 #endif
10615 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10616     case TARGET_NR_clock_adjtime64:
10617         {
10618             struct timex htx;
10619 
10620             if (target_to_host_timex64(&htx, arg2) != 0) {
10621                 return -TARGET_EFAULT;
10622             }
10623             ret = get_errno(clock_adjtime(arg1, &htx));
10624             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10625                     return -TARGET_EFAULT;
10626             }
10627         }
10628         return ret;
10629 #endif
10630     case TARGET_NR_getpgid:
10631         return get_errno(getpgid(arg1));
10632     case TARGET_NR_fchdir:
10633         return get_errno(fchdir(arg1));
10634     case TARGET_NR_personality:
10635         return get_errno(personality(arg1));
10636 #ifdef TARGET_NR__llseek /* Not on alpha */
10637     case TARGET_NR__llseek:
10638         {
10639             int64_t res;
10640 #if !defined(__NR_llseek)
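            /*
             * On hosts without an _llseek syscall (64-bit hosts), the
             * guest's high/low halves can simply be recombined into one
             * 64-bit offset: ((uint64_t)arg2 << 32) | arg3.
             */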
10641             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10642             if (res == -1) {
10643                 ret = get_errno(res);
10644             } else {
10645                 ret = 0;
10646             }
10647 #else
10648             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10649 #endif
10650             if ((ret == 0) && put_user_s64(res, arg4)) {
10651                 return -TARGET_EFAULT;
10652             }
10653         }
10654         return ret;
10655 #endif
10656 #ifdef TARGET_NR_getdents
10657     case TARGET_NR_getdents:
10658         return do_getdents(arg1, arg2, arg3);
10659 #endif /* TARGET_NR_getdents */
10660 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10661     case TARGET_NR_getdents64:
10662         return do_getdents64(arg1, arg2, arg3);
10663 #endif /* TARGET_NR_getdents64 */
10664 #if defined(TARGET_NR__newselect)
10665     case TARGET_NR__newselect:
10666         return do_select(arg1, arg2, arg3, arg4, arg5);
10667 #endif
10668 #ifdef TARGET_NR_poll
10669     case TARGET_NR_poll:
10670         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10671 #endif
10672 #ifdef TARGET_NR_ppoll
10673     case TARGET_NR_ppoll:
10674         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10675 #endif
10676 #ifdef TARGET_NR_ppoll_time64
10677     case TARGET_NR_ppoll_time64:
10678         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10679 #endif
10680     case TARGET_NR_flock:
10681         /* NOTE: the flock constant seems to be the same for every
10682            Linux platform */
10683         return get_errno(safe_flock(arg1, arg2));
10684     case TARGET_NR_readv:
10685         {
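                  /*
                   * lock_iovec() maps the guest iovec array into host memory;
                   * on failure it leaves errno set, which the else branch
                   * below converts to a target errno.
                   */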
10686             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10687             if (vec != NULL) {
10688                 ret = get_errno(safe_readv(arg1, vec, arg3));
10689                 unlock_iovec(vec, arg2, arg3, 1);
10690             } else {
10691                 ret = -host_to_target_errno(errno);
10692             }
10693         }
10694         return ret;
10695     case TARGET_NR_writev:
10696         {
10697             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10698             if (vec != NULL) {
10699                 ret = get_errno(safe_writev(arg1, vec, arg3));
10700                 unlock_iovec(vec, arg2, arg3, 0);
10701             } else {
10702                 ret = -host_to_target_errno(errno);
10703             }
10704         }
10705         return ret;
10706 #if defined(TARGET_NR_preadv)
10707     case TARGET_NR_preadv:
10708         {
10709             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10710             if (vec != NULL) {
10711                 unsigned long low, high;
10712 
10713                 target_to_host_low_high(arg4, arg5, &low, &high);
10714                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10715                 unlock_iovec(vec, arg2, arg3, 1);
10716             } else {
10717                 ret = -host_to_target_errno(errno);
10718             }
10719         }
10720         return ret;
10721 #endif
10722 #if defined(TARGET_NR_pwritev)
10723     case TARGET_NR_pwritev:
10724         {
10725             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10726             if (vec != NULL) {
10727                 unsigned long low, high;
10728 
10729                 target_to_host_low_high(arg4, arg5, &low, &high);
10730                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10731                 unlock_iovec(vec, arg2, arg3, 0);
10732             } else {
10733                 ret = -host_to_target_errno(errno);
10734             }
10735         }
10736         return ret;
10737 #endif
10738     case TARGET_NR_getsid:
10739         return get_errno(getsid(arg1));
10740 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10741     case TARGET_NR_fdatasync:
10742         return get_errno(fdatasync(arg1));
10743 #endif
10744     case TARGET_NR_sched_getaffinity:
10745         {
10746             unsigned int mask_size;
10747             unsigned long *mask;
10748 
10749             /*
10750              * sched_getaffinity needs multiples of ulong, so need to take
10751              * care of mismatches between target ulong and host ulong sizes.
10752              */
10753             if (arg2 & (sizeof(abi_ulong) - 1)) {
10754                 return -TARGET_EINVAL;
10755             }
10756             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10757 
10758             mask = alloca(mask_size);
10759             memset(mask, 0, mask_size);
10760             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10761 
10762             if (!is_error(ret)) {
10763                 if (ret > arg2) {
10764                     /* More data returned than the caller's buffer will fit.
10765                      * This only happens if sizeof(abi_long) < sizeof(long)
10766                      * and the caller passed us a buffer holding an odd number
10767                      * of abi_longs. If the host kernel is actually using the
10768                      * extra 4 bytes then fail EINVAL; otherwise we can just
10769                      * ignore them and only copy the interesting part.
10770                      */
10771                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10772                     if (numcpus > arg2 * 8) {
10773                         return -TARGET_EINVAL;
10774                     }
10775                     ret = arg2;
10776                 }
10777 
10778                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10779                     return -TARGET_EFAULT;
10780                 }
10781             }
10782         }
10783         return ret;
10784     case TARGET_NR_sched_setaffinity:
10785         {
10786             unsigned int mask_size;
10787             unsigned long *mask;
10788 
10789             /*
10790              * sched_setaffinity needs multiples of ulong, so need to take
10791              * care of mismatches between target ulong and host ulong sizes.
10792              */
10793             if (arg2 & (sizeof(abi_ulong) - 1)) {
10794                 return -TARGET_EINVAL;
10795             }
10796             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10797             mask = alloca(mask_size);
10798 
10799             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10800             if (ret) {
10801                 return ret;
10802             }
10803 
10804             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10805         }
10806     case TARGET_NR_getcpu:
10807         {
10808             unsigned cpu, node;
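                  /* The third (tcache) argument is unused by the kernel, so pass NULL. */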
10809             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10810                                        arg2 ? &node : NULL,
10811                                        NULL));
10812             if (is_error(ret)) {
10813                 return ret;
10814             }
10815             if (arg1 && put_user_u32(cpu, arg1)) {
10816                 return -TARGET_EFAULT;
10817             }
10818             if (arg2 && put_user_u32(node, arg2)) {
10819                 return -TARGET_EFAULT;
10820             }
10821         }
10822         return ret;
10823     case TARGET_NR_sched_setparam:
10824         {
10825             struct target_sched_param *target_schp;
10826             struct sched_param schp;
10827 
10828             if (arg2 == 0) {
10829                 return -TARGET_EINVAL;
10830             }
10831             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10832                 return -TARGET_EFAULT;
10833             }
10834             schp.sched_priority = tswap32(target_schp->sched_priority);
10835             unlock_user_struct(target_schp, arg2, 0);
10836             return get_errno(sys_sched_setparam(arg1, &schp));
10837         }
10838     case TARGET_NR_sched_getparam:
10839         {
10840             struct target_sched_param *target_schp;
10841             struct sched_param schp;
10842 
10843             if (arg2 == 0) {
10844                 return -TARGET_EINVAL;
10845             }
10846             ret = get_errno(sys_sched_getparam(arg1, &schp));
10847             if (!is_error(ret)) {
10848                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10849                     return -TARGET_EFAULT;
10850                 }
10851                 target_schp->sched_priority = tswap32(schp.sched_priority);
10852                 unlock_user_struct(target_schp, arg2, 1);
10853             }
10854         }
10855         return ret;
10856     case TARGET_NR_sched_setscheduler:
10857         {
10858             struct target_sched_param *target_schp;
10859             struct sched_param schp;
10860             if (arg3 == 0) {
10861                 return -TARGET_EINVAL;
10862             }
10863             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10864                 return -TARGET_EFAULT;
10865             }
10866             schp.sched_priority = tswap32(target_schp->sched_priority);
10867             unlock_user_struct(target_schp, arg3, 0);
10868             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10869         }
10870     case TARGET_NR_sched_getscheduler:
10871         return get_errno(sys_sched_getscheduler(arg1));
10872     case TARGET_NR_sched_getattr:
10873         {
10874             struct target_sched_attr *target_scha;
10875             struct sched_attr scha;
10876             if (arg2 == 0) {
10877                 return -TARGET_EINVAL;
10878             }
10879             if (arg3 > sizeof(scha)) {
10880                 arg3 = sizeof(scha);
10881             }
10882             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10883             if (!is_error(ret)) {
10884                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10885                 if (!target_scha) {
10886                     return -TARGET_EFAULT;
10887                 }
10888                 target_scha->size = tswap32(scha.size);
10889                 target_scha->sched_policy = tswap32(scha.sched_policy);
10890                 target_scha->sched_flags = tswap64(scha.sched_flags);
10891                 target_scha->sched_nice = tswap32(scha.sched_nice);
10892                 target_scha->sched_priority = tswap32(scha.sched_priority);
10893                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10894                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10895                 target_scha->sched_period = tswap64(scha.sched_period);
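                      /* Copy the util-clamp fields only if the kernel reported them. */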
10896                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10897                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10898                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10899                 }
10900                 unlock_user(target_scha, arg2, arg3);
10901             }
10902             return ret;
10903         }
10904     case TARGET_NR_sched_setattr:
10905         {
10906             struct target_sched_attr *target_scha;
10907             struct sched_attr scha;
10908             uint32_t size;
10909             int zeroed;
10910             if (arg2 == 0) {
10911                 return -TARGET_EINVAL;
10912             }
10913             if (get_user_u32(size, arg2)) {
10914                 return -TARGET_EFAULT;
10915             }
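                  /*
                   * A size of zero selects the original sched_attr layout,
                   * i.e. everything before the util-clamp fields.
                   */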
10916             if (!size) {
10917                 size = offsetof(struct target_sched_attr, sched_util_min);
10918             }
10919             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10920                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10921                     return -TARGET_EFAULT;
10922                 }
10923                 return -TARGET_E2BIG;
10924             }
10925 
10926             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
10927             if (zeroed < 0) {
10928                 return zeroed;
10929             } else if (zeroed == 0) {
10930                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10931                     return -TARGET_EFAULT;
10932                 }
10933                 return -TARGET_E2BIG;
10934             }
10935             if (size > sizeof(struct target_sched_attr)) {
10936                 size = sizeof(struct target_sched_attr);
10937             }
10938 
10939             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
10940             if (!target_scha) {
10941                 return -TARGET_EFAULT;
10942             }
10943             scha.size = size;
10944             scha.sched_policy = tswap32(target_scha->sched_policy);
10945             scha.sched_flags = tswap64(target_scha->sched_flags);
10946             scha.sched_nice = tswap32(target_scha->sched_nice);
10947             scha.sched_priority = tswap32(target_scha->sched_priority);
10948             scha.sched_runtime = tswap64(target_scha->sched_runtime);
10949             scha.sched_deadline = tswap64(target_scha->sched_deadline);
10950             scha.sched_period = tswap64(target_scha->sched_period);
10951             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
10952                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
10953                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
10954             }
10955             unlock_user(target_scha, arg2, 0);
10956             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
10957         }
10958     case TARGET_NR_sched_yield:
10959         return get_errno(sched_yield());
10960     case TARGET_NR_sched_get_priority_max:
10961         return get_errno(sched_get_priority_max(arg1));
10962     case TARGET_NR_sched_get_priority_min:
10963         return get_errno(sched_get_priority_min(arg1));
10964 #ifdef TARGET_NR_sched_rr_get_interval
10965     case TARGET_NR_sched_rr_get_interval:
10966         {
10967             struct timespec ts;
10968             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10969             if (!is_error(ret)) {
10970                 ret = host_to_target_timespec(arg2, &ts);
10971             }
10972         }
10973         return ret;
10974 #endif
10975 #ifdef TARGET_NR_sched_rr_get_interval_time64
10976     case TARGET_NR_sched_rr_get_interval_time64:
10977         {
10978             struct timespec ts;
10979             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10980             if (!is_error(ret)) {
10981                 ret = host_to_target_timespec64(arg2, &ts);
10982             }
10983         }
10984         return ret;
10985 #endif
10986 #if defined(TARGET_NR_nanosleep)
10987     case TARGET_NR_nanosleep:
10988         {
10989             struct timespec req, rem;
10990             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
10991             ret = get_errno(safe_nanosleep(&req, &rem));
10992             if (is_error(ret) && arg2) {
10993                 host_to_target_timespec(arg2, &rem);
10994             }
10995         }
10996         return ret;
10997 #endif
10998     case TARGET_NR_prctl:
10999         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11001 #ifdef TARGET_NR_arch_prctl
11002     case TARGET_NR_arch_prctl:
11003         return do_arch_prctl(cpu_env, arg1, arg2);
11004 #endif
11005 #ifdef TARGET_NR_pread64
11006     case TARGET_NR_pread64:
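              /*
               * Some 32-bit ABIs pass 64-bit arguments in even/odd register
               * pairs, inserting a padding register; shift the offset halves
               * down when that applies.
               */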
11007         if (regpairs_aligned(cpu_env, num)) {
11008             arg4 = arg5;
11009             arg5 = arg6;
11010         }
11011         if (arg2 == 0 && arg3 == 0) {
11012             /* Special-case NULL buffer and zero length, which should succeed */
11013             p = 0;
11014         } else {
11015             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11016             if (!p) {
11017                 return -TARGET_EFAULT;
11018             }
11019         }
11020         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11021         unlock_user(p, arg2, ret);
11022         return ret;
11023     case TARGET_NR_pwrite64:
11024         if (regpairs_aligned(cpu_env, num)) {
11025             arg4 = arg5;
11026             arg5 = arg6;
11027         }
11028         if (arg2 == 0 && arg3 == 0) {
11029             /* Special-case NULL buffer and zero length, which should succeed */
11030             p = 0;
11031         } else {
11032             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11033             if (!p) {
11034                 return -TARGET_EFAULT;
11035             }
11036         }
11037         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11038         unlock_user(p, arg2, 0);
11039         return ret;
11040 #endif
11041     case TARGET_NR_getcwd:
11042         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11043             return -TARGET_EFAULT;
11044         ret = get_errno(sys_getcwd1(p, arg2));
11045         unlock_user(p, arg1, ret);
11046         return ret;
11047     case TARGET_NR_capget:
11048     case TARGET_NR_capset:
11049     {
11050         struct target_user_cap_header *target_header;
11051         struct target_user_cap_data *target_data = NULL;
11052         struct __user_cap_header_struct header;
11053         struct __user_cap_data_struct data[2];
11054         struct __user_cap_data_struct *dataptr = NULL;
11055         int i, target_datalen;
11056         int data_items = 1;
11057 
11058         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11059             return -TARGET_EFAULT;
11060         }
11061         header.version = tswap32(target_header->version);
11062         header.pid = tswap32(target_header->pid);
11063 
11064         if (header.version != _LINUX_CAPABILITY_VERSION) {
11065             /* Versions 2 and up take a pointer to two user_data structs */
11066             data_items = 2;
11067         }
11068 
11069         target_datalen = sizeof(*target_data) * data_items;
11070 
11071         if (arg2) {
11072             if (num == TARGET_NR_capget) {
11073                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11074             } else {
11075                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11076             }
11077             if (!target_data) {
11078                 unlock_user_struct(target_header, arg1, 0);
11079                 return -TARGET_EFAULT;
11080             }
11081 
11082             if (num == TARGET_NR_capset) {
11083                 for (i = 0; i < data_items; i++) {
11084                     data[i].effective = tswap32(target_data[i].effective);
11085                     data[i].permitted = tswap32(target_data[i].permitted);
11086                     data[i].inheritable = tswap32(target_data[i].inheritable);
11087                 }
11088             }
11089 
11090             dataptr = data;
11091         }
11092 
11093         if (num == TARGET_NR_capget) {
11094             ret = get_errno(capget(&header, dataptr));
11095         } else {
11096             ret = get_errno(capset(&header, dataptr));
11097         }
11098 
11099         /* The kernel always updates version for both capget and capset */
11100         target_header->version = tswap32(header.version);
11101         unlock_user_struct(target_header, arg1, 1);
11102 
11103         if (arg2) {
11104             if (num == TARGET_NR_capget) {
11105                 for (i = 0; i < data_items; i++) {
11106                     target_data[i].effective = tswap32(data[i].effective);
11107                     target_data[i].permitted = tswap32(data[i].permitted);
11108                     target_data[i].inheritable = tswap32(data[i].inheritable);
11109                 }
11110                 unlock_user(target_data, arg2, target_datalen);
11111             } else {
11112                 unlock_user(target_data, arg2, 0);
11113             }
11114         }
11115         return ret;
11116     }
11117     case TARGET_NR_sigaltstack:
11118         return do_sigaltstack(arg1, arg2, cpu_env);
11119 
11120 #ifdef CONFIG_SENDFILE
11121 #ifdef TARGET_NR_sendfile
11122     case TARGET_NR_sendfile:
11123     {
11124         off_t *offp = NULL;
11125         off_t off;
11126         if (arg3) {
11127             ret = get_user_sal(off, arg3);
11128             if (is_error(ret)) {
11129                 return ret;
11130             }
11131             offp = &off;
11132         }
11133         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11134         if (!is_error(ret) && arg3) {
11135             abi_long ret2 = put_user_sal(off, arg3);
11136             if (is_error(ret2)) {
11137                 ret = ret2;
11138             }
11139         }
11140         return ret;
11141     }
11142 #endif
11143 #ifdef TARGET_NR_sendfile64
11144     case TARGET_NR_sendfile64:
11145     {
11146         off_t *offp = NULL;
11147         off_t off;
11148         if (arg3) {
11149             ret = get_user_s64(off, arg3);
11150             if (is_error(ret)) {
11151                 return ret;
11152             }
11153             offp = &off;
11154         }
11155         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11156         if (!is_error(ret) && arg3) {
11157             abi_long ret2 = put_user_s64(off, arg3);
11158             if (is_error(ret2)) {
11159                 ret = ret2;
11160             }
11161         }
11162         return ret;
11163     }
11164 #endif
11165 #endif
11166 #ifdef TARGET_NR_vfork
11167     case TARGET_NR_vfork:
11168         return get_errno(do_fork(cpu_env,
11169                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11170                          0, 0, 0, 0));
11171 #endif
11172 #ifdef TARGET_NR_ugetrlimit
11173     case TARGET_NR_ugetrlimit:
11174     {
11175         struct rlimit rlim;
11176         int resource = target_to_host_resource(arg1);
11177         ret = get_errno(getrlimit(resource, &rlim));
11178         if (!is_error(ret)) {
11179             struct target_rlimit *target_rlim;
11180             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11181                 return -TARGET_EFAULT;
11182             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11183             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11184             unlock_user_struct(target_rlim, arg2, 1);
11185         }
11186         return ret;
11187     }
11188 #endif
11189 #ifdef TARGET_NR_truncate64
11190     case TARGET_NR_truncate64:
11191         if (!(p = lock_user_string(arg1)))
11192             return -TARGET_EFAULT;
11193         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11194         unlock_user(p, arg1, 0);
11195         return ret;
11196 #endif
11197 #ifdef TARGET_NR_ftruncate64
11198     case TARGET_NR_ftruncate64:
11199         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11200 #endif
11201 #ifdef TARGET_NR_stat64
11202     case TARGET_NR_stat64:
11203         if (!(p = lock_user_string(arg1))) {
11204             return -TARGET_EFAULT;
11205         }
11206         ret = get_errno(stat(path(p), &st));
11207         unlock_user(p, arg1, 0);
11208         if (!is_error(ret))
11209             ret = host_to_target_stat64(cpu_env, arg2, &st);
11210         return ret;
11211 #endif
11212 #ifdef TARGET_NR_lstat64
11213     case TARGET_NR_lstat64:
11214         if (!(p = lock_user_string(arg1))) {
11215             return -TARGET_EFAULT;
11216         }
11217         ret = get_errno(lstat(path(p), &st));
11218         unlock_user(p, arg1, 0);
11219         if (!is_error(ret))
11220             ret = host_to_target_stat64(cpu_env, arg2, &st);
11221         return ret;
11222 #endif
11223 #ifdef TARGET_NR_fstat64
11224     case TARGET_NR_fstat64:
11225         ret = get_errno(fstat(arg1, &st));
11226         if (!is_error(ret))
11227             ret = host_to_target_stat64(cpu_env, arg2, &st);
11228         return ret;
11229 #endif
11230 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11231 #ifdef TARGET_NR_fstatat64
11232     case TARGET_NR_fstatat64:
11233 #endif
11234 #ifdef TARGET_NR_newfstatat
11235     case TARGET_NR_newfstatat:
11236 #endif
11237         if (!(p = lock_user_string(arg2))) {
11238             return -TARGET_EFAULT;
11239         }
11240         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11241         unlock_user(p, arg2, 0);
11242         if (!is_error(ret))
11243             ret = host_to_target_stat64(cpu_env, arg3, &st);
11244         return ret;
11245 #endif
11246 #if defined(TARGET_NR_statx)
11247     case TARGET_NR_statx:
11248         {
11249             struct target_statx *target_stx;
11250             int dirfd = arg1;
11251             int flags = arg3;
11252 
11253             p = lock_user_string(arg2);
11254             if (p == NULL) {
11255                 return -TARGET_EFAULT;
11256             }
11257 #if defined(__NR_statx)
11258             {
11259                 /*
11260                  * It is assumed that struct statx is architecture independent.
11261                  */
11262                 struct target_statx host_stx;
11263                 int mask = arg4;
11264 
11265                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11266                 if (!is_error(ret)) {
11267                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11268                         unlock_user(p, arg2, 0);
11269                         return -TARGET_EFAULT;
11270                     }
11271                 }
11272 
11273                 if (ret != -TARGET_ENOSYS) {
11274                     unlock_user(p, arg2, 0);
11275                     return ret;
11276                 }
11277             }
11278 #endif
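                  /*
                   * Either the host lacks statx() or it returned ENOSYS:
                   * fall back to fstatat() and build a statx result by hand.
                   */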
11279             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11280             unlock_user(p, arg2, 0);
11281 
11282             if (!is_error(ret)) {
11283                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11284                     return -TARGET_EFAULT;
11285                 }
11286                 memset(target_stx, 0, sizeof(*target_stx));
11287                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11288                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11289                 __put_user(st.st_ino, &target_stx->stx_ino);
11290                 __put_user(st.st_mode, &target_stx->stx_mode);
11291                 __put_user(st.st_uid, &target_stx->stx_uid);
11292                 __put_user(st.st_gid, &target_stx->stx_gid);
11293                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11294                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11295                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11296                 __put_user(st.st_size, &target_stx->stx_size);
11297                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11298                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11299                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11300                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11301                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11302                 unlock_user_struct(target_stx, arg5, 1);
11303             }
11304         }
11305         return ret;
11306 #endif
11307 #ifdef TARGET_NR_lchown
11308     case TARGET_NR_lchown:
11309         if (!(p = lock_user_string(arg1)))
11310             return -TARGET_EFAULT;
11311         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11312         unlock_user(p, arg1, 0);
11313         return ret;
11314 #endif
11315 #ifdef TARGET_NR_getuid
11316     case TARGET_NR_getuid:
11317         return get_errno(high2lowuid(getuid()));
11318 #endif
11319 #ifdef TARGET_NR_getgid
11320     case TARGET_NR_getgid:
11321         return get_errno(high2lowgid(getgid()));
11322 #endif
11323 #ifdef TARGET_NR_geteuid
11324     case TARGET_NR_geteuid:
11325         return get_errno(high2lowuid(geteuid()));
11326 #endif
11327 #ifdef TARGET_NR_getegid
11328     case TARGET_NR_getegid:
11329         return get_errno(high2lowgid(getegid()));
11330 #endif
11331     case TARGET_NR_setreuid:
11332         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11333     case TARGET_NR_setregid:
11334         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11335     case TARGET_NR_getgroups:
11336         {
11337             int gidsetsize = arg1;
11338             target_id *target_grouplist;
11339             gid_t *grouplist;
11340             int i;
11341 
11342             grouplist = alloca(gidsetsize * sizeof(gid_t));
11343             ret = get_errno(getgroups(gidsetsize, grouplist));
11344             if (gidsetsize == 0)
11345                 return ret;
11346             if (!is_error(ret)) {
11347                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11348                 if (!target_grouplist)
11349                     return -TARGET_EFAULT;
11350                 for(i = 0;i < ret; i++)
11351                 for (i = 0; i < ret; i++)
11352                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11353             }
11354         }
11355         return ret;
11356     case TARGET_NR_setgroups:
11357         {
11358             int gidsetsize = arg1;
11359             target_id *target_grouplist;
11360             gid_t *grouplist = NULL;
11361             int i;
11362             if (gidsetsize) {
11363                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11364                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11365                 if (!target_grouplist) {
11366                     return -TARGET_EFAULT;
11367                 }
11368                 for (i = 0; i < gidsetsize; i++) {
11369                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11370                 }
11371                 unlock_user(target_grouplist, arg2, 0);
11372             }
11373             return get_errno(setgroups(gidsetsize, grouplist));
11374         }
11375     case TARGET_NR_fchown:
11376         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11377 #if defined(TARGET_NR_fchownat)
11378     case TARGET_NR_fchownat:
11379         if (!(p = lock_user_string(arg2)))
11380             return -TARGET_EFAULT;
11381         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11382                                  low2highgid(arg4), arg5));
11383         unlock_user(p, arg2, 0);
11384         return ret;
11385 #endif
11386 #ifdef TARGET_NR_setresuid
11387     case TARGET_NR_setresuid:
11388         return get_errno(sys_setresuid(low2highuid(arg1),
11389                                        low2highuid(arg2),
11390                                        low2highuid(arg3)));
11391 #endif
11392 #ifdef TARGET_NR_getresuid
11393     case TARGET_NR_getresuid:
11394         {
11395             uid_t ruid, euid, suid;
11396             ret = get_errno(getresuid(&ruid, &euid, &suid));
11397             if (!is_error(ret)) {
11398                 if (put_user_id(high2lowuid(ruid), arg1)
11399                     || put_user_id(high2lowuid(euid), arg2)
11400                     || put_user_id(high2lowuid(suid), arg3))
11401                     return -TARGET_EFAULT;
11402             }
11403         }
11404         return ret;
11405 #endif
11406 #ifdef TARGET_NR_setresgid
11407     case TARGET_NR_setresgid:
11408         return get_errno(sys_setresgid(low2highgid(arg1),
11409                                        low2highgid(arg2),
11410                                        low2highgid(arg3)));
11411 #endif
11412 #ifdef TARGET_NR_getresgid
11413     case TARGET_NR_getresgid:
11414         {
11415             gid_t rgid, egid, sgid;
11416             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11417             if (!is_error(ret)) {
11418                 if (put_user_id(high2lowgid(rgid), arg1)
11419                     || put_user_id(high2lowgid(egid), arg2)
11420                     || put_user_id(high2lowgid(sgid), arg3))
11421                     return -TARGET_EFAULT;
11422             }
11423         }
11424         return ret;
11425 #endif
11426 #ifdef TARGET_NR_chown
11427     case TARGET_NR_chown:
11428         if (!(p = lock_user_string(arg1)))
11429             return -TARGET_EFAULT;
11430         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11431         unlock_user(p, arg1, 0);
11432         return ret;
11433 #endif
11434     case TARGET_NR_setuid:
11435         return get_errno(sys_setuid(low2highuid(arg1)));
11436     case TARGET_NR_setgid:
11437         return get_errno(sys_setgid(low2highgid(arg1)));
11438     case TARGET_NR_setfsuid:
11439         return get_errno(setfsuid(arg1));
11440     case TARGET_NR_setfsgid:
11441         return get_errno(setfsgid(arg1));
11442 
11443 #ifdef TARGET_NR_lchown32
11444     case TARGET_NR_lchown32:
11445         if (!(p = lock_user_string(arg1)))
11446             return -TARGET_EFAULT;
11447         ret = get_errno(lchown(p, arg2, arg3));
11448         unlock_user(p, arg1, 0);
11449         return ret;
11450 #endif
11451 #ifdef TARGET_NR_getuid32
11452     case TARGET_NR_getuid32:
11453         return get_errno(getuid());
11454 #endif
11455 
11456 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11457    /* Alpha specific */
11458     case TARGET_NR_getxuid:
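              /*
               * getxuid returns two values: the real uid as the normal return
               * value (v0) and the effective uid in register a4.
               */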
11459         {
11460             uid_t euid;
11461             euid = geteuid();
11462             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11463         }
11464         return get_errno(getuid());
11465 #endif
11466 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11467    /* Alpha specific */
11468     case TARGET_NR_getxgid:
11469         {
11470             gid_t egid;
11471             egid = getegid();
11472             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11473         }
11474         return get_errno(getgid());
11475 #endif
11476 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11477     /* Alpha specific */
11478     case TARGET_NR_osf_getsysinfo:
11479         ret = -TARGET_EOPNOTSUPP;
11480         switch (arg1) {
11481           case TARGET_GSI_IEEE_FP_CONTROL:
11482             {
11483                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11484                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11485 
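                      /* Merge the live FPCR status bits into the saved software control word. */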
11486                 swcr &= ~SWCR_STATUS_MASK;
11487                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11488 
11489                 if (put_user_u64 (swcr, arg2))
11490                         return -TARGET_EFAULT;
11491                 ret = 0;
11492             }
11493             break;
11494 
11495           /* case GSI_IEEE_STATE_AT_SIGNAL:
11496              -- Not implemented in linux kernel.
11497              case GSI_UACPROC:
11498              -- Retrieves current unaligned access state; not much used.
11499              case GSI_PROC_TYPE:
11500              -- Retrieves implver information; surely not used.
11501              case GSI_GET_HWRPB:
11502              -- Grabs a copy of the HWRPB; surely not used.
11503           */
11504         }
11505         return ret;
11506 #endif
11507 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11508     /* Alpha specific */
11509     case TARGET_NR_osf_setsysinfo:
11510         ret = -TARGET_EOPNOTSUPP;
11511         switch (arg1) {
11512           case TARGET_SSI_IEEE_FP_CONTROL:
11513             {
11514                 uint64_t swcr, fpcr;
11515 
11516                 if (get_user_u64 (swcr, arg2)) {
11517                     return -TARGET_EFAULT;
11518                 }
11519 
11520                 /*
11521                  * The kernel calls swcr_update_status to update the
11522                  * status bits from the fpcr at every point that it
11523                  * could be queried.  Therefore, we store the status
11524                  * bits only in FPCR.
11525                  */
11526                 ((CPUAlphaState *)cpu_env)->swcr
11527                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11528 
11529                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11530                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11531                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11532                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11533                 ret = 0;
11534             }
11535             break;
11536 
11537           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11538             {
11539                 uint64_t exc, fpcr, fex;
11540 
11541                 if (get_user_u64(exc, arg2)) {
11542                     return -TARGET_EFAULT;
11543                 }
11544                 exc &= SWCR_STATUS_MASK;
11545                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11546 
11547                 /* Old exceptions are not signaled.  */
11548                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11549                 fex = exc & ~fex;
11550                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11551                 fex &= ((CPUArchState *)cpu_env)->swcr;
11552 
11553                 /* Update the hardware fpcr.  */
11554                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11555                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11556 
11557                 if (fex) {
11558                     int si_code = TARGET_FPE_FLTUNK;
11559                     target_siginfo_t info;
11560 
11561                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11562                         si_code = TARGET_FPE_FLTUND;
11563                     }
11564                     if (fex & SWCR_TRAP_ENABLE_INE) {
11565                         si_code = TARGET_FPE_FLTRES;
11566                     }
11567                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11568                         si_code = TARGET_FPE_FLTUND;
11569                     }
11570                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11571                         si_code = TARGET_FPE_FLTOVF;
11572                     }
11573                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11574                         si_code = TARGET_FPE_FLTDIV;
11575                     }
11576                     if (fex & SWCR_TRAP_ENABLE_INV) {
11577                         si_code = TARGET_FPE_FLTINV;
11578                     }
11579 
11580                     info.si_signo = SIGFPE;
11581                     info.si_errno = 0;
11582                     info.si_code = si_code;
11583                     info._sifields._sigfault._addr
11584                         = ((CPUArchState *)cpu_env)->pc;
11585                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11586                                  QEMU_SI_FAULT, &info);
11587                 }
11588                 ret = 0;
11589             }
11590             break;
11591 
11592           /* case SSI_NVPAIRS:
11593              -- Used with SSIN_UACPROC to enable unaligned accesses.
11594              case SSI_IEEE_STATE_AT_SIGNAL:
11595              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11596              -- Not implemented in linux kernel
11597           */
11598         }
11599         return ret;
11600 #endif
11601 #ifdef TARGET_NR_osf_sigprocmask
11602     /* Alpha specific.  */
11603     case TARGET_NR_osf_sigprocmask:
11604         {
11605             abi_ulong mask;
11606             int how;
11607             sigset_t set, oldset;
11608 
11609             switch(arg1) {
11610             case TARGET_SIG_BLOCK:
11611                 how = SIG_BLOCK;
11612                 break;
11613             case TARGET_SIG_UNBLOCK:
11614                 how = SIG_UNBLOCK;
11615                 break;
11616             case TARGET_SIG_SETMASK:
11617                 how = SIG_SETMASK;
11618                 break;
11619             default:
11620                 return -TARGET_EINVAL;
11621             }
11622             mask = arg2;
11623             target_to_host_old_sigset(&set, &mask);
11624             ret = do_sigprocmask(how, &set, &oldset);
11625             if (!ret) {
11626                 host_to_target_old_sigset(&mask, &oldset);
11627                 ret = mask;
11628             }
11629         }
11630         return ret;
11631 #endif
11632 
11633 #ifdef TARGET_NR_getgid32
11634     case TARGET_NR_getgid32:
11635         return get_errno(getgid());
11636 #endif
11637 #ifdef TARGET_NR_geteuid32
11638     case TARGET_NR_geteuid32:
11639         return get_errno(geteuid());
11640 #endif
11641 #ifdef TARGET_NR_getegid32
11642     case TARGET_NR_getegid32:
11643         return get_errno(getegid());
11644 #endif
11645 #ifdef TARGET_NR_setreuid32
11646     case TARGET_NR_setreuid32:
11647         return get_errno(setreuid(arg1, arg2));
11648 #endif
11649 #ifdef TARGET_NR_setregid32
11650     case TARGET_NR_setregid32:
11651         return get_errno(setregid(arg1, arg2));
11652 #endif
11653 #ifdef TARGET_NR_getgroups32
11654     case TARGET_NR_getgroups32:
11655         {
11656             int gidsetsize = arg1;
11657             uint32_t *target_grouplist;
11658             gid_t *grouplist;
11659             int i;
11660 
11661             grouplist = alloca(gidsetsize * sizeof(gid_t));
11662             ret = get_errno(getgroups(gidsetsize, grouplist));
11663             if (gidsetsize == 0)
11664                 return ret;
11665             if (!is_error(ret)) {
11666                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11667                 if (!target_grouplist) {
11668                     return -TARGET_EFAULT;
11669                 }
11670                 for (i = 0; i < ret; i++)
11671                     target_grouplist[i] = tswap32(grouplist[i]);
11672                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11673             }
11674         }
11675         return ret;
11676 #endif
11677 #ifdef TARGET_NR_setgroups32
11678     case TARGET_NR_setgroups32:
11679         {
11680             int gidsetsize = arg1;
11681             uint32_t *target_grouplist;
11682             gid_t *grouplist;
11683             int i;
11684 
11685             grouplist = alloca(gidsetsize * sizeof(gid_t));
11686             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11687             if (!target_grouplist) {
11688                 return -TARGET_EFAULT;
11689             }
11690             for (i = 0; i < gidsetsize; i++)
11691                 grouplist[i] = tswap32(target_grouplist[i]);
11692             unlock_user(target_grouplist, arg2, 0);
11693             return get_errno(setgroups(gidsetsize, grouplist));
11694         }
11695 #endif
11696 #ifdef TARGET_NR_fchown32
11697     case TARGET_NR_fchown32:
11698         return get_errno(fchown(arg1, arg2, arg3));
11699 #endif
11700 #ifdef TARGET_NR_setresuid32
11701     case TARGET_NR_setresuid32:
11702         return get_errno(sys_setresuid(arg1, arg2, arg3));
11703 #endif
11704 #ifdef TARGET_NR_getresuid32
11705     case TARGET_NR_getresuid32:
11706         {
11707             uid_t ruid, euid, suid;
11708             ret = get_errno(getresuid(&ruid, &euid, &suid));
11709             if (!is_error(ret)) {
11710                 if (put_user_u32(ruid, arg1)
11711                     || put_user_u32(euid, arg2)
11712                     || put_user_u32(suid, arg3))
11713                     return -TARGET_EFAULT;
11714             }
11715         }
11716         return ret;
11717 #endif
11718 #ifdef TARGET_NR_setresgid32
11719     case TARGET_NR_setresgid32:
11720         return get_errno(sys_setresgid(arg1, arg2, arg3));
11721 #endif
11722 #ifdef TARGET_NR_getresgid32
11723     case TARGET_NR_getresgid32:
11724         {
11725             gid_t rgid, egid, sgid;
11726             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11727             if (!is_error(ret)) {
11728                 if (put_user_u32(rgid, arg1)
11729                     || put_user_u32(egid, arg2)
11730                     || put_user_u32(sgid, arg3))
11731                     return -TARGET_EFAULT;
11732             }
11733         }
11734         return ret;
11735 #endif
11736 #ifdef TARGET_NR_chown32
11737     case TARGET_NR_chown32:
11738         if (!(p = lock_user_string(arg1)))
11739             return -TARGET_EFAULT;
11740         ret = get_errno(chown(p, arg2, arg3));
11741         unlock_user(p, arg1, 0);
11742         return ret;
11743 #endif
11744 #ifdef TARGET_NR_setuid32
11745     case TARGET_NR_setuid32:
11746         return get_errno(sys_setuid(arg1));
11747 #endif
11748 #ifdef TARGET_NR_setgid32
11749     case TARGET_NR_setgid32:
11750         return get_errno(sys_setgid(arg1));
11751 #endif
11752 #ifdef TARGET_NR_setfsuid32
11753     case TARGET_NR_setfsuid32:
11754         return get_errno(setfsuid(arg1));
11755 #endif
11756 #ifdef TARGET_NR_setfsgid32
11757     case TARGET_NR_setfsgid32:
11758         return get_errno(setfsgid(arg1));
11759 #endif
11760 #ifdef TARGET_NR_mincore
11761     case TARGET_NR_mincore:
11762         {
11763             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11764             if (!a) {
11765                 return -TARGET_ENOMEM;
11766             }
11767             p = lock_user_string(arg3);
11768             if (!p) {
11769                 ret = -TARGET_EFAULT;
11770             } else {
11771                 ret = get_errno(mincore(a, arg2, p));
11772                 unlock_user(p, arg3, ret);
11773             }
11774             unlock_user(a, arg1, 0);
11775         }
11776         return ret;
11777 #endif
11778 #ifdef TARGET_NR_arm_fadvise64_64
11779     case TARGET_NR_arm_fadvise64_64:
11780         /* arm_fadvise64_64 looks like fadvise64_64 but
11781          * with different argument order: fd, advice, offset, len
11782          * rather than the usual fd, offset, len, advice.
11783          * Note that offset and len are both 64-bit so appear as
11784          * pairs of 32-bit registers.
11785          */
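              /* posix_fadvise() returns the error number directly rather than via errno. */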
11786         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11787                             target_offset64(arg5, arg6), arg2);
11788         return -host_to_target_errno(ret);
11789 #endif
11790 
11791 #if TARGET_ABI_BITS == 32
11792 
11793 #ifdef TARGET_NR_fadvise64_64
11794     case TARGET_NR_fadvise64_64:
11795 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11796         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11797         ret = arg2;
11798         arg2 = arg3;
11799         arg3 = arg4;
11800         arg4 = arg5;
11801         arg5 = arg6;
11802         arg6 = ret;
11803 #else
11804         /* 6 args: fd, offset (high, low), len (high, low), advice */
11805         if (regpairs_aligned(cpu_env, num)) {
11806             /* offset is in (3,4), len in (5,6) and advice in 7 */
11807             arg2 = arg3;
11808             arg3 = arg4;
11809             arg4 = arg5;
11810             arg5 = arg6;
11811             arg6 = arg7;
11812         }
11813 #endif
11814         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11815                             target_offset64(arg4, arg5), arg6);
11816         return -host_to_target_errno(ret);
11817 #endif
11818 
11819 #ifdef TARGET_NR_fadvise64
11820     case TARGET_NR_fadvise64:
11821         /* 5 args: fd, offset (high, low), len, advice */
11822         if (regpairs_aligned(cpu_env, num)) {
11823             /* offset is in (3,4), len in 5 and advice in 6 */
11824             arg2 = arg3;
11825             arg3 = arg4;
11826             arg4 = arg5;
11827             arg5 = arg6;
11828         }
11829         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11830         return -host_to_target_errno(ret);
11831 #endif
11832 
11833 #else /* not a 32-bit ABI */
11834 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11835 #ifdef TARGET_NR_fadvise64_64
11836     case TARGET_NR_fadvise64_64:
11837 #endif
11838 #ifdef TARGET_NR_fadvise64
11839     case TARGET_NR_fadvise64:
11840 #endif
11841 #ifdef TARGET_S390X
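              /*
               * The s390x ABI numbers POSIX_FADV_DONTNEED/NOREUSE as 6/7
               * instead of the generic 4/5, so remap the target's values to
               * the host definitions.
               */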
11842         switch (arg4) {
11843         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11844         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11845         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11846         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11847         default: break;
11848         }
11849 #endif
11850         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11851 #endif
11852 #endif /* end of 64-bit ABI fadvise handling */
11853 
11854 #ifdef TARGET_NR_madvise
11855     case TARGET_NR_madvise:
11856         /* A straight passthrough may not be safe because qemu sometimes
11857            turns private file-backed mappings into anonymous mappings.
11858            This will break MADV_DONTNEED.
11859            This is a hint, so ignoring and returning success is ok.  */
11860         return 0;
11861 #endif
11862 #ifdef TARGET_NR_fcntl64
11863     case TARGET_NR_fcntl64:
11864     {
11865         int cmd;
11866         struct flock64 fl;
11867         from_flock64_fn *copyfrom = copy_from_user_flock64;
11868         to_flock64_fn *copyto = copy_to_user_flock64;
11869 
11870 #ifdef TARGET_ARM
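              /*
               * Old-ABI (non-EABI) ARM only aligns 64-bit fields to 4 bytes,
               * which changes the struct flock64 layout, so use the OABI copy
               * helpers for those tasks.
               */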
11871         if (!((CPUARMState *)cpu_env)->eabi) {
11872             copyfrom = copy_from_user_oabi_flock64;
11873             copyto = copy_to_user_oabi_flock64;
11874         }
11875 #endif
11876 
11877         cmd = target_to_host_fcntl_cmd(arg2);
11878         if (cmd == -TARGET_EINVAL) {
11879             return cmd;
11880         }
11881 
11882         switch(arg2) {
11883         case TARGET_F_GETLK64:
11884             ret = copyfrom(&fl, arg3);
11885             if (ret) {
11886                 break;
11887             }
11888             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11889             if (ret == 0) {
11890                 ret = copyto(arg3, &fl);
11891             }
11892             break;
11893 
11894         case TARGET_F_SETLK64:
11895         case TARGET_F_SETLKW64:
11896             ret = copyfrom(&fl, arg3);
11897             if (ret) {
11898                 break;
11899             }
11900             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11901             break;
11902         default:
11903             ret = do_fcntl(arg1, arg2, arg3);
11904             break;
11905         }
11906         return ret;
11907     }
11908 #endif
11909 #ifdef TARGET_NR_cacheflush
11910     case TARGET_NR_cacheflush:
11911         /* self-modifying code is handled automatically, so nothing needed */
11912         return 0;
11913 #endif
11914 #ifdef TARGET_NR_getpagesize
11915     case TARGET_NR_getpagesize:
11916         return TARGET_PAGE_SIZE;
11917 #endif
11918     case TARGET_NR_gettid:
11919         return get_errno(sys_gettid());
11920 #ifdef TARGET_NR_readahead
11921     case TARGET_NR_readahead:
11922 #if TARGET_ABI_BITS == 32
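              /* On 32-bit ABIs the 64-bit offset arrives as a pair of registers. */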
11923         if (regpairs_aligned(cpu_env, num)) {
11924             arg2 = arg3;
11925             arg3 = arg4;
11926             arg4 = arg5;
11927         }
11928         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11929 #else
11930         ret = get_errno(readahead(arg1, arg2, arg3));
11931 #endif
11932         return ret;
11933 #endif
11934 #ifdef CONFIG_ATTR
11935 #ifdef TARGET_NR_setxattr
11936     case TARGET_NR_listxattr:
11937     case TARGET_NR_llistxattr:
11938     {
11939         void *p, *b = 0;
11940         if (arg2) {
11941             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11942             if (!b) {
11943                 return -TARGET_EFAULT;
11944             }
11945         }
11946         p = lock_user_string(arg1);
11947         if (p) {
11948             if (num == TARGET_NR_listxattr) {
11949                 ret = get_errno(listxattr(p, b, arg3));
11950             } else {
11951                 ret = get_errno(llistxattr(p, b, arg3));
11952             }
11953         } else {
11954             ret = -TARGET_EFAULT;
11955         }
11956         unlock_user(p, arg1, 0);
11957         unlock_user(b, arg2, arg3);
11958         return ret;
11959     }
11960     case TARGET_NR_flistxattr:
11961     {
11962         void *b = 0;
11963         if (arg2) {
11964             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11965             if (!b) {
11966                 return -TARGET_EFAULT;
11967             }
11968         }
11969         ret = get_errno(flistxattr(arg1, b, arg3));
11970         unlock_user(b, arg2, arg3);
11971         return ret;
11972     }
11973     case TARGET_NR_setxattr:
11974     case TARGET_NR_lsetxattr:
11975         {
11976             void *p, *n, *v = 0;
11977             if (arg3) {
11978                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11979                 if (!v) {
11980                     return -TARGET_EFAULT;
11981                 }
11982             }
11983             p = lock_user_string(arg1);
11984             n = lock_user_string(arg2);
11985             if (p && n) {
11986                 if (num == TARGET_NR_setxattr) {
11987                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11988                 } else {
11989                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11990                 }
11991             } else {
11992                 ret = -TARGET_EFAULT;
11993             }
11994             unlock_user(p, arg1, 0);
11995             unlock_user(n, arg2, 0);
11996             unlock_user(v, arg3, 0);
11997         }
11998         return ret;
11999     case TARGET_NR_fsetxattr:
12000         {
12001             void *n, *v = 0;
12002             if (arg3) {
12003                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12004                 if (!v) {
12005                     return -TARGET_EFAULT;
12006                 }
12007             }
12008             n = lock_user_string(arg2);
12009             if (n) {
12010                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12011             } else {
12012                 ret = -TARGET_EFAULT;
12013             }
12014             unlock_user(n, arg2, 0);
12015             unlock_user(v, arg3, 0);
12016         }
12017         return ret;
12018     case TARGET_NR_getxattr:
12019     case TARGET_NR_lgetxattr:
12020         {
12021             void *p, *n, *v = 0;
12022             if (arg3) {
12023                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12024                 if (!v) {
12025                     return -TARGET_EFAULT;
12026                 }
12027             }
12028             p = lock_user_string(arg1);
12029             n = lock_user_string(arg2);
12030             if (p && n) {
12031                 if (num == TARGET_NR_getxattr) {
12032                     ret = get_errno(getxattr(p, n, v, arg4));
12033                 } else {
12034                     ret = get_errno(lgetxattr(p, n, v, arg4));
12035                 }
12036             } else {
12037                 ret = -TARGET_EFAULT;
12038             }
12039             unlock_user(p, arg1, 0);
12040             unlock_user(n, arg2, 0);
12041             unlock_user(v, arg3, arg4);
12042         }
12043         return ret;
12044     case TARGET_NR_fgetxattr:
12045         {
12046             void *n, *v = 0;
12047             if (arg3) {
12048                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12049                 if (!v) {
12050                     return -TARGET_EFAULT;
12051                 }
12052             }
12053             n = lock_user_string(arg2);
12054             if (n) {
12055                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12056             } else {
12057                 ret = -TARGET_EFAULT;
12058             }
12059             unlock_user(n, arg2, 0);
12060             unlock_user(v, arg3, arg4);
12061         }
12062         return ret;
12063     case TARGET_NR_removexattr:
12064     case TARGET_NR_lremovexattr:
12065         {
12066             void *p, *n;
12067             p = lock_user_string(arg1);
12068             n = lock_user_string(arg2);
12069             if (p && n) {
12070                 if (num == TARGET_NR_removexattr) {
12071                     ret = get_errno(removexattr(p, n));
12072                 } else {
12073                     ret = get_errno(lremovexattr(p, n));
12074                 }
12075             } else {
12076                 ret = -TARGET_EFAULT;
12077             }
12078             unlock_user(p, arg1, 0);
12079             unlock_user(n, arg2, 0);
12080         }
12081         return ret;
12082     case TARGET_NR_fremovexattr:
12083         {
12084             void *n;
12085             n = lock_user_string(arg2);
12086             if (n) {
12087                 ret = get_errno(fremovexattr(arg1, n));
12088             } else {
12089                 ret = -TARGET_EFAULT;
12090             }
12091             unlock_user(n, arg2, 0);
12092         }
12093         return ret;
12094 #endif
12095 #endif /* CONFIG_ATTR */
12096 #ifdef TARGET_NR_set_thread_area
12097     case TARGET_NR_set_thread_area:
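              /*
               * Establish the guest's TLS pointer; each target keeps it in an
               * architecture-specific place (a CPU register or the per-thread
               * TaskState).
               */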
12098 #if defined(TARGET_MIPS)
12099         ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12100         return 0;
12101 #elif defined(TARGET_CRIS)
12102         if (arg1 & 0xff) {
12103             ret = -TARGET_EINVAL;
12104         } else {
12105             ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12106             ret = 0;
12107         }
12108         return ret;
12109 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12110         return do_set_thread_area(cpu_env, arg1);
12111 #elif defined(TARGET_M68K)
12112         {
12113             TaskState *ts = cpu->opaque;
12114             ts->tp_value = arg1;
12115             return 0;
12116         }
12117 #else
12118         return -TARGET_ENOSYS;
12119 #endif
12120 #endif
12121 #ifdef TARGET_NR_get_thread_area
12122     case TARGET_NR_get_thread_area:
12123 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12124         return do_get_thread_area(cpu_env, arg1);
12125 #elif defined(TARGET_M68K)
12126         {
12127             TaskState *ts = cpu->opaque;
12128             return ts->tp_value;
12129         }
12130 #else
12131         return -TARGET_ENOSYS;
12132 #endif
12133 #endif
12134 #ifdef TARGET_NR_getdomainname
12135     case TARGET_NR_getdomainname:
12136         return -TARGET_ENOSYS;
12137 #endif
12138 
12139 #ifdef TARGET_NR_clock_settime
12140     case TARGET_NR_clock_settime:
12141     {
12142         struct timespec ts;
12143 
12144         ret = target_to_host_timespec(&ts, arg2);
12145         if (!is_error(ret)) {
12146             ret = get_errno(clock_settime(arg1, &ts));
12147         }
12148         return ret;
12149     }
12150 #endif
12151 #ifdef TARGET_NR_clock_settime64
12152     case TARGET_NR_clock_settime64:
12153     {
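              /*
               * The *_time64 variants differ only in the guest-side layout:
               * struct target__kernel_timespec carries a 64-bit tv_sec so
               * 32-bit guests are not limited to 32-bit time_t. The host call
               * is the same clock_settime().
               */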
12154         struct timespec ts;
12155 
12156         ret = target_to_host_timespec64(&ts, arg2);
12157         if (!is_error(ret)) {
12158             ret = get_errno(clock_settime(arg1, &ts));
12159         }
12160         return ret;
12161     }
12162 #endif
12163 #ifdef TARGET_NR_clock_gettime
12164     case TARGET_NR_clock_gettime:
12165     {
12166         struct timespec ts;
12167         ret = get_errno(clock_gettime(arg1, &ts));
12168         if (!is_error(ret)) {
12169             ret = host_to_target_timespec(arg2, &ts);
12170         }
12171         return ret;
12172     }
12173 #endif
12174 #ifdef TARGET_NR_clock_gettime64
12175     case TARGET_NR_clock_gettime64:
12176     {
12177         struct timespec ts;
12178         ret = get_errno(clock_gettime(arg1, &ts));
12179         if (!is_error(ret)) {
12180             ret = host_to_target_timespec64(arg2, &ts);
12181         }
12182         return ret;
12183     }
12184 #endif
12185 #ifdef TARGET_NR_clock_getres
12186     case TARGET_NR_clock_getres:
12187     {
12188         struct timespec ts;
12189         ret = get_errno(clock_getres(arg1, &ts));
12190         if (!is_error(ret) && host_to_target_timespec(arg2, &ts)) {
12191             return -TARGET_EFAULT;
12192         }
12193         return ret;
12194     }
12195 #endif
12196 #ifdef TARGET_NR_clock_getres_time64
12197     case TARGET_NR_clock_getres_time64:
12198     {
12199         struct timespec ts;
12200         ret = get_errno(clock_getres(arg1, &ts));
12201         if (!is_error(ret) && host_to_target_timespec64(arg2, &ts)) {
12202             return -TARGET_EFAULT;
12203         }
12204         return ret;
12205     }
12206 #endif
12207 #ifdef TARGET_NR_clock_nanosleep
12208     case TARGET_NR_clock_nanosleep:
12209     {
12210         struct timespec ts;
12211         if (target_to_host_timespec(&ts, arg3)) {
12212             return -TARGET_EFAULT;
12213         }
12214         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12215                                              &ts, arg4 ? &ts : NULL));
12216         /*
12217          * If the call is interrupted by a signal handler, it fails with
12218          * -TARGET_EINTR. In that case, if arg4 is not NULL and arg2 is not
12219          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12220          */
12221         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12222             host_to_target_timespec(arg4, &ts)) {
12223               return -TARGET_EFAULT;
12224         }
12225 
12226         return ret;
12227     }
12228 #endif
12229 #ifdef TARGET_NR_clock_nanosleep_time64
12230     case TARGET_NR_clock_nanosleep_time64:
12231     {
12232         struct timespec ts;
12233 
12234         if (target_to_host_timespec64(&ts, arg3)) {
12235             return -TARGET_EFAULT;
12236         }
12237 
12238         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12239                                              &ts, arg4 ? &ts : NULL));
12240 
12241         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12242             host_to_target_timespec64(arg4, &ts)) {
12243             return -TARGET_EFAULT;
12244         }
12245         return ret;
12246     }
12247 #endif
12248 
12249 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12250     case TARGET_NR_set_tid_address:
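              /*
               * g2h() yields the host pointer for the guest address; the kernel
               * remembers it and clears the TID stored there (waking any futex
               * waiter) when the thread exits.
               */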
12251         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12252 #endif
12253 
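          /*
           * tkill/tgkill: signal numbers differ between guest and host ABIs,
           * so translate them before delivery.
           */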
12254     case TARGET_NR_tkill:
12255         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12256 
12257     case TARGET_NR_tgkill:
12258         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12259                          target_to_host_signal(arg3)));
12260 
12261 #ifdef TARGET_NR_set_robust_list
12262     case TARGET_NR_set_robust_list:
12263     case TARGET_NR_get_robust_list:
12264         /* The ABI for supporting robust futexes has userspace pass
12265          * the kernel a pointer to a linked list which is updated by
12266          * userspace after the syscall; the list is walked by the kernel
12267          * when the thread exits. Since the linked list in QEMU guest
12268          * memory isn't a valid linked list for the host and we have
12269          * no way to reliably intercept the thread-death event, we can't
12270          * support these. Silently return ENOSYS so that guest userspace
12271          * falls back to a non-robust futex implementation (which should
12272          * be OK except in the corner case of the guest crashing while
12273          * holding a mutex that is shared with another process via
12274          * shared memory).
12275          */
12276         return -TARGET_ENOSYS;
12277 #endif
12278 
12279 #if defined(TARGET_NR_utimensat)
12280     case TARGET_NR_utimensat:
12281         {
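                  /*
                   * arg3, when non-NULL, points to two target timespecs (access
                   * and modification time); convert both before the host call.
                   */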
12282             struct timespec *tsp, ts[2];
12283             if (!arg3) {
12284                 tsp = NULL;
12285             } else {
12286                 if (target_to_host_timespec(ts, arg3)) {
12287                     return -TARGET_EFAULT;
12288                 }
12289                 if (target_to_host_timespec(ts + 1, arg3 +
12290                                             sizeof(struct target_timespec))) {
12291                     return -TARGET_EFAULT;
12292                 }
12293                 tsp = ts;
12294             }
12295             if (!arg2) {
12296                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12297             } else {
12298                 if (!(p = lock_user_string(arg2))) {
12299                     return -TARGET_EFAULT;
12300                 }
12301                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12302                 unlock_user(p, arg2, 0);
12303             }
12304         }
12305         return ret;
12306 #endif
12307 #ifdef TARGET_NR_utimensat_time64
12308     case TARGET_NR_utimensat_time64:
12309         {
12310             struct timespec *tsp, ts[2];
12311             if (!arg3) {
12312                 tsp = NULL;
12313             } else {
12314                 if (target_to_host_timespec64(ts, arg3)) {
12315                     return -TARGET_EFAULT;
12316                 }
12317                 if (target_to_host_timespec64(ts + 1, arg3 +
12318                                      sizeof(struct target__kernel_timespec))) {
12319                     return -TARGET_EFAULT;
12320                 }
12321                 tsp = ts;
12322             }
12323             if (!arg2) {
12324                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12325             } else {
12326                 p = lock_user_string(arg2);
12327                 if (!p) {
12328                     return -TARGET_EFAULT;
12329                 }
12330                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12331                 unlock_user(p, arg2, 0);
12332             }
12333         }
12334         return ret;
12335 #endif
12336 #ifdef TARGET_NR_futex
12337     case TARGET_NR_futex:
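              /*
               * Operation decoding and timeout conversion are handled in
               * do_futex().
               */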
12338         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12339 #endif
12340 #ifdef TARGET_NR_futex_time64
12341     case TARGET_NR_futex_time64:
12342         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12343 #endif
12344 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12345     case TARGET_NR_inotify_init:
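              /*
               * Register an fd translator so that inotify events read from this
               * descriptor are converted to the target's layout on the way out.
               */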
12346         ret = get_errno(sys_inotify_init());
12347         if (ret >= 0) {
12348             fd_trans_register(ret, &target_inotify_trans);
12349         }
12350         return ret;
12351 #endif
12352 #ifdef CONFIG_INOTIFY1
12353 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12354     case TARGET_NR_inotify_init1:
12355         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12356                                           fcntl_flags_tbl)));
12357         if (ret >= 0) {
12358             fd_trans_register(ret, &target_inotify_trans);
12359         }
12360         return ret;
12361 #endif
12362 #endif
12363 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12364     case TARGET_NR_inotify_add_watch:
12365         p = lock_user_string(arg2);
12366         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12367         unlock_user(p, arg2, 0);
12368         return ret;
12369 #endif
12370 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12371     case TARGET_NR_inotify_rm_watch:
12372         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12373 #endif
12374 
12375 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12376     case TARGET_NR_mq_open:
12377         {
12378             struct mq_attr posix_mq_attr;
12379             struct mq_attr *pposix_mq_attr;
12380             int host_flags;
12381 
12382             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12383             pposix_mq_attr = NULL;
12384             if (arg4) {
12385                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12386                     return -TARGET_EFAULT;
12387                 }
12388                 pposix_mq_attr = &posix_mq_attr;
12389             }
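                  /*
                   * The guest libc (e.g. glibc) strips the leading '/' from the
                   * queue name before issuing the syscall; back up one byte so
                   * the host mq_open() sees the full "/name". mq_unlink below
                   * relies on the same trick.
                   */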
12390             p = lock_user_string(arg1 - 1);
12391             if (!p) {
12392                 return -TARGET_EFAULT;
12393             }
12394             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12395             unlock_user(p, arg1, 0);
12396         }
12397         return ret;
12398 
12399     case TARGET_NR_mq_unlink:
12400         p = lock_user_string(arg1 - 1);
12401         if (!p) {
12402             return -TARGET_EFAULT;
12403         }
12404         ret = get_errno(mq_unlink(p));
12405         unlock_user(p, arg1, 0);
12406         return ret;
12407 
12408 #ifdef TARGET_NR_mq_timedsend
12409     case TARGET_NR_mq_timedsend:
12410         {
12411             struct timespec ts;
12412 
12413             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12414             if (arg5 != 0) {
12415                 if (target_to_host_timespec(&ts, arg5)) {
12416                     return -TARGET_EFAULT;
12417                 }
12418                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12419                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12420                     return -TARGET_EFAULT;
12421                 }
12422             } else {
12423                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12424             }
12425             unlock_user(p, arg2, arg3);
12426         }
12427         return ret;
12428 #endif
12429 #ifdef TARGET_NR_mq_timedsend_time64
12430     case TARGET_NR_mq_timedsend_time64:
12431         {
12432             struct timespec ts;
12433 
12434             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12435             if (arg5 != 0) {
12436                 if (target_to_host_timespec64(&ts, arg5)) {
12437                     return -TARGET_EFAULT;
12438                 }
12439                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12440                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12441                     return -TARGET_EFAULT;
12442                 }
12443             } else {
12444                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12445             }
12446             unlock_user(p, arg2, arg3);
12447         }
12448         return ret;
12449 #endif
12450 
12451 #ifdef TARGET_NR_mq_timedreceive
12452     case TARGET_NR_mq_timedreceive:
12453         {
12454             struct timespec ts;
12455             unsigned int prio;
12456 
12457             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12458             if (arg5 != 0) {
12459                 if (target_to_host_timespec(&ts, arg5)) {
12460                     return -TARGET_EFAULT;
12461                 }
12462                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12463                                                      &prio, &ts));
12464                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12465                     return -TARGET_EFAULT;
12466                 }
12467             } else {
12468                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12469                                                      &prio, NULL));
12470             }
12471             unlock_user(p, arg2, arg3);
12472             if (arg4 != 0)
12473                 put_user_u32(prio, arg4);
12474         }
12475         return ret;
12476 #endif
12477 #ifdef TARGET_NR_mq_timedreceive_time64
12478     case TARGET_NR_mq_timedreceive_time64:
12479         {
12480             struct timespec ts;
12481             unsigned int prio;
12482 
12483             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12484             if (arg5 != 0) {
12485                 if (target_to_host_timespec64(&ts, arg5)) {
12486                     return -TARGET_EFAULT;
12487                 }
12488                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12489                                                      &prio, &ts));
12490                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12491                     return -TARGET_EFAULT;
12492                 }
12493             } else {
12494                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12495                                                      &prio, NULL));
12496             }
12497             unlock_user(p, arg2, arg3);
12498             if (arg4 != 0) {
12499                 put_user_u32(prio, arg4);
12500             }
12501         }
12502         return ret;
12503 #endif
12504 
12505     /*
12506      * TARGET_NR_mq_notify is not implemented for now.
12507      */
12508 
12509     case TARGET_NR_mq_getsetattr:
12510         {
12511             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12512             ret = 0;
12513             if (arg2 != 0) {
12514                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12515                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12516                                            &posix_mq_attr_out));
12517             } else if (arg3 != 0) {
12518                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12519             }
12520             if (ret == 0 && arg3 != 0) {
12521                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12522             }
12523         }
12524         return ret;
12525 #endif
12526 
12527 #ifdef CONFIG_SPLICE
12528 #ifdef TARGET_NR_tee
12529     case TARGET_NR_tee:
12530         {
12531             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12532         }
12533         return ret;
12534 #endif
12535 #ifdef TARGET_NR_splice
12536     case TARGET_NR_splice:
12537         {
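                  /*
                   * The optional guest offset pointers are read into host
                   * loff_t values and written back afterwards so the guest sees
                   * the updated offsets.
                   */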
12538             loff_t loff_in, loff_out;
12539             loff_t *ploff_in = NULL, *ploff_out = NULL;
12540             if (arg2) {
12541                 if (get_user_u64(loff_in, arg2)) {
12542                     return -TARGET_EFAULT;
12543                 }
12544                 ploff_in = &loff_in;
12545             }
12546             if (arg4) {
12547                 if (get_user_u64(loff_out, arg4)) {
12548                     return -TARGET_EFAULT;
12549                 }
12550                 ploff_out = &loff_out;
12551             }
12552             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12553             if (arg2) {
12554                 if (put_user_u64(loff_in, arg2)) {
12555                     return -TARGET_EFAULT;
12556                 }
12557             }
12558             if (arg4) {
12559                 if (put_user_u64(loff_out, arg4)) {
12560                     return -TARGET_EFAULT;
12561                 }
12562             }
12563         }
12564         return ret;
12565 #endif
12566 #ifdef TARGET_NR_vmsplice
12567     case TARGET_NR_vmsplice:
12568         {
12569             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12570             if (vec != NULL) {
12571                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12572                 unlock_iovec(vec, arg2, arg3, 0);
12573             } else {
12574                 ret = -host_to_target_errno(errno);
12575             }
12576         }
12577         return ret;
12578 #endif
12579 #endif /* CONFIG_SPLICE */
12580 #ifdef CONFIG_EVENTFD
12581 #if defined(TARGET_NR_eventfd)
12582     case TARGET_NR_eventfd:
12583         ret = get_errno(eventfd(arg1, 0));
12584         if (ret >= 0) {
12585             fd_trans_register(ret, &target_eventfd_trans);
12586         }
12587         return ret;
12588 #endif
12589 #if defined(TARGET_NR_eventfd2)
12590     case TARGET_NR_eventfd2:
12591     {
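              /*
               * EFD_NONBLOCK and EFD_CLOEXEC share their values with O_NONBLOCK
               * and O_CLOEXEC, which differ between target and host, so
               * translate those two bits and pass the rest through unchanged.
               */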
12592         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12593         if (arg2 & TARGET_O_NONBLOCK) {
12594             host_flags |= O_NONBLOCK;
12595         }
12596         if (arg2 & TARGET_O_CLOEXEC) {
12597             host_flags |= O_CLOEXEC;
12598         }
12599         ret = get_errno(eventfd(arg1, host_flags));
12600         if (ret >= 0) {
12601             fd_trans_register(ret, &target_eventfd_trans);
12602         }
12603         return ret;
12604     }
12605 #endif
12606 #endif /* CONFIG_EVENTFD */
12607 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12608     case TARGET_NR_fallocate:
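              /*
               * On 32-bit ABIs the 64-bit offset and length are split across
               * register pairs and reassembled with target_offset64().
               */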
12609 #if TARGET_ABI_BITS == 32
12610         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12611                                   target_offset64(arg5, arg6)));
12612 #else
12613         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12614 #endif
12615         return ret;
12616 #endif
12617 #if defined(CONFIG_SYNC_FILE_RANGE)
12618 #if defined(TARGET_NR_sync_file_range)
12619     case TARGET_NR_sync_file_range:
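              /*
               * Same 64-bit splitting as fallocate on 32-bit ABIs; TARGET_MIPS
               * additionally inserts a padding argument, so the offsets start
               * at arg3 and the flags arrive in arg7.
               */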
12620 #if TARGET_ABI_BITS == 32
12621 #if defined(TARGET_MIPS)
12622         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12623                                         target_offset64(arg5, arg6), arg7));
12624 #else
12625         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12626                                         target_offset64(arg4, arg5), arg6));
12627 #endif /* !TARGET_MIPS */
12628 #else
12629         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12630 #endif
12631         return ret;
12632 #endif
12633 #if defined(TARGET_NR_sync_file_range2) || \
12634     defined(TARGET_NR_arm_sync_file_range)
12635 #if defined(TARGET_NR_sync_file_range2)
12636     case TARGET_NR_sync_file_range2:
12637 #endif
12638 #if defined(TARGET_NR_arm_sync_file_range)
12639     case TARGET_NR_arm_sync_file_range:
12640 #endif
12641         /* This is like sync_file_range but the arguments are reordered */
12642 #if TARGET_ABI_BITS == 32
12643         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12644                                         target_offset64(arg5, arg6), arg2));
12645 #else
12646         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12647 #endif
12648         return ret;
12649 #endif
12650 #endif
12651 #if defined(TARGET_NR_signalfd4)
12652     case TARGET_NR_signalfd4:
12653         return do_signalfd4(arg1, arg2, arg4);
12654 #endif
12655 #if defined(TARGET_NR_signalfd)
12656     case TARGET_NR_signalfd:
12657         return do_signalfd4(arg1, arg2, 0);
12658 #endif
12659 #if defined(CONFIG_EPOLL)
12660 #if defined(TARGET_NR_epoll_create)
12661     case TARGET_NR_epoll_create:
12662         return get_errno(epoll_create(arg1));
12663 #endif
12664 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12665     case TARGET_NR_epoll_create1:
12666         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12667 #endif
12668 #if defined(TARGET_NR_epoll_ctl)
12669     case TARGET_NR_epoll_ctl:
12670     {
12671         struct epoll_event ep;
12672         struct epoll_event *epp = 0;
12673         if (arg4) {
12674             if (arg2 != EPOLL_CTL_DEL) {
12675                 struct target_epoll_event *target_ep;
12676                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12677                     return -TARGET_EFAULT;
12678                 }
12679                 ep.events = tswap32(target_ep->events);
12680                 /*
12681                  * The epoll_data_t union is just opaque data to the kernel,
12682                  * so we transfer all 64 bits across and need not worry what
12683                  * actual data type it is.
12684                  */
12685                 ep.data.u64 = tswap64(target_ep->data.u64);
12686                 unlock_user_struct(target_ep, arg4, 0);
12687             }
12688             /*
12689              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12690              * non-null pointer, even though the argument is otherwise
12691              * ignored, so pass &ep whenever the guest supplied a pointer.
12692              */
12693             epp = &ep;
12694         }
12695         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12696     }
12697 #endif
12698 
12699 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12700 #if defined(TARGET_NR_epoll_wait)
12701     case TARGET_NR_epoll_wait:
12702 #endif
12703 #if defined(TARGET_NR_epoll_pwait)
12704     case TARGET_NR_epoll_pwait:
12705 #endif
12706     {
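              /*
               * Events are collected in a host-side array and then converted
               * one by one into the guest's target_epoll_event layout, with the
               * events field and the 64-bit data union byte-swapped as needed.
               */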
12707         struct target_epoll_event *target_ep;
12708         struct epoll_event *ep;
12709         int epfd = arg1;
12710         int maxevents = arg3;
12711         int timeout = arg4;
12712 
12713         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12714             return -TARGET_EINVAL;
12715         }
12716 
12717         target_ep = lock_user(VERIFY_WRITE, arg2,
12718                               maxevents * sizeof(struct target_epoll_event), 1);
12719         if (!target_ep) {
12720             return -TARGET_EFAULT;
12721         }
12722 
12723         ep = g_try_new(struct epoll_event, maxevents);
12724         if (!ep) {
12725             unlock_user(target_ep, arg2, 0);
12726             return -TARGET_ENOMEM;
12727         }
12728 
12729         switch (num) {
12730 #if defined(TARGET_NR_epoll_pwait)
12731         case TARGET_NR_epoll_pwait:
12732         {
12733             target_sigset_t *target_set;
12734             sigset_t _set, *set = &_set;
12735 
12736             if (arg5) {
12737                 if (arg6 != sizeof(target_sigset_t)) {
12738                     ret = -TARGET_EINVAL;
12739                     break;
12740                 }
12741 
12742                 target_set = lock_user(VERIFY_READ, arg5,
12743                                        sizeof(target_sigset_t), 1);
12744                 if (!target_set) {
12745                     ret = -TARGET_EFAULT;
12746                     break;
12747                 }
12748                 target_to_host_sigset(set, target_set);
12749                 unlock_user(target_set, arg5, 0);
12750             } else {
12751                 set = NULL;
12752             }
12753 
12754             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12755                                              set, SIGSET_T_SIZE));
12756             break;
12757         }
12758 #endif
12759 #if defined(TARGET_NR_epoll_wait)
12760         case TARGET_NR_epoll_wait:
12761             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12762                                              NULL, 0));
12763             break;
12764 #endif
12765         default:
12766             ret = -TARGET_ENOSYS;
12767         }
12768         if (!is_error(ret)) {
12769             int i;
12770             for (i = 0; i < ret; i++) {
12771                 target_ep[i].events = tswap32(ep[i].events);
12772                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12773             }
12774             unlock_user(target_ep, arg2,
12775                         ret * sizeof(struct target_epoll_event));
12776         } else {
12777             unlock_user(target_ep, arg2, 0);
12778         }
12779         g_free(ep);
12780         return ret;
12781     }
12782 #endif
12783 #endif
12784 #ifdef TARGET_NR_prlimit64
12785     case TARGET_NR_prlimit64:
12786     {
12787         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12788         struct target_rlimit64 *target_rnew, *target_rold;
12789         struct host_rlimit64 rnew, rold, *rnewp = 0;
12790         int resource = target_to_host_resource(arg2);
12791 
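              /*
               * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are
               * deliberately not forwarded to the host: applying the guest's
               * values to the QEMU process itself could affect the emulator.
               */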
12792         if (arg3 && (resource != RLIMIT_AS &&
12793                      resource != RLIMIT_DATA &&
12794                      resource != RLIMIT_STACK)) {
12795             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12796                 return -TARGET_EFAULT;
12797             }
12798             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12799             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12800             unlock_user_struct(target_rnew, arg3, 0);
12801             rnewp = &rnew;
12802         }
12803 
12804         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12805         if (!is_error(ret) && arg4) {
12806             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12807                 return -TARGET_EFAULT;
12808             }
12809             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12810             target_rold->rlim_max = tswap64(rold.rlim_max);
12811             unlock_user_struct(target_rold, arg4, 1);
12812         }
12813         return ret;
12814     }
12815 #endif
12816 #ifdef TARGET_NR_gethostname
12817     case TARGET_NR_gethostname:
12818     {
12819         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12820         if (name) {
12821             ret = get_errno(gethostname(name, arg2));
12822             unlock_user(name, arg1, arg2);
12823         } else {
12824             ret = -TARGET_EFAULT;
12825         }
12826         return ret;
12827     }
12828 #endif
12829 #ifdef TARGET_NR_atomic_cmpxchg_32
12830     case TARGET_NR_atomic_cmpxchg_32:
12831     {
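              /*
               * m68k kernel helper: compare the 32-bit word at guest address
               * arg6 with the old value arg2 and, if they match, store the new
               * value arg1; the previous memory value is returned either way.
               */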
12832         /* This should use start_exclusive() from main.c. */
12833         abi_ulong mem_value;
12834         if (get_user_u32(mem_value, arg6)) {
12835             target_siginfo_t info;
12836             info.si_signo = SIGSEGV;
12837             info.si_errno = 0;
12838             info.si_code = TARGET_SEGV_MAPERR;
12839             info._sifields._sigfault._addr = arg6;
12840             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12841                          QEMU_SI_FAULT, &info);
12842             ret = 0xdeadbeef;
12843 
12844         }
12845         if (mem_value == arg2)
12846             put_user_u32(arg1, arg6);
12847         return mem_value;
12848     }
12849 #endif
12850 #ifdef TARGET_NR_atomic_barrier
12851     case TARGET_NR_atomic_barrier:
12852         /* Like the kernel implementation and the QEMU ARM barrier,
12853            treat this as a no-op. */
12854         return 0;
12855 #endif
12856 
12857 #ifdef TARGET_NR_timer_create
12858     case TARGET_NR_timer_create:
12859     {
12860         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
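              /*
               * The host timer lives in g_posix_timers[]; the guest receives
               * TIMER_MAGIC | index as its handle, which get_timer_id()
               * validates and decodes for the other timer_* syscalls.
               */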
12861 
12862         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12863 
12864         int clkid = arg1;
12865         int timer_index = next_free_host_timer();
12866 
12867         if (timer_index < 0) {
12868             ret = -TARGET_EAGAIN;
12869         } else {
12870             timer_t *phtimer = g_posix_timers + timer_index;
12871 
12872             if (arg2) {
12873                 phost_sevp = &host_sevp;
12874                 ret = target_to_host_sigevent(phost_sevp, arg2);
12875                 if (ret != 0) {
12876                     return ret;
12877                 }
12878             }
12879 
12880             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12881             if (ret) {
12882                 phtimer = NULL;
12883             } else {
12884                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12885                     return -TARGET_EFAULT;
12886                 }
12887             }
12888         }
12889         return ret;
12890     }
12891 #endif
12892 
12893 #ifdef TARGET_NR_timer_settime
12894     case TARGET_NR_timer_settime:
12895     {
12896         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12897          * struct itimerspec * old_value */
12898         target_timer_t timerid = get_timer_id(arg1);
12899 
12900         if (timerid < 0) {
12901             ret = timerid;
12902         } else if (arg3 == 0) {
12903             ret = -TARGET_EINVAL;
12904         } else {
12905             timer_t htimer = g_posix_timers[timerid];
12906             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12907 
12908             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12909                 return -TARGET_EFAULT;
12910             }
12911             ret = get_errno(
12912                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12913             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12914                 return -TARGET_EFAULT;
12915             }
12916         }
12917         return ret;
12918     }
12919 #endif
12920 
12921 #ifdef TARGET_NR_timer_settime64
12922     case TARGET_NR_timer_settime64:
12923     {
12924         target_timer_t timerid = get_timer_id(arg1);
12925 
12926         if (timerid < 0) {
12927             ret = timerid;
12928         } else if (arg3 == 0) {
12929             ret = -TARGET_EINVAL;
12930         } else {
12931             timer_t htimer = g_posix_timers[timerid];
12932             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12933 
12934             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12935                 return -TARGET_EFAULT;
12936             }
12937             ret = get_errno(
12938                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12939             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12940                 return -TARGET_EFAULT;
12941             }
12942         }
12943         return ret;
12944     }
12945 #endif
12946 
12947 #ifdef TARGET_NR_timer_gettime
12948     case TARGET_NR_timer_gettime:
12949     {
12950         /* args: timer_t timerid, struct itimerspec *curr_value */
12951         target_timer_t timerid = get_timer_id(arg1);
12952 
12953         if (timerid < 0) {
12954             ret = timerid;
12955         } else if (!arg2) {
12956             ret = -TARGET_EFAULT;
12957         } else {
12958             timer_t htimer = g_posix_timers[timerid];
12959             struct itimerspec hspec;
12960             ret = get_errno(timer_gettime(htimer, &hspec));
12961 
12962             if (host_to_target_itimerspec(arg2, &hspec)) {
12963                 ret = -TARGET_EFAULT;
12964             }
12965         }
12966         return ret;
12967     }
12968 #endif
12969 
12970 #ifdef TARGET_NR_timer_gettime64
12971     case TARGET_NR_timer_gettime64:
12972     {
12973         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12974         target_timer_t timerid = get_timer_id(arg1);
12975 
12976         if (timerid < 0) {
12977             ret = timerid;
12978         } else if (!arg2) {
12979             ret = -TARGET_EFAULT;
12980         } else {
12981             timer_t htimer = g_posix_timers[timerid];
12982             struct itimerspec hspec;
12983             ret = get_errno(timer_gettime(htimer, &hspec));
12984 
12985             if (host_to_target_itimerspec64(arg2, &hspec)) {
12986                 ret = -TARGET_EFAULT;
12987             }
12988         }
12989         return ret;
12990     }
12991 #endif
12992 
12993 #ifdef TARGET_NR_timer_getoverrun
12994     case TARGET_NR_timer_getoverrun:
12995     {
12996         /* args: timer_t timerid */
12997         target_timer_t timerid = get_timer_id(arg1);
12998 
12999         if (timerid < 0) {
13000             ret = timerid;
13001         } else {
13002             timer_t htimer = g_posix_timers[timerid];
13003             ret = get_errno(timer_getoverrun(htimer));
13004         }
13005         return ret;
13006     }
13007 #endif
13008 
13009 #ifdef TARGET_NR_timer_delete
13010     case TARGET_NR_timer_delete:
13011     {
13012         /* args: timer_t timerid */
13013         target_timer_t timerid = get_timer_id(arg1);
13014 
13015         if (timerid < 0) {
13016             ret = timerid;
13017         } else {
13018             timer_t htimer = g_posix_timers[timerid];
13019             ret = get_errno(timer_delete(htimer));
13020             g_posix_timers[timerid] = 0;
13021         }
13022         return ret;
13023     }
13024 #endif
13025 
13026 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13027     case TARGET_NR_timerfd_create:
13028         return get_errno(timerfd_create(arg1,
13029                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13030 #endif
13031 
13032 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13033     case TARGET_NR_timerfd_gettime:
13034         {
13035             struct itimerspec its_curr;
13036 
13037             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13038 
13039             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13040                 return -TARGET_EFAULT;
13041             }
13042         }
13043         return ret;
13044 #endif
13045 
13046 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13047     case TARGET_NR_timerfd_gettime64:
13048         {
13049             struct itimerspec its_curr;
13050 
13051             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13052 
13053             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13054                 return -TARGET_EFAULT;
13055             }
13056         }
13057         return ret;
13058 #endif
13059 
13060 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13061     case TARGET_NR_timerfd_settime:
13062         {
13063             struct itimerspec its_new, its_old, *p_new;
13064 
13065             if (arg3) {
13066                 if (target_to_host_itimerspec(&its_new, arg3)) {
13067                     return -TARGET_EFAULT;
13068                 }
13069                 p_new = &its_new;
13070             } else {
13071                 p_new = NULL;
13072             }
13073 
13074             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13075 
13076             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13077                 return -TARGET_EFAULT;
13078             }
13079         }
13080         return ret;
13081 #endif
13082 
13083 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13084     case TARGET_NR_timerfd_settime64:
13085         {
13086             struct itimerspec its_new, its_old, *p_new;
13087 
13088             if (arg3) {
13089                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13090                     return -TARGET_EFAULT;
13091                 }
13092                 p_new = &its_new;
13093             } else {
13094                 p_new = NULL;
13095             }
13096 
13097             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13098 
13099             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13100                 return -TARGET_EFAULT;
13101             }
13102         }
13103         return ret;
13104 #endif
13105 
13106 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13107     case TARGET_NR_ioprio_get:
13108         return get_errno(ioprio_get(arg1, arg2));
13109 #endif
13110 
13111 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13112     case TARGET_NR_ioprio_set:
13113         return get_errno(ioprio_set(arg1, arg2, arg3));
13114 #endif
13115 
13116 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13117     case TARGET_NR_setns:
13118         return get_errno(setns(arg1, arg2));
13119 #endif
13120 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13121     case TARGET_NR_unshare:
13122         return get_errno(unshare(arg1));
13123 #endif
13124 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13125     case TARGET_NR_kcmp:
13126         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13127 #endif
13128 #ifdef TARGET_NR_swapcontext
13129     case TARGET_NR_swapcontext:
13130         /* PowerPC specific.  */
13131         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13132 #endif
13133 #ifdef TARGET_NR_memfd_create
13134     case TARGET_NR_memfd_create:
13135         p = lock_user_string(arg1);
13136         if (!p) {
13137             return -TARGET_EFAULT;
13138         }
13139         ret = get_errno(memfd_create(p, arg2));
13140         fd_trans_unregister(ret);
13141         unlock_user(p, arg1, 0);
13142         return ret;
13143 #endif
13144 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13145     case TARGET_NR_membarrier:
13146         return get_errno(membarrier(arg1, arg2));
13147 #endif
13148 
13149 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13150     case TARGET_NR_copy_file_range:
13151         {
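                  /*
                   * As with splice, the optional guest offset pointers are read
                   * in here and the updated offsets written back on success.
                   */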
13152             loff_t inoff, outoff;
13153             loff_t *pinoff = NULL, *poutoff = NULL;
13154 
13155             if (arg2) {
13156                 if (get_user_u64(inoff, arg2)) {
13157                     return -TARGET_EFAULT;
13158                 }
13159                 pinoff = &inoff;
13160             }
13161             if (arg4) {
13162                 if (get_user_u64(outoff, arg4)) {
13163                     return -TARGET_EFAULT;
13164                 }
13165                 poutoff = &outoff;
13166             }
13167             /* Do not sign-extend the count parameter. */
13168             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13169                                                  (abi_ulong)arg5, arg6));
13170             if (!is_error(ret) && ret > 0) {
13171                 if (arg2) {
13172                     if (put_user_u64(inoff, arg2)) {
13173                         return -TARGET_EFAULT;
13174                     }
13175                 }
13176                 if (arg4) {
13177                     if (put_user_u64(outoff, arg4)) {
13178                         return -TARGET_EFAULT;
13179                     }
13180                 }
13181             }
13182         }
13183         return ret;
13184 #endif
13185 
13186 #if defined(TARGET_NR_pivot_root)
13187     case TARGET_NR_pivot_root:
13188         {
13189             void *p2;
13190             p = lock_user_string(arg1); /* new_root */
13191             p2 = lock_user_string(arg2); /* put_old */
13192             if (!p || !p2) {
13193                 ret = -TARGET_EFAULT;
13194             } else {
13195                 ret = get_errno(pivot_root(p, p2));
13196             }
13197             unlock_user(p2, arg2, 0);
13198             unlock_user(p, arg1, 0);
13199         }
13200         return ret;
13201 #endif
13202 
13203     default:
13204         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13205         return -TARGET_ENOSYS;
13206     }
13207     return ret;
13208 }
13209 
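      /*
       * do_syscall() is the entry point used by the per-architecture cpu main
       * loops: it wraps do_syscall1() above with plugin/trace hooks and the
       * optional -strace logging of arguments and return value.
       */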
13210 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13211                     abi_long arg2, abi_long arg3, abi_long arg4,
13212                     abi_long arg5, abi_long arg6, abi_long arg7,
13213                     abi_long arg8)
13214 {
13215     CPUState *cpu = env_cpu(cpu_env);
13216     abi_long ret;
13217 
13218 #ifdef DEBUG_ERESTARTSYS
13219     /* Debug-only code for exercising the syscall-restart code paths
13220      * in the per-architecture cpu main loops: restart every syscall
13221      * the guest makes once before letting it through.
13222      */
13223     {
13224         static bool flag;
13225         flag = !flag;
13226         if (flag) {
13227             return -QEMU_ERESTARTSYS;
13228         }
13229     }
13230 #endif
13231 
13232     record_syscall_start(cpu, num, arg1,
13233                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13234 
13235     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13236         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13237     }
13238 
13239     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13240                       arg5, arg6, arg7, arg8);
13241 
13242     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13243         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13244                           arg3, arg4, arg5, arg6);
13245     }
13246 
13247     record_syscall_return(cpu, num, ret);
13248     return ret;
13249 }
13250