xref: /openbmc/qemu/linux-user/syscall.c (revision eb33cdae)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 
144 #ifndef CLONE_IO
145 #define CLONE_IO                0x80000000      /* Clone io context */
146 #endif
147 
148 /* We can't directly call the host clone syscall, because this will
149  * badly confuse libc (breaking mutexes, for example). So we must
150  * divide clone flags into:
151  *  * flag combinations that look like pthread_create()
152  *  * flag combinations that look like fork()
153  *  * flags we can implement within QEMU itself
154  *  * flags we can't support and will return an error for
155  */
156 /* For thread creation, all these flags must be present; for
157  * fork, none must be present.
158  */
159 #define CLONE_THREAD_FLAGS                              \
160     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
161      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
162 
163 /* These flags are ignored:
164  * CLONE_DETACHED is now ignored by the kernel;
165  * CLONE_IO is just an optimisation hint to the I/O scheduler
166  */
167 #define CLONE_IGNORED_FLAGS                     \
168     (CLONE_DETACHED | CLONE_IO)
169 
170 /* Flags for fork which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_FORK_FLAGS               \
172     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
173      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
174 
175 /* Flags for thread creation which we can implement within QEMU itself */
176 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
177     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
178      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
179 
180 #define CLONE_INVALID_FORK_FLAGS                                        \
181     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
182 
183 #define CLONE_INVALID_THREAD_FLAGS                                      \
184     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
185        CLONE_IGNORED_FLAGS))
186 
187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
188  * have almost all been allocated. We cannot support any of
189  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
190  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
191  * The checks against the invalid thread masks above will catch these.
192  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
193  */
194 
195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
196  * once. This exercises the codepaths for restart.
197  */
198 //#define DEBUG_ERESTARTSYS
199 
200 //#include <linux/msdos_fs.h>
201 #define VFAT_IOCTL_READDIR_BOTH \
202     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
203 #define VFAT_IOCTL_READDIR_SHORT \
204     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
205 
206 #undef _syscall0
207 #undef _syscall1
208 #undef _syscall2
209 #undef _syscall3
210 #undef _syscall4
211 #undef _syscall5
212 #undef _syscall6
213 
214 #define _syscall0(type,name)		\
215 static type name (void)			\
216 {					\
217 	return syscall(__NR_##name);	\
218 }
219 
220 #define _syscall1(type,name,type1,arg1)		\
221 static type name (type1 arg1)			\
222 {						\
223 	return syscall(__NR_##name, arg1);	\
224 }
225 
226 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
227 static type name (type1 arg1,type2 arg2)		\
228 {							\
229 	return syscall(__NR_##name, arg1, arg2);	\
230 }
231 
232 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
233 static type name (type1 arg1,type2 arg2,type3 arg3)		\
234 {								\
235 	return syscall(__NR_##name, arg1, arg2, arg3);		\
236 }
237 
238 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
239 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
240 {										\
241 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
242 }
243 
244 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
245 		  type5,arg5)							\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
249 }
250 
251 
252 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
253 		  type5,arg5,type6,arg6)					\
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
255                   type6 arg6)							\
256 {										\
257 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
258 }
259 
260 
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
271 #endif
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
274 #endif
275 #define __NR_sys_statx __NR_statx
276 
277 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
278 #define __NR__llseek __NR_lseek
279 #endif
280 
281 /* Newer kernel ports have llseek() instead of _llseek() */
282 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
283 #define TARGET_NR__llseek TARGET_NR_llseek
284 #endif
285 
286 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
287 #ifndef TARGET_O_NONBLOCK_MASK
288 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
289 #endif
290 
291 #define __NR_sys_gettid __NR_gettid
292 _syscall0(int, sys_gettid)
293 
294 /* For the 64-bit guest on 32-bit host case we must emulate
295  * getdents using getdents64, because otherwise the host
296  * might hand us back more dirent records than we can fit
297  * into the guest buffer after structure format conversion.
298  * Otherwise we emulate getdents with getdents if the host has it.
299  */
300 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
301 #define EMULATE_GETDENTS_WITH_GETDENTS
302 #endif
303 
304 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
305 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
306 #endif
307 #if (defined(TARGET_NR_getdents) && \
308       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
309     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
310 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
311 #endif
312 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
313 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
314           loff_t *, res, uint, wh);
315 #endif
316 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
317 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
318           siginfo_t *, uinfo)
319 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
320 #ifdef __NR_exit_group
321 _syscall1(int,exit_group,int,error_code)
322 #endif
323 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
324 _syscall1(int,set_tid_address,int *,tidptr)
325 #endif
326 #if defined(__NR_futex)
327 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
328           const struct timespec *,timeout,int *,uaddr2,int,val3)
329 #endif
330 #if defined(__NR_futex_time64)
331 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
332           const struct timespec *,timeout,int *,uaddr2,int,val3)
333 #endif
334 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
335 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
336           unsigned long *, user_mask_ptr);
337 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
338 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
339           unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc */
struct sched_attr {
    uint32_t size;            /* size of this structure, for ABI extension */
    uint32_t sched_policy;    /* scheduling policy (SCHED_NORMAL, SCHED_FIFO, ...) */
    uint64_t sched_flags;     /* SCHED_FLAG_* bits */
    int32_t sched_nice;       /* nice value, for normal policies */
    uint32_t sched_priority;  /* static priority, for realtime policies */
    uint64_t sched_runtime;   /* SCHED_DEADLINE runtime in ns */
    uint64_t sched_deadline;  /* SCHED_DEADLINE deadline in ns */
    uint64_t sched_period;    /* SCHED_DEADLINE period in ns */
    uint32_t sched_util_min;  /* utilization clamp minimum */
    uint32_t sched_util_max;  /* utilization clamp maximum */
};
353 #define __NR_sys_sched_getattr __NR_sched_getattr
354 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
355           unsigned int, size, unsigned int, flags);
356 #define __NR_sys_sched_setattr __NR_sched_setattr
357 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
358           unsigned int, flags);
359 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
360 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
361 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
362 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
363           const struct sched_param *, param);
364 #define __NR_sys_sched_getparam __NR_sched_getparam
365 _syscall2(int, sys_sched_getparam, pid_t, pid,
366           struct sched_param *, param);
367 #define __NR_sys_sched_setparam __NR_sched_setparam
368 _syscall2(int, sys_sched_setparam, pid_t, pid,
369           const struct sched_param *, param);
370 #define __NR_sys_getcpu __NR_getcpu
371 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
372 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
373           void *, arg);
374 _syscall2(int, capget, struct __user_cap_header_struct *, header,
375           struct __user_cap_data_struct *, data);
376 _syscall2(int, capset, struct __user_cap_header_struct *, header,
377           struct __user_cap_data_struct *, data);
378 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
379 _syscall2(int, ioprio_get, int, which, int, who)
380 #endif
381 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
382 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
383 #endif
384 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
385 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
386 #endif
387 
388 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
389 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
390           unsigned long, idx1, unsigned long, idx2)
391 #endif
392 
393 /*
394  * It is assumed that struct statx is architecture independent.
395  */
396 #if defined(TARGET_NR_statx) && defined(__NR_statx)
397 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
398           unsigned int, mask, struct target_statx *, statxbuf)
399 #endif
400 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
401 _syscall2(int, membarrier, int, cmd, int, flags)
402 #endif
403 
/*
 * Translation table for open(2)/fcntl(2) flag bits between target and
 * host encodings.  Each row is { target_mask, target_bits, host_mask,
 * host_bits }; a zero-filled row terminates the list, which is why flags
 * that may legitimately be 0 on some hosts (O_LARGEFILE) are guarded so
 * they cannot end the table early.
 */
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
439 
440 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
441 
442 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
443 #if defined(__NR_utimensat)
444 #define __NR_sys_utimensat __NR_utimensat
445 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
446           const struct timespec *,tsp,int,flags)
447 #else
/*
 * Fallback when the host provides no utimensat syscall: fail with
 * ENOSYS, exactly as the missing syscall would.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
454 #endif
455 #endif /* TARGET_NR_utimensat */
456 
457 #ifdef TARGET_NR_renameat2
458 #if defined(__NR_renameat2)
459 #define __NR_sys_renameat2 __NR_renameat2
460 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
461           const char *, new, unsigned int, flags)
462 #else
/*
 * Fallback when the host provides no renameat2 syscall: the flag-less
 * case is exactly renameat(); any request with flags cannot be honoured
 * and fails with ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags != 0) {
        errno = ENOSYS;
        return -1;
    }
    return renameat(oldfd, old, newfd, new);
}
472 #endif
473 #endif /* TARGET_NR_renameat2 */
474 
475 #ifdef CONFIG_INOTIFY
476 #include <sys/inotify.h>
477 #else
478 /* Userspace can usually survive runtime without inotify */
479 #undef TARGET_NR_inotify_init
480 #undef TARGET_NR_inotify_init1
481 #undef TARGET_NR_inotify_add_watch
482 #undef TARGET_NR_inotify_rm_watch
483 #endif /* CONFIG_INOTIFY  */
484 
485 #if defined(TARGET_NR_prlimit64)
486 #ifndef __NR_prlimit64
487 # define __NR_prlimit64 -1
488 #endif
489 #define __NR_sys_prlimit64 __NR_prlimit64
490 /* The glibc rlimit structure may not be that used by the underlying syscall */
491 struct host_rlimit64 {
492     uint64_t rlim_cur;
493     uint64_t rlim_max;
494 };
495 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
496           const struct host_rlimit64 *, new_limit,
497           struct host_rlimit64 *, old_limit)
498 #endif
499 
500 
501 #if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/*
 * Claim the first unused slot in g_posix_timers and return its index,
 * or -1 if all slots are busy.  A slot is considered free while it
 * holds 0; claiming stores the placeholder (timer_t)1 — presumably the
 * caller overwrites it with the real host timer id (not visible here).
 */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
517 #endif
518 
/*
 * Convert a host errno value to the target's numbering.  The case list
 * is generated by expanding errnos.c.inc inside the switch; any errno
 * not listed there is passed through unchanged (assumed identical on
 * host and target).
 */
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}
529 
/*
 * Convert a target errno value to the host's numbering — the inverse of
 * host_to_target_errno(), generated from the same errnos.c.inc list.
 * Unlisted values pass through unchanged.
 */
static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
540 
541 static inline abi_long get_errno(abi_long ret)
542 {
543     if (ret == -1)
544         return -host_to_target_errno(errno);
545     else
546         return ret;
547 }
548 
549 const char *target_strerror(int err)
550 {
551     if (err == QEMU_ERESTARTSYS) {
552         return "To be restarted";
553     }
554     if (err == QEMU_ESIGRETURN) {
555         return "Successful exit from sigreturn";
556     }
557 
558     return strerror(target_to_host_errno(err));
559 }
560 
561 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
562 {
563     int i;
564     uint8_t b;
565     if (usize <= ksize) {
566         return 1;
567     }
568     for (i = ksize; i < usize; i++) {
569         if (get_user_u8(b, addr + i)) {
570             return -TARGET_EFAULT;
571         }
572         if (b != 0) {
573             return 0;
574         }
575     }
576     return 1;
577 }
578 
579 #define safe_syscall0(type, name) \
580 static type safe_##name(void) \
581 { \
582     return safe_syscall(__NR_##name); \
583 }
584 
585 #define safe_syscall1(type, name, type1, arg1) \
586 static type safe_##name(type1 arg1) \
587 { \
588     return safe_syscall(__NR_##name, arg1); \
589 }
590 
591 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
592 static type safe_##name(type1 arg1, type2 arg2) \
593 { \
594     return safe_syscall(__NR_##name, arg1, arg2); \
595 }
596 
597 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
598 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
599 { \
600     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
601 }
602 
603 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
604     type4, arg4) \
605 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
606 { \
607     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
608 }
609 
610 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
611     type4, arg4, type5, arg5) \
612 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
613     type5 arg5) \
614 { \
615     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
616 }
617 
618 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
619     type4, arg4, type5, arg5, type6, arg6) \
620 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
621     type5 arg5, type6 arg6) \
622 { \
623     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
624 }
625 
626 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
627 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
628 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
629               int, flags, mode_t, mode)
630 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
631 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
632               struct rusage *, rusage)
633 #endif
634 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
635               int, options, struct rusage *, rusage)
636 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
637 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
638     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
639 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
640               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
641 #endif
642 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
643 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
644               struct timespec *, tsp, const sigset_t *, sigmask,
645               size_t, sigsetsize)
646 #endif
647 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
648               int, maxevents, int, timeout, const sigset_t *, sigmask,
649               size_t, sigsetsize)
650 #if defined(__NR_futex)
651 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
652               const struct timespec *,timeout,int *,uaddr2,int,val3)
653 #endif
654 #if defined(__NR_futex_time64)
655 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
656               const struct timespec *,timeout,int *,uaddr2,int,val3)
657 #endif
658 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
659 safe_syscall2(int, kill, pid_t, pid, int, sig)
660 safe_syscall2(int, tkill, int, tid, int, sig)
661 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
662 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
663 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
664 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
665               unsigned long, pos_l, unsigned long, pos_h)
666 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
667               unsigned long, pos_l, unsigned long, pos_h)
668 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
669               socklen_t, addrlen)
670 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
671               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
672 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
673               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
674 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
675 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
676 safe_syscall2(int, flock, int, fd, int, operation)
677 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
678 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
679               const struct timespec *, uts, size_t, sigsetsize)
680 #endif
681 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
682               int, flags)
683 #if defined(TARGET_NR_nanosleep)
684 safe_syscall2(int, nanosleep, const struct timespec *, req,
685               struct timespec *, rem)
686 #endif
687 #if defined(TARGET_NR_clock_nanosleep) || \
688     defined(TARGET_NR_clock_nanosleep_time64)
689 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
690               const struct timespec *, req, struct timespec *, rem)
691 #endif
692 #ifdef __NR_ipc
693 #ifdef __s390x__
694 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
695               void *, ptr)
696 #else
697 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
698               void *, ptr, long, fifth)
699 #endif
700 #endif
701 #ifdef __NR_msgsnd
702 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
703               int, flags)
704 #endif
705 #ifdef __NR_msgrcv
706 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
707               long, msgtype, int, flags)
708 #endif
709 #ifdef __NR_semtimedop
710 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
711               unsigned, nsops, const struct timespec *, timeout)
712 #endif
713 #if defined(TARGET_NR_mq_timedsend) || \
714     defined(TARGET_NR_mq_timedsend_time64)
715 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
716               size_t, len, unsigned, prio, const struct timespec *, timeout)
717 #endif
718 #if defined(TARGET_NR_mq_timedreceive) || \
719     defined(TARGET_NR_mq_timedreceive_time64)
720 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
721               size_t, len, unsigned *, prio, const struct timespec *, timeout)
722 #endif
723 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
724 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
725               int, outfd, loff_t *, poutoff, size_t, length,
726               unsigned int, flags)
727 #endif
728 
729 /* We do ioctl like this rather than via safe_syscall3 to preserve the
730  * "third argument might be integer or pointer or not present" behaviour of
731  * the libc function.
732  */
733 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
734 /* Similarly for fcntl. Note that callers must always:
735  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
736  *  use the flock64 struct rather than unsuffixed flock
737  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
738  */
739 #ifdef __NR_fcntl64
740 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
741 #else
742 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
743 #endif
744 
745 static inline int host_to_target_sock_type(int host_type)
746 {
747     int target_type;
748 
749     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
750     case SOCK_DGRAM:
751         target_type = TARGET_SOCK_DGRAM;
752         break;
753     case SOCK_STREAM:
754         target_type = TARGET_SOCK_STREAM;
755         break;
756     default:
757         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
758         break;
759     }
760 
761 #if defined(SOCK_CLOEXEC)
762     if (host_type & SOCK_CLOEXEC) {
763         target_type |= TARGET_SOCK_CLOEXEC;
764     }
765 #endif
766 
767 #if defined(SOCK_NONBLOCK)
768     if (host_type & SOCK_NONBLOCK) {
769         target_type |= TARGET_SOCK_NONBLOCK;
770     }
771 #endif
772 
773     return target_type;
774 }
775 
/* Guest heap state used by do_brk(); all values are guest addresses. */
static abi_ulong target_brk;            /* current program break */
static abi_ulong target_original_brk;   /* initial break; brk never goes below */
static abi_ulong brk_page;              /* host-page-aligned top of the heap */

void target_set_brk(abi_ulong new_brk)
{
    /* Record the initial break, rounded up to a host page boundary. */
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
785 
786 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
787 #define DEBUGF_BRK(message, args...)
788 
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break without changing it. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the initial break; report the old value. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
	target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
	return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
            target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
868 
869 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
870     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
871 static inline abi_long copy_from_user_fdset(fd_set *fds,
872                                             abi_ulong target_fds_addr,
873                                             int n)
874 {
875     int i, nw, j, k;
876     abi_ulong b, *target_fds;
877 
878     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
879     if (!(target_fds = lock_user(VERIFY_READ,
880                                  target_fds_addr,
881                                  sizeof(abi_ulong) * nw,
882                                  1)))
883         return -TARGET_EFAULT;
884 
885     FD_ZERO(fds);
886     k = 0;
887     for (i = 0; i < nw; i++) {
888         /* grab the abi_ulong */
889         __get_user(b, &target_fds[i]);
890         for (j = 0; j < TARGET_ABI_BITS; j++) {
891             /* check the bit inside the abi_ulong */
892             if ((b >> j) & 1)
893                 FD_SET(k, fds);
894             k++;
895         }
896     }
897 
898     unlock_user(target_fds, target_fds_addr, 0);
899 
900     return 0;
901 }
902 
903 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
904                                                  abi_ulong target_fds_addr,
905                                                  int n)
906 {
907     if (target_fds_addr) {
908         if (copy_from_user_fdset(fds, target_fds_addr, n))
909             return -TARGET_EFAULT;
910         *fds_ptr = fds;
911     } else {
912         *fds_ptr = NULL;
913     }
914     return 0;
915 }
916 
917 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
918                                           const fd_set *fds,
919                                           int n)
920 {
921     int i, nw, j, k;
922     abi_long v;
923     abi_ulong *target_fds;
924 
925     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
926     if (!(target_fds = lock_user(VERIFY_WRITE,
927                                  target_fds_addr,
928                                  sizeof(abi_ulong) * nw,
929                                  0)))
930         return -TARGET_EFAULT;
931 
932     k = 0;
933     for (i = 0; i < nw; i++) {
934         v = 0;
935         for (j = 0; j < TARGET_ABI_BITS; j++) {
936             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
937             k++;
938         }
939         __put_user(v, &target_fds[i]);
940     }
941 
942     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
943 
944     return 0;
945 }
946 #endif
947 
/* Clock tick frequency of the host kernel; Alpha hosts use 1024 Hz. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/*
 * Rescale a clock_t tick count from the host's HZ to the target's HZ.
 * Widened to int64_t first so the multiplication cannot overflow a
 * 32-bit long before the division.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
962 
963 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
964                                              const struct rusage *rusage)
965 {
966     struct target_rusage *target_rusage;
967 
968     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
969         return -TARGET_EFAULT;
970     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
971     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
972     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
973     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
974     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
975     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
976     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
977     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
978     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
979     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
980     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
981     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
982     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
983     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
984     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
985     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
986     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
987     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
988     unlock_user_struct(target_rusage, target_addr, 1);
989 
990     return 0;
991 }
992 
993 #ifdef TARGET_NR_setrlimit
/*
 * Convert a target rlimit value (guest byte order) to a host rlim_t.
 * The target's "infinity" sentinel maps to RLIM_INFINITY, as does any
 * value too large to survive a round-trip through rlim_t.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* Saturate to infinity if the value does not fit in rlim_t. */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
1009 #endif
1010 
1011 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1012 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1013 {
1014     abi_ulong target_rlim_swap;
1015     abi_ulong result;
1016 
1017     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1018         target_rlim_swap = TARGET_RLIM_INFINITY;
1019     else
1020         target_rlim_swap = rlim;
1021     result = tswapal(target_rlim_swap);
1022 
1023     return result;
1024 }
1025 #endif
1026 
1027 static inline int target_to_host_resource(int code)
1028 {
1029     switch (code) {
1030     case TARGET_RLIMIT_AS:
1031         return RLIMIT_AS;
1032     case TARGET_RLIMIT_CORE:
1033         return RLIMIT_CORE;
1034     case TARGET_RLIMIT_CPU:
1035         return RLIMIT_CPU;
1036     case TARGET_RLIMIT_DATA:
1037         return RLIMIT_DATA;
1038     case TARGET_RLIMIT_FSIZE:
1039         return RLIMIT_FSIZE;
1040     case TARGET_RLIMIT_LOCKS:
1041         return RLIMIT_LOCKS;
1042     case TARGET_RLIMIT_MEMLOCK:
1043         return RLIMIT_MEMLOCK;
1044     case TARGET_RLIMIT_MSGQUEUE:
1045         return RLIMIT_MSGQUEUE;
1046     case TARGET_RLIMIT_NICE:
1047         return RLIMIT_NICE;
1048     case TARGET_RLIMIT_NOFILE:
1049         return RLIMIT_NOFILE;
1050     case TARGET_RLIMIT_NPROC:
1051         return RLIMIT_NPROC;
1052     case TARGET_RLIMIT_RSS:
1053         return RLIMIT_RSS;
1054     case TARGET_RLIMIT_RTPRIO:
1055         return RLIMIT_RTPRIO;
1056     case TARGET_RLIMIT_SIGPENDING:
1057         return RLIMIT_SIGPENDING;
1058     case TARGET_RLIMIT_STACK:
1059         return RLIMIT_STACK;
1060     default:
1061         return code;
1062     }
1063 }
1064 
1065 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1066                                               abi_ulong target_tv_addr)
1067 {
1068     struct target_timeval *target_tv;
1069 
1070     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1071         return -TARGET_EFAULT;
1072     }
1073 
1074     __get_user(tv->tv_sec, &target_tv->tv_sec);
1075     __get_user(tv->tv_usec, &target_tv->tv_usec);
1076 
1077     unlock_user_struct(target_tv, target_tv_addr, 0);
1078 
1079     return 0;
1080 }
1081 
1082 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1083                                             const struct timeval *tv)
1084 {
1085     struct target_timeval *target_tv;
1086 
1087     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1088         return -TARGET_EFAULT;
1089     }
1090 
1091     __put_user(tv->tv_sec, &target_tv->tv_sec);
1092     __put_user(tv->tv_usec, &target_tv->tv_usec);
1093 
1094     unlock_user_struct(target_tv, target_tv_addr, 1);
1095 
1096     return 0;
1097 }
1098 
1099 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1100 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1101                                                 abi_ulong target_tv_addr)
1102 {
1103     struct target__kernel_sock_timeval *target_tv;
1104 
1105     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1106         return -TARGET_EFAULT;
1107     }
1108 
1109     __get_user(tv->tv_sec, &target_tv->tv_sec);
1110     __get_user(tv->tv_usec, &target_tv->tv_usec);
1111 
1112     unlock_user_struct(target_tv, target_tv_addr, 0);
1113 
1114     return 0;
1115 }
1116 #endif
1117 
1118 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1119                                               const struct timeval *tv)
1120 {
1121     struct target__kernel_sock_timeval *target_tv;
1122 
1123     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1124         return -TARGET_EFAULT;
1125     }
1126 
1127     __put_user(tv->tv_sec, &target_tv->tv_sec);
1128     __put_user(tv->tv_usec, &target_tv->tv_usec);
1129 
1130     unlock_user_struct(target_tv, target_tv_addr, 1);
1131 
1132     return 0;
1133 }
1134 
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a (32-bit layout) struct timespec from guest memory into
 * *host_ts.  Returns 0 or -TARGET_EFAULT.
 *
 * Bug fix in the guard above: TARGET_NR_pselect6 was listed twice;
 * the second occurrence should be TARGET_NR_pselect6_time64, because
 * do_pselect6() — compiled when either syscall is available — calls
 * this helper in its !time64 path, so a target providing only
 * pselect6_time64 would otherwise fail to build.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1158 
1159 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1160     defined(TARGET_NR_timer_settime64) || \
1161     defined(TARGET_NR_mq_timedsend_time64) || \
1162     defined(TARGET_NR_mq_timedreceive_time64) || \
1163     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1164     defined(TARGET_NR_clock_nanosleep_time64) || \
1165     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1166     defined(TARGET_NR_utimensat) || \
1167     defined(TARGET_NR_utimensat_time64) || \
1168     defined(TARGET_NR_semtimedop_time64) || \
1169     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1170 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1171                                                  abi_ulong target_addr)
1172 {
1173     struct target__kernel_timespec *target_ts;
1174 
1175     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1176         return -TARGET_EFAULT;
1177     }
1178     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1179     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1180     /* in 32bit mode, this drops the padding */
1181     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1182     unlock_user_struct(target_ts, target_addr, 0);
1183     return 0;
1184 }
1185 #endif
1186 
1187 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1188                                                struct timespec *host_ts)
1189 {
1190     struct target_timespec *target_ts;
1191 
1192     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1193         return -TARGET_EFAULT;
1194     }
1195     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1196     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1197     unlock_user_struct(target_ts, target_addr, 1);
1198     return 0;
1199 }
1200 
1201 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1202                                                  struct timespec *host_ts)
1203 {
1204     struct target__kernel_timespec *target_ts;
1205 
1206     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1207         return -TARGET_EFAULT;
1208     }
1209     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1210     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1211     unlock_user_struct(target_ts, target_addr, 1);
1212     return 0;
1213 }
1214 
1215 #if defined(TARGET_NR_gettimeofday)
1216 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1217                                              struct timezone *tz)
1218 {
1219     struct target_timezone *target_tz;
1220 
1221     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1222         return -TARGET_EFAULT;
1223     }
1224 
1225     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1226     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1227 
1228     unlock_user_struct(target_tz, target_tz_addr, 1);
1229 
1230     return 0;
1231 }
1232 #endif
1233 
1234 #if defined(TARGET_NR_settimeofday)
1235 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1236                                                abi_ulong target_tz_addr)
1237 {
1238     struct target_timezone *target_tz;
1239 
1240     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1241         return -TARGET_EFAULT;
1242     }
1243 
1244     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1245     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1246 
1247     unlock_user_struct(target_tz, target_tz_addr, 0);
1248 
1249     return 0;
1250 }
1251 #endif
1252 
1253 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1254 #include <mqueue.h>
1255 
1256 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1257                                               abi_ulong target_mq_attr_addr)
1258 {
1259     struct target_mq_attr *target_mq_attr;
1260 
1261     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1262                           target_mq_attr_addr, 1))
1263         return -TARGET_EFAULT;
1264 
1265     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1266     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1267     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1268     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1269 
1270     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1271 
1272     return 0;
1273 }
1274 
1275 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1276                                             const struct mq_attr *attr)
1277 {
1278     struct target_mq_attr *target_mq_attr;
1279 
1280     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1281                           target_mq_attr_addr, 0))
1282         return -TARGET_EFAULT;
1283 
1284     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1285     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1286     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1287     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1288 
1289     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1290 
1291     return 0;
1292 }
1293 #endif
1294 
1295 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1296 /* do_select() must return target values and target errnos. */
/*
 * n is the highest fd + 1; the three fd-set addresses and the timeval
 * address are guest pointers (0 means NULL).  The host call is made
 * through safe_pselect6(), so the timeval is converted to a timespec
 * on the way in and back again on the way out.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Copy in the fd sets; a zero guest address becomes a NULL pointer. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* Convert the optional timeval to the timespec pselect6 expects. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Copy the result sets and the updated timeout back to the guest. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1352 
1353 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1354 static abi_long do_old_select(abi_ulong arg1)
1355 {
1356     struct target_sel_arg_struct *sel;
1357     abi_ulong inp, outp, exp, tvp;
1358     long nsel;
1359 
1360     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1361         return -TARGET_EFAULT;
1362     }
1363 
1364     nsel = tswapal(sel->n);
1365     inp = tswapal(sel->inp);
1366     outp = tswapal(sel->outp);
1367     exp = tswapal(sel->exp);
1368     tvp = tswapal(sel->tvp);
1369 
1370     unlock_user_struct(sel, arg1, 0);
1371 
1372     return do_select(nsel, inp, outp, exp, tvp);
1373 }
1374 #endif
1375 #endif
1376 
1377 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Implement pselect6 / pselect6_time64.  arg1..arg5 are nfds, the
 * three fd-set guest addresses and the timespec guest address; arg6
 * points at a two-word block packing the sigset pointer and its size.
 * time64 selects the 64-bit timespec layout.  Returns the number of
 * ready descriptors or a target errno.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    sigset_t set;
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;
    target_sigset_t *target_sigset;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    /* Copy in the fd sets; a zero guest address becomes a NULL pointer. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
            ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    if (arg6) {
        sig_ptr = &sig;
        sig.size = SIGSET_T_SIZE;

        /* arg6 points to { sigset pointer, sigset size } in guest memory. */
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            sig.set = &set;
            if (arg_sigsize != sizeof(*target_sigset)) {
                /* Like the kernel, we enforce correct size sigsets */
                return -TARGET_EINVAL;
            }
            target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                      sizeof(*target_sigset), 1);
            if (!target_sigset) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, target_sigset);
            unlock_user(target_sigset, arg_sigset, 0);
        } else {
            /* A packed block with a NULL sigset pointer is allowed. */
            sig.set = NULL;
        }
    } else {
        sig_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (!is_error(ret)) {
        /* Copy result fd sets and the updated timeout back to the guest. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
1497 #endif
1498 
1499 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1500     defined(TARGET_NR_ppoll_time64)
1501 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1502                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1503 {
1504     struct target_pollfd *target_pfd;
1505     unsigned int nfds = arg2;
1506     struct pollfd *pfd;
1507     unsigned int i;
1508     abi_long ret;
1509 
1510     pfd = NULL;
1511     target_pfd = NULL;
1512     if (nfds) {
1513         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1514             return -TARGET_EINVAL;
1515         }
1516         target_pfd = lock_user(VERIFY_WRITE, arg1,
1517                                sizeof(struct target_pollfd) * nfds, 1);
1518         if (!target_pfd) {
1519             return -TARGET_EFAULT;
1520         }
1521 
1522         pfd = alloca(sizeof(struct pollfd) * nfds);
1523         for (i = 0; i < nfds; i++) {
1524             pfd[i].fd = tswap32(target_pfd[i].fd);
1525             pfd[i].events = tswap16(target_pfd[i].events);
1526         }
1527     }
1528     if (ppoll) {
1529         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1530         target_sigset_t *target_set;
1531         sigset_t _set, *set = &_set;
1532 
1533         if (arg3) {
1534             if (time64) {
1535                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1536                     unlock_user(target_pfd, arg1, 0);
1537                     return -TARGET_EFAULT;
1538                 }
1539             } else {
1540                 if (target_to_host_timespec(timeout_ts, arg3)) {
1541                     unlock_user(target_pfd, arg1, 0);
1542                     return -TARGET_EFAULT;
1543                 }
1544             }
1545         } else {
1546             timeout_ts = NULL;
1547         }
1548 
1549         if (arg4) {
1550             if (arg5 != sizeof(target_sigset_t)) {
1551                 unlock_user(target_pfd, arg1, 0);
1552                 return -TARGET_EINVAL;
1553             }
1554 
1555             target_set = lock_user(VERIFY_READ, arg4,
1556                                    sizeof(target_sigset_t), 1);
1557             if (!target_set) {
1558                 unlock_user(target_pfd, arg1, 0);
1559                 return -TARGET_EFAULT;
1560             }
1561             target_to_host_sigset(set, target_set);
1562         } else {
1563             set = NULL;
1564         }
1565 
1566         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1567                                    set, SIGSET_T_SIZE));
1568 
1569         if (!is_error(ret) && arg3) {
1570             if (time64) {
1571                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1572                     return -TARGET_EFAULT;
1573                 }
1574             } else {
1575                 if (host_to_target_timespec(arg3, timeout_ts)) {
1576                     return -TARGET_EFAULT;
1577                 }
1578             }
1579         }
1580         if (arg4) {
1581             unlock_user(target_set, arg4, 0);
1582         }
1583     } else {
1584           struct timespec ts, *pts;
1585 
1586           if (arg3 >= 0) {
1587               /* Convert ms to secs, ns */
1588               ts.tv_sec = arg3 / 1000;
1589               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1590               pts = &ts;
1591           } else {
1592               /* -ve poll() timeout means "infinite" */
1593               pts = NULL;
1594           }
1595           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1596     }
1597 
1598     if (!is_error(ret)) {
1599         for (i = 0; i < nfds; i++) {
1600             target_pfd[i].revents = tswap16(pfd[i].revents);
1601         }
1602     }
1603     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1604     return ret;
1605 }
1606 #endif
1607 
/*
 * Create a pipe with flags via host pipe2(2).  Returns the raw host
 * result, or -ENOSYS when the host libc does not provide pipe2.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1616 
/*
 * Implement pipe(2)/pipe2(2).  pipedes is the guest address of the
 * two-int result array; is_pipe2 distinguishes which guest syscall
 * was invoked (some targets return the fds differently for pipe).
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
        /* These targets return the read fd as the syscall result and
           pass the write fd back in a second CPU register.  */
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: store both fds into the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1650 
1651 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1652                                               abi_ulong target_addr,
1653                                               socklen_t len)
1654 {
1655     struct target_ip_mreqn *target_smreqn;
1656 
1657     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1658     if (!target_smreqn)
1659         return -TARGET_EFAULT;
1660     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1661     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1662     if (len == sizeof(struct target_ip_mreqn))
1663         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1664     unlock_user(target_smreqn, target_addr, 0);
1665 
1666     return 0;
1667 }
1668 
/*
 * Copy a guest sockaddr at target_addr into the host *addr, applying
 * byte-order fixups for the address family.  An fd-specific translator
 * (fd_trans) takes precedence when one is registered for this fd.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Extend len by one to include the terminating NUL that
             * the caller forgot to count. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        /* Netlink pid/groups are stored in guest byte order; swap them. */
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        /* Swap the multi-byte sockaddr_ll fields in place. */
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1725 
/*
 * Copy the host sockaddr *addr out to guest memory at target_addr,
 * byte-swapping family-specific multi-byte fields.  A len of 0 is a
 * no-op (the kernel may report zero-length addresses).
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap sa_family if the buffer is long enough to contain it;
     * the kernel can legitimately return a truncated address. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        /* NOTE(review): this casts to the *host* struct sockaddr_ll,
         * unlike target_to_host_sockaddr() which uses the target
         * struct — presumably the layouts match; verify. */
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1765 
/* Convert the guest's ancillary data (control messages) into the host
 * cmsg buffer that the caller pre-allocated in msgh->msg_control.
 * Walks the guest and host cmsg chains in lockstep, converting
 * SCM_RIGHTS fd arrays and SCM_CREDENTIALS; any other payload type is
 * byte-copied verbatim with a LOG_UNIMP warning.  On return,
 * msgh->msg_controllen holds the number of host bytes actually used.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest control buffer
 * cannot be locked for reading.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;    /* host control-buffer bytes consumed so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* No room for even one guest header: nothing to convert. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload size of this guest cmsg (header excluded). */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        /* Translate the level; only SOL_SOCKET differs between
         * target and host numbering here.
         */
        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: swap each 32-bit fd. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            /* Credentials: convert field by field. */
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload: pass the bytes through unchanged. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1851 
/* Convert host ancillary data in @msgh back into the guest's control
 * buffer described by @target_msgh.  Walks both cmsg chains in
 * lockstep, converting the payload types we understand (SCM_RIGHTS,
 * SO_TIMESTAMP, SCM_CREDENTIALS, IP_TTL, IP_RECVERR, IPV6_HOPLIMIT,
 * IPV6_RECVERR); anything else is byte-copied with a LOG_UNIMP
 * warning.  If the guest buffer is too small, MSG_CTRUNC is set in
 * the guest msg_flags and the payload is truncated, matching the
 * kernel's put_cmsg() behaviour.  On return the guest's
 * msg_controllen holds the number of guest bytes written.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest control buffer
 * cannot be locked for writing.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;    /* guest control-buffer bytes written so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload size of this host cmsg (header excluded). */
        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                /* struct timeval layout differs between host/target ABIs. */
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /* Not enough guest room for the full payload: truncate and
         * tell the guest via MSG_CTRUNC.
         */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* Received file descriptors: swap each 32-bit fd
                 * that still fits in the (possibly truncated) space.
                 */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                /* Truncated timestamps are not convertible field by
                 * field; fall back to the raw-copy path.
                 */
                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* Extended error: sock_extended_err followed by the
                 * offending peer's address, as the kernel lays it out.
                 */
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                /* target_errh is host-accessible memory here, so the
                 * address is passed as a host pointer cast to the
                 * guest-address parameter type.
                 */
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                /* Same layout as IP_RECVERR but with an IPv6 offender. */
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding if the guest
             * representation is larger than the host one.
             */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        /* Account for the guest space this cmsg consumed; the last
         * (truncated) one may use less than a full TARGET_CMSG_SPACE.
         */
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2083 
/* do_setsockopt() Must return target values and target errnos.
 *
 * Emulate a guest setsockopt(2): read the option payload from guest
 * memory at optval_addr, convert it to host format (byte order,
 * struct layout, target->host constant translation where needed) and
 * forward it to the host setsockopt().  Level/option combinations we
 * do not understand fall through to the 'unimplemented' label and
 * fail with -TARGET_ENOPROTOOPT.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* Like the kernel, accept either a 32-bit or a single-byte
             * payload for these integer-valued options.
             */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* The guest may pass either an ip_mreq or the larger
             * ip_mreqn; allocate for whatever length it gave us.
             */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            /* Passed through without conversion — presumably all
             * fields are already in network byte order; confirm if
             * this struct ever grows host-endian members.
             */
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            /* Plain 'int' valued IPv6 options. */
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* Only the interface index is endian-sensitive here. */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            /* The multicast address itself is network byte order;
             * only the interface index needs swapping.
             */
            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            /* The filter is a bitmap held in eight 32-bit words. */
            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            char *alg_key = g_malloc(optlen);

            /* NOTE(review): g_malloc() aborts on allocation failure,
             * so this NULL check can only fire when optlen == 0 —
             * confirm whether -TARGET_ENOMEM is the intended result
             * for a zero-length key.
             */
            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            /* No payload: the option length itself carries the value. */
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

/* Shared with TARGET_SO_SNDTIMEO below; optname is set before the jump. */
set_timeout:
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                /* Convert each BPF instruction to host byte order
                 * (jt/jf are single bytes and need no swap).
                 */
                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
	case TARGET_SO_BINDTODEVICE:
	{
		char *dev_ifname, *addr_ifname;

		/* Silently truncate over-long names, then pass a
		 * NUL-terminated copy to the host.
		 */
		if (optlen > IFNAMSIZ - 1) {
		    optlen = IFNAMSIZ - 1;
		}
		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
		if (!dev_ifname) {
		    return -TARGET_EFAULT;
		}
		optname = SO_BINDTODEVICE;
		addr_ifname = alloca(IFNAMSIZ);
		memcpy(addr_ifname, dev_ifname, optlen);
		addr_ifname[optlen] = 0;
		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
		unlock_user (dev_ifname, optval_addr, 0);
		return ret;
	}
        case TARGET_SO_LINGER:
        {
                struct linger lg;
                struct target_linger *tlg;

                if (optlen != sizeof(struct target_linger)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                    return -TARGET_EFAULT;
                }
                __get_user(lg.l_onoff, &tlg->l_onoff);
                __get_user(lg.l_linger, &tlg->l_linger);
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                &lg, sizeof(lg)));
                unlock_user_struct(tlg, optval_addr, 0);
                return ret;
        }
            /* Options with 'int' argument.  These only need the
             * target->host option-number translation; the shared
             * code after the switch reads the value and issues the
             * host call.
             */
        case TARGET_SO_DEBUG:
		optname = SO_DEBUG;
		break;
        case TARGET_SO_REUSEADDR:
		optname = SO_REUSEADDR;
		break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
                optname = SO_REUSEPORT;
                break;
#endif
        case TARGET_SO_TYPE:
		optname = SO_TYPE;
		break;
        case TARGET_SO_ERROR:
		optname = SO_ERROR;
		break;
        case TARGET_SO_DONTROUTE:
		optname = SO_DONTROUTE;
		break;
        case TARGET_SO_BROADCAST:
		optname = SO_BROADCAST;
		break;
        case TARGET_SO_SNDBUF:
		optname = SO_SNDBUF;
		break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
		optname = SO_RCVBUF;
		break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
		optname = SO_KEEPALIVE;
		break;
        case TARGET_SO_OOBINLINE:
		optname = SO_OOBINLINE;
		break;
        case TARGET_SO_NO_CHECK:
		optname = SO_NO_CHECK;
		break;
        case TARGET_SO_PRIORITY:
		optname = SO_PRIORITY;
		break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
		optname = SO_BSDCOMPAT;
		break;
#endif
        case TARGET_SO_PASSCRED:
		optname = SO_PASSCRED;
		break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
		optname = SO_TIMESTAMP;
		break;
        case TARGET_SO_RCVLOWAT:
		optname = SO_RCVLOWAT;
		break;
        default:
            goto unimplemented;
        }
	if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

	if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        /* All supported netlink options take a plain u32. */
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
2546 
2547 /* do_getsockopt() Must return target values and target errnos. */
2548 static abi_long do_getsockopt(int sockfd, int level, int optname,
2549                               abi_ulong optval_addr, abi_ulong optlen)
2550 {
2551     abi_long ret;
2552     int len, val;
2553     socklen_t lv;
2554 
2555     switch(level) {
2556     case TARGET_SOL_SOCKET:
2557         level = SOL_SOCKET;
2558         switch (optname) {
2559         /* These don't just return a single integer */
2560         case TARGET_SO_PEERNAME:
2561             goto unimplemented;
2562         case TARGET_SO_RCVTIMEO: {
2563             struct timeval tv;
2564             socklen_t tvlen;
2565 
2566             optname = SO_RCVTIMEO;
2567 
2568 get_timeout:
2569             if (get_user_u32(len, optlen)) {
2570                 return -TARGET_EFAULT;
2571             }
2572             if (len < 0) {
2573                 return -TARGET_EINVAL;
2574             }
2575 
2576             tvlen = sizeof(tv);
2577             ret = get_errno(getsockopt(sockfd, level, optname,
2578                                        &tv, &tvlen));
2579             if (ret < 0) {
2580                 return ret;
2581             }
2582             if (len > sizeof(struct target_timeval)) {
2583                 len = sizeof(struct target_timeval);
2584             }
2585             if (copy_to_user_timeval(optval_addr, &tv)) {
2586                 return -TARGET_EFAULT;
2587             }
2588             if (put_user_u32(len, optlen)) {
2589                 return -TARGET_EFAULT;
2590             }
2591             break;
2592         }
2593         case TARGET_SO_SNDTIMEO:
2594             optname = SO_SNDTIMEO;
2595             goto get_timeout;
2596         case TARGET_SO_PEERCRED: {
2597             struct ucred cr;
2598             socklen_t crlen;
2599             struct target_ucred *tcr;
2600 
2601             if (get_user_u32(len, optlen)) {
2602                 return -TARGET_EFAULT;
2603             }
2604             if (len < 0) {
2605                 return -TARGET_EINVAL;
2606             }
2607 
2608             crlen = sizeof(cr);
2609             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2610                                        &cr, &crlen));
2611             if (ret < 0) {
2612                 return ret;
2613             }
2614             if (len > crlen) {
2615                 len = crlen;
2616             }
2617             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2618                 return -TARGET_EFAULT;
2619             }
2620             __put_user(cr.pid, &tcr->pid);
2621             __put_user(cr.uid, &tcr->uid);
2622             __put_user(cr.gid, &tcr->gid);
2623             unlock_user_struct(tcr, optval_addr, 1);
2624             if (put_user_u32(len, optlen)) {
2625                 return -TARGET_EFAULT;
2626             }
2627             break;
2628         }
2629         case TARGET_SO_PEERSEC: {
2630             char *name;
2631 
2632             if (get_user_u32(len, optlen)) {
2633                 return -TARGET_EFAULT;
2634             }
2635             if (len < 0) {
2636                 return -TARGET_EINVAL;
2637             }
2638             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2639             if (!name) {
2640                 return -TARGET_EFAULT;
2641             }
2642             lv = len;
2643             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2644                                        name, &lv));
2645             if (put_user_u32(lv, optlen)) {
2646                 ret = -TARGET_EFAULT;
2647             }
2648             unlock_user(name, optval_addr, lv);
2649             break;
2650         }
2651         case TARGET_SO_LINGER:
2652         {
2653             struct linger lg;
2654             socklen_t lglen;
2655             struct target_linger *tlg;
2656 
2657             if (get_user_u32(len, optlen)) {
2658                 return -TARGET_EFAULT;
2659             }
2660             if (len < 0) {
2661                 return -TARGET_EINVAL;
2662             }
2663 
2664             lglen = sizeof(lg);
2665             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2666                                        &lg, &lglen));
2667             if (ret < 0) {
2668                 return ret;
2669             }
2670             if (len > lglen) {
2671                 len = lglen;
2672             }
2673             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2674                 return -TARGET_EFAULT;
2675             }
2676             __put_user(lg.l_onoff, &tlg->l_onoff);
2677             __put_user(lg.l_linger, &tlg->l_linger);
2678             unlock_user_struct(tlg, optval_addr, 1);
2679             if (put_user_u32(len, optlen)) {
2680                 return -TARGET_EFAULT;
2681             }
2682             break;
2683         }
2684         /* Options with 'int' argument.  */
2685         case TARGET_SO_DEBUG:
2686             optname = SO_DEBUG;
2687             goto int_case;
2688         case TARGET_SO_REUSEADDR:
2689             optname = SO_REUSEADDR;
2690             goto int_case;
2691 #ifdef SO_REUSEPORT
2692         case TARGET_SO_REUSEPORT:
2693             optname = SO_REUSEPORT;
2694             goto int_case;
2695 #endif
2696         case TARGET_SO_TYPE:
2697             optname = SO_TYPE;
2698             goto int_case;
2699         case TARGET_SO_ERROR:
2700             optname = SO_ERROR;
2701             goto int_case;
2702         case TARGET_SO_DONTROUTE:
2703             optname = SO_DONTROUTE;
2704             goto int_case;
2705         case TARGET_SO_BROADCAST:
2706             optname = SO_BROADCAST;
2707             goto int_case;
2708         case TARGET_SO_SNDBUF:
2709             optname = SO_SNDBUF;
2710             goto int_case;
2711         case TARGET_SO_RCVBUF:
2712             optname = SO_RCVBUF;
2713             goto int_case;
2714         case TARGET_SO_KEEPALIVE:
2715             optname = SO_KEEPALIVE;
2716             goto int_case;
2717         case TARGET_SO_OOBINLINE:
2718             optname = SO_OOBINLINE;
2719             goto int_case;
2720         case TARGET_SO_NO_CHECK:
2721             optname = SO_NO_CHECK;
2722             goto int_case;
2723         case TARGET_SO_PRIORITY:
2724             optname = SO_PRIORITY;
2725             goto int_case;
2726 #ifdef SO_BSDCOMPAT
2727         case TARGET_SO_BSDCOMPAT:
2728             optname = SO_BSDCOMPAT;
2729             goto int_case;
2730 #endif
2731         case TARGET_SO_PASSCRED:
2732             optname = SO_PASSCRED;
2733             goto int_case;
2734         case TARGET_SO_TIMESTAMP:
2735             optname = SO_TIMESTAMP;
2736             goto int_case;
2737         case TARGET_SO_RCVLOWAT:
2738             optname = SO_RCVLOWAT;
2739             goto int_case;
2740         case TARGET_SO_ACCEPTCONN:
2741             optname = SO_ACCEPTCONN;
2742             goto int_case;
2743         case TARGET_SO_PROTOCOL:
2744             optname = SO_PROTOCOL;
2745             goto int_case;
2746         case TARGET_SO_DOMAIN:
2747             optname = SO_DOMAIN;
2748             goto int_case;
2749         default:
2750             goto int_case;
2751         }
2752         break;
2753     case SOL_TCP:
2754     case SOL_UDP:
2755         /* TCP and UDP options all take an 'int' value.  */
2756     int_case:
2757         if (get_user_u32(len, optlen))
2758             return -TARGET_EFAULT;
2759         if (len < 0)
2760             return -TARGET_EINVAL;
2761         lv = sizeof(lv);
2762         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2763         if (ret < 0)
2764             return ret;
2765         if (optname == SO_TYPE) {
2766             val = host_to_target_sock_type(val);
2767         }
2768         if (len > lv)
2769             len = lv;
2770         if (len == 4) {
2771             if (put_user_u32(val, optval_addr))
2772                 return -TARGET_EFAULT;
2773         } else {
2774             if (put_user_u8(val, optval_addr))
2775                 return -TARGET_EFAULT;
2776         }
2777         if (put_user_u32(len, optlen))
2778             return -TARGET_EFAULT;
2779         break;
2780     case SOL_IP:
2781         switch(optname) {
2782         case IP_TOS:
2783         case IP_TTL:
2784         case IP_HDRINCL:
2785         case IP_ROUTER_ALERT:
2786         case IP_RECVOPTS:
2787         case IP_RETOPTS:
2788         case IP_PKTINFO:
2789         case IP_MTU_DISCOVER:
2790         case IP_RECVERR:
2791         case IP_RECVTOS:
2792 #ifdef IP_FREEBIND
2793         case IP_FREEBIND:
2794 #endif
2795         case IP_MULTICAST_TTL:
2796         case IP_MULTICAST_LOOP:
2797             if (get_user_u32(len, optlen))
2798                 return -TARGET_EFAULT;
2799             if (len < 0)
2800                 return -TARGET_EINVAL;
2801             lv = sizeof(lv);
2802             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2803             if (ret < 0)
2804                 return ret;
2805             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2806                 len = 1;
2807                 if (put_user_u32(len, optlen)
2808                     || put_user_u8(val, optval_addr))
2809                     return -TARGET_EFAULT;
2810             } else {
2811                 if (len > sizeof(int))
2812                     len = sizeof(int);
2813                 if (put_user_u32(len, optlen)
2814                     || put_user_u32(val, optval_addr))
2815                     return -TARGET_EFAULT;
2816             }
2817             break;
2818         default:
2819             ret = -TARGET_ENOPROTOOPT;
2820             break;
2821         }
2822         break;
2823     case SOL_IPV6:
2824         switch (optname) {
2825         case IPV6_MTU_DISCOVER:
2826         case IPV6_MTU:
2827         case IPV6_V6ONLY:
2828         case IPV6_RECVPKTINFO:
2829         case IPV6_UNICAST_HOPS:
2830         case IPV6_MULTICAST_HOPS:
2831         case IPV6_MULTICAST_LOOP:
2832         case IPV6_RECVERR:
2833         case IPV6_RECVHOPLIMIT:
2834         case IPV6_2292HOPLIMIT:
2835         case IPV6_CHECKSUM:
2836         case IPV6_ADDRFORM:
2837         case IPV6_2292PKTINFO:
2838         case IPV6_RECVTCLASS:
2839         case IPV6_RECVRTHDR:
2840         case IPV6_2292RTHDR:
2841         case IPV6_RECVHOPOPTS:
2842         case IPV6_2292HOPOPTS:
2843         case IPV6_RECVDSTOPTS:
2844         case IPV6_2292DSTOPTS:
2845         case IPV6_TCLASS:
2846         case IPV6_ADDR_PREFERENCES:
2847 #ifdef IPV6_RECVPATHMTU
2848         case IPV6_RECVPATHMTU:
2849 #endif
2850 #ifdef IPV6_TRANSPARENT
2851         case IPV6_TRANSPARENT:
2852 #endif
2853 #ifdef IPV6_FREEBIND
2854         case IPV6_FREEBIND:
2855 #endif
2856 #ifdef IPV6_RECVORIGDSTADDR
2857         case IPV6_RECVORIGDSTADDR:
2858 #endif
2859             if (get_user_u32(len, optlen))
2860                 return -TARGET_EFAULT;
2861             if (len < 0)
2862                 return -TARGET_EINVAL;
2863             lv = sizeof(lv);
2864             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2865             if (ret < 0)
2866                 return ret;
2867             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2868                 len = 1;
2869                 if (put_user_u32(len, optlen)
2870                     || put_user_u8(val, optval_addr))
2871                     return -TARGET_EFAULT;
2872             } else {
2873                 if (len > sizeof(int))
2874                     len = sizeof(int);
2875                 if (put_user_u32(len, optlen)
2876                     || put_user_u32(val, optval_addr))
2877                     return -TARGET_EFAULT;
2878             }
2879             break;
2880         default:
2881             ret = -TARGET_ENOPROTOOPT;
2882             break;
2883         }
2884         break;
2885 #ifdef SOL_NETLINK
2886     case SOL_NETLINK:
2887         switch (optname) {
2888         case NETLINK_PKTINFO:
2889         case NETLINK_BROADCAST_ERROR:
2890         case NETLINK_NO_ENOBUFS:
2891 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2892         case NETLINK_LISTEN_ALL_NSID:
2893         case NETLINK_CAP_ACK:
2894 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2895 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2896         case NETLINK_EXT_ACK:
2897 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2898 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2899         case NETLINK_GET_STRICT_CHK:
2900 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2901             if (get_user_u32(len, optlen)) {
2902                 return -TARGET_EFAULT;
2903             }
2904             if (len != sizeof(val)) {
2905                 return -TARGET_EINVAL;
2906             }
2907             lv = len;
2908             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2909             if (ret < 0) {
2910                 return ret;
2911             }
2912             if (put_user_u32(lv, optlen)
2913                 || put_user_u32(val, optval_addr)) {
2914                 return -TARGET_EFAULT;
2915             }
2916             break;
2917 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2918         case NETLINK_LIST_MEMBERSHIPS:
2919         {
2920             uint32_t *results;
2921             int i;
2922             if (get_user_u32(len, optlen)) {
2923                 return -TARGET_EFAULT;
2924             }
2925             if (len < 0) {
2926                 return -TARGET_EINVAL;
2927             }
2928             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2929             if (!results && len > 0) {
2930                 return -TARGET_EFAULT;
2931             }
2932             lv = len;
2933             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2934             if (ret < 0) {
2935                 unlock_user(results, optval_addr, 0);
2936                 return ret;
2937             }
2938             /* swap host endianess to target endianess. */
2939             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2940                 results[i] = tswap32(results[i]);
2941             }
2942             if (put_user_u32(lv, optlen)) {
2943                 return -TARGET_EFAULT;
2944             }
2945             unlock_user(results, optval_addr, 0);
2946             break;
2947         }
2948 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2949         default:
2950             goto unimplemented;
2951         }
2952         break;
2953 #endif /* SOL_NETLINK */
2954     default:
2955     unimplemented:
2956         qemu_log_mask(LOG_UNIMP,
2957                       "getsockopt level=%d optname=%d not yet supported\n",
2958                       level, optname);
2959         ret = -TARGET_EOPNOTSUPP;
2960         break;
2961     }
2962     return ret;
2963 }
2964 
2965 /* Convert target low/high pair representing file offset into the host
2966  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2967  * as the kernel doesn't handle them either.
2968  */
2969 static void target_to_host_low_high(abi_ulong tlow,
2970                                     abi_ulong thigh,
2971                                     unsigned long *hlow,
2972                                     unsigned long *hhigh)
2973 {
2974     uint64_t off = tlow |
2975         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2976         TARGET_LONG_BITS / 2;
2977 
2978     *hlow = off;
2979     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2980 }
2981 
/* Lock the guest iovec array (count entries at target_addr) and each
 * buffer it references into host memory, returning a host iovec array.
 * On failure returns NULL with errno set (errno == 0 for count == 0);
 * on success the result must be released with unlock_iovec().
 * If copy is set, buffer contents are copied in for host access.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            /* Once one address is bad, all following entries are zeroed
             * so nothing past the bad buffer is transferred. */
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the summed length never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every buffer locked so far (entries with a zero
     * guest length were never locked). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3069 
3070 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3071                          abi_ulong count, int copy)
3072 {
3073     struct target_iovec *target_vec;
3074     int i;
3075 
3076     target_vec = lock_user(VERIFY_READ, target_addr,
3077                            count * sizeof(struct target_iovec), 1);
3078     if (target_vec) {
3079         for (i = 0; i < count; i++) {
3080             abi_ulong base = tswapal(target_vec[i].iov_base);
3081             abi_long len = tswapal(target_vec[i].iov_len);
3082             if (len < 0) {
3083                 break;
3084             }
3085             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3086         }
3087         unlock_user(target_vec, target_addr, 0);
3088     }
3089 
3090     g_free(vec);
3091 }
3092 
3093 static inline int target_to_host_sock_type(int *type)
3094 {
3095     int host_type = 0;
3096     int target_type = *type;
3097 
3098     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3099     case TARGET_SOCK_DGRAM:
3100         host_type = SOCK_DGRAM;
3101         break;
3102     case TARGET_SOCK_STREAM:
3103         host_type = SOCK_STREAM;
3104         break;
3105     default:
3106         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3107         break;
3108     }
3109     if (target_type & TARGET_SOCK_CLOEXEC) {
3110 #if defined(SOCK_CLOEXEC)
3111         host_type |= SOCK_CLOEXEC;
3112 #else
3113         return -TARGET_EINVAL;
3114 #endif
3115     }
3116     if (target_type & TARGET_SOCK_NONBLOCK) {
3117 #if defined(SOCK_NONBLOCK)
3118         host_type |= SOCK_NONBLOCK;
3119 #elif !defined(O_NONBLOCK)
3120         return -TARGET_EINVAL;
3121 #endif
3122     }
3123     *type = host_type;
3124     return 0;
3125 }
3126 
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK the flag is emulated with fcntl().
 * Returns @fd on success; on failure the socket is closed and
 * -TARGET_EINVAL is returned.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        /* Check F_GETFL too: the old code OR'ed O_NONBLOCK into -1 on
         * failure, which would set every status flag. */
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3141 
3142 /* do_socket() Must return target values and target errnos. */
3143 static abi_long do_socket(int domain, int type, int protocol)
3144 {
3145     int target_type = type;
3146     int ret;
3147 
3148     ret = target_to_host_sock_type(&type);
3149     if (ret) {
3150         return ret;
3151     }
3152 
3153     if (domain == PF_NETLINK && !(
3154 #ifdef CONFIG_RTNETLINK
3155          protocol == NETLINK_ROUTE ||
3156 #endif
3157          protocol == NETLINK_KOBJECT_UEVENT ||
3158          protocol == NETLINK_AUDIT)) {
3159         return -TARGET_EPROTONOSUPPORT;
3160     }
3161 
3162     if (domain == AF_PACKET ||
3163         (domain == AF_INET && type == SOCK_PACKET)) {
3164         protocol = tswap16(protocol);
3165     }
3166 
3167     ret = get_errno(socket(domain, type, protocol));
3168     if (ret >= 0) {
3169         ret = sock_flags_fixup(ret, target_type);
3170         if (type == SOCK_PACKET) {
3171             /* Manage an obsolete case :
3172              * if socket type is SOCK_PACKET, bind by name
3173              */
3174             fd_trans_register(ret, &target_packet_trans);
3175         } else if (domain == PF_NETLINK) {
3176             switch (protocol) {
3177 #ifdef CONFIG_RTNETLINK
3178             case NETLINK_ROUTE:
3179                 fd_trans_register(ret, &target_netlink_route_trans);
3180                 break;
3181 #endif
3182             case NETLINK_KOBJECT_UEVENT:
3183                 /* nothing to do: messages are strings */
3184                 break;
3185             case NETLINK_AUDIT:
3186                 fd_trans_register(ret, &target_netlink_audit_trans);
3187                 break;
3188             default:
3189                 g_assert_not_reached();
3190             }
3191         }
3192     }
3193     return ret;
3194 }
3195 
3196 /* do_bind() Must return target values and target errnos. */
3197 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3198                         socklen_t addrlen)
3199 {
3200     void *addr;
3201     abi_long ret;
3202 
3203     if ((int)addrlen < 0) {
3204         return -TARGET_EINVAL;
3205     }
3206 
3207     addr = alloca(addrlen+1);
3208 
3209     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3210     if (ret)
3211         return ret;
3212 
3213     return get_errno(bind(sockfd, addr, addrlen));
3214 }
3215 
3216 /* do_connect() Must return target values and target errnos. */
3217 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3218                            socklen_t addrlen)
3219 {
3220     void *addr;
3221     abi_long ret;
3222 
3223     if ((int)addrlen < 0) {
3224         return -TARGET_EINVAL;
3225     }
3226 
3227     addr = alloca(addrlen+1);
3228 
3229     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3230     if (ret)
3231         return ret;
3232 
3233     return get_errno(safe_connect(sockfd, addr, addrlen));
3234 }
3235 
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Common worker for sendmsg/recvmsg emulation: converts the (already
 * locked) target msghdr at @msgp to a host msghdr, performs the host
 * sendmsg (send != 0) or recvmsg (send == 0), and converts results back.
 * Returns the byte count on success or a target errno.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Double the control buffer size: host control messages may need
     * more room than the target's (e.g. wider types) — TODO confirm the
     * factor of 2 is always sufficient. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* The fd has a data translator (e.g. netlink): translate a
             * copy of the first iovec buffer before sending.  Only the
             * first iovec entry is translated here. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            /* Convert ancillary data (cmsgs) to host format, then send. */
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* (void *)-1 is the "bad name" sentinel set above. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* On success return the received byte count, not the
                 * conversion helper's result. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3342 
3343 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3344                                int flags, int send)
3345 {
3346     abi_long ret;
3347     struct target_msghdr *msgp;
3348 
3349     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3350                           msgp,
3351                           target_msg,
3352                           send ? 1 : 0)) {
3353         return -TARGET_EFAULT;
3354     }
3355     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3356     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3357     return ret;
3358 }
3359 
3360 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3361  * so it might not have this *mmsg-specific flag either.
3362  */
3363 #ifndef MSG_WAITFORONE
3364 #define MSG_WAITFORONE 0x10000
3365 #endif
3366 
/* Emulate sendmmsg/recvmmsg: process up to @vlen mmsghdr entries via
 * do_sendrecvmsg_locked(), storing each per-message byte count in
 * msg_len.  Returns the number of messages processed if any succeeded,
 * otherwise the (target) error from the first failing message.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Cap the vector length rather than failing. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3406 
3407 /* do_accept4() Must return target values and target errnos. */
3408 static abi_long do_accept4(int fd, abi_ulong target_addr,
3409                            abi_ulong target_addrlen_addr, int flags)
3410 {
3411     socklen_t addrlen, ret_addrlen;
3412     void *addr;
3413     abi_long ret;
3414     int host_flags;
3415 
3416     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3417 
3418     if (target_addr == 0) {
3419         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3420     }
3421 
3422     /* linux returns EFAULT if addrlen pointer is invalid */
3423     if (get_user_u32(addrlen, target_addrlen_addr))
3424         return -TARGET_EFAULT;
3425 
3426     if ((int)addrlen < 0) {
3427         return -TARGET_EINVAL;
3428     }
3429 
3430     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3431         return -TARGET_EFAULT;
3432     }
3433 
3434     addr = alloca(addrlen);
3435 
3436     ret_addrlen = addrlen;
3437     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3438     if (!is_error(ret)) {
3439         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3440         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3441             ret = -TARGET_EFAULT;
3442         }
3443     }
3444     return ret;
3445 }
3446 
3447 /* do_getpeername() Must return target values and target errnos. */
3448 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3449                                abi_ulong target_addrlen_addr)
3450 {
3451     socklen_t addrlen, ret_addrlen;
3452     void *addr;
3453     abi_long ret;
3454 
3455     if (get_user_u32(addrlen, target_addrlen_addr))
3456         return -TARGET_EFAULT;
3457 
3458     if ((int)addrlen < 0) {
3459         return -TARGET_EINVAL;
3460     }
3461 
3462     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3463         return -TARGET_EFAULT;
3464     }
3465 
3466     addr = alloca(addrlen);
3467 
3468     ret_addrlen = addrlen;
3469     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3470     if (!is_error(ret)) {
3471         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3472         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3473             ret = -TARGET_EFAULT;
3474         }
3475     }
3476     return ret;
3477 }
3478 
3479 /* do_getsockname() Must return target values and target errnos. */
3480 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3481                                abi_ulong target_addrlen_addr)
3482 {
3483     socklen_t addrlen, ret_addrlen;
3484     void *addr;
3485     abi_long ret;
3486 
3487     if (get_user_u32(addrlen, target_addrlen_addr))
3488         return -TARGET_EFAULT;
3489 
3490     if ((int)addrlen < 0) {
3491         return -TARGET_EINVAL;
3492     }
3493 
3494     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3495         return -TARGET_EFAULT;
3496     }
3497 
3498     addr = alloca(addrlen);
3499 
3500     ret_addrlen = addrlen;
3501     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3502     if (!is_error(ret)) {
3503         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3504         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3505             ret = -TARGET_EFAULT;
3506         }
3507     }
3508     return ret;
3509 }
3510 
3511 /* do_socketpair() Must return target values and target errnos. */
3512 static abi_long do_socketpair(int domain, int type, int protocol,
3513                               abi_ulong target_tab_addr)
3514 {
3515     int tab[2];
3516     abi_long ret;
3517 
3518     target_to_host_sock_type(&type);
3519 
3520     ret = get_errno(socketpair(domain, type, protocol, tab));
3521     if (!is_error(ret)) {
3522         if (put_user_s32(tab[0], target_tab_addr)
3523             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3524             ret = -TARGET_EFAULT;
3525     }
3526     return ret;
3527 }
3528 
/* do_sendto() Must return target values and target errnos.
 *
 * Emulates both send(2) (target_addr == 0) and sendto(2).  The payload
 * is read from guest memory; if the fd has a registered target-to-host
 * data translator the payload is first copied and translated so the
 * guest's buffer is left untouched.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;  /* original locked buffer when a translated copy is in use */
    abi_long ret;

    /* Reject addrlen values that are negative when read as signed. */
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate a private copy; errors propagate as target errnos. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1: target_to_host_sockaddr() may use one byte beyond addrlen
         * (presumably to NUL-terminate sun_path) — see its implementation. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* Free the translated copy (if any), then unlock the guest buffer. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3572 
/* do_recvfrom() Must return target values and target errnos.
 *
 * Emulates recv(2) (target_addr == 0) and recvfrom(2).  The source
 * address uses the usual value/result addrlen protocol.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    /* A NULL guest buffer is passed through to the host unchanged. */
    if (!msg) {
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        /* Read the guest-provided buffer size (value/result argument). */
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        /* Post-process the received bytes for fds with a registered
         * host-to-target data translator. */
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Copy back at most the guest's buffer size, but report the
             * full source-address length, as the kernel does. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error path: unlock without copying anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3633 
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * 'num' selects the socket operation; 'vptr' points at a packed array
 * of abi_long arguments in guest memory whose arity is given by the
 * nargs[] table.  Each operation dispatches to the dedicated do_*()
 * helper used by the corresponding direct syscall.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3726 
/* Fixed-size table recording guest SysV shared-memory attaches
 * (used by the shmat/shmdt emulation elsewhere in this file). */
#define N_SHM_REGIONS	32

static struct shm_region {
    abi_ulong start;   /* guest address of the attached segment */
    abi_ulong size;    /* size of the segment in bytes */
    bool in_use;       /* slot occupancy flag */
} shm_regions[N_SHM_REGIONS];
3734 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;  /* ownership and permissions */
  abi_ulong sem_otime;              /* last semop() time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;              /* reserved (pads the time field on 32-bit ABIs) */
#endif
  abi_ulong sem_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;              /* reserved (pads the time field on 32-bit ABIs) */
#endif
  abi_ulong sem_nsems;              /* number of semaphores in the set */
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3753 
/*
 * Copy the ipc_perm member of a guest semid64_ds at 'target_addr' into
 * the host struct ipc_perm, byte-swapping each field.  mode and __seq
 * are 32-bit on some targets and 16-bit on others, hence the ifdefs.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3781 
/*
 * Inverse of target_to_host_ipc_perm(): write a host struct ipc_perm
 * into the ipc_perm member of the guest semid64_ds at 'target_addr'.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3809 
3810 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3811                                                abi_ulong target_addr)
3812 {
3813     struct target_semid64_ds *target_sd;
3814 
3815     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3816         return -TARGET_EFAULT;
3817     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3818         return -TARGET_EFAULT;
3819     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3820     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3821     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3822     unlock_user_struct(target_sd, target_addr, 0);
3823     return 0;
3824 }
3825 
3826 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3827                                                struct semid_ds *host_sd)
3828 {
3829     struct target_semid64_ds *target_sd;
3830 
3831     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3832         return -TARGET_EFAULT;
3833     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3834         return -TARGET_EFAULT;
3835     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3836     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3837     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3838     unlock_user_struct(target_sd, target_addr, 1);
3839     return 0;
3840 }
3841 
/* Guest layout of struct seminfo (IPC_INFO/SEM_INFO results). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3854 
3855 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3856                                               struct seminfo *host_seminfo)
3857 {
3858     struct target_seminfo *target_seminfo;
3859     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3860         return -TARGET_EFAULT;
3861     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3862     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3863     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3864     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3865     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3866     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3867     __put_user(host_seminfo->semume, &target_seminfo->semume);
3868     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3869     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3870     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3871     unlock_user_struct(target_seminfo, target_addr, 1);
3872     return 0;
3873 }
3874 
/* Host-side semctl(2) argument union (see semctl(2)). */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest view of union semun: the pointer members are guest
 * addresses, hence abi_ulong rather than host pointers. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3888 
3889 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3890                                                abi_ulong target_addr)
3891 {
3892     int nsems;
3893     unsigned short *array;
3894     union semun semun;
3895     struct semid_ds semid_ds;
3896     int i, ret;
3897 
3898     semun.buf = &semid_ds;
3899 
3900     ret = semctl(semid, 0, IPC_STAT, semun);
3901     if (ret == -1)
3902         return get_errno(ret);
3903 
3904     nsems = semid_ds.sem_nsems;
3905 
3906     *host_array = g_try_new(unsigned short, nsems);
3907     if (!*host_array) {
3908         return -TARGET_ENOMEM;
3909     }
3910     array = lock_user(VERIFY_READ, target_addr,
3911                       nsems*sizeof(unsigned short), 1);
3912     if (!array) {
3913         g_free(*host_array);
3914         return -TARGET_EFAULT;
3915     }
3916 
3917     for(i=0; i<nsems; i++) {
3918         __get_user((*host_array)[i], &array[i]);
3919     }
3920     unlock_user(array, target_addr, 0);
3921 
3922     return 0;
3923 }
3924 
3925 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3926                                                unsigned short **host_array)
3927 {
3928     int nsems;
3929     unsigned short *array;
3930     union semun semun;
3931     struct semid_ds semid_ds;
3932     int i, ret;
3933 
3934     semun.buf = &semid_ds;
3935 
3936     ret = semctl(semid, 0, IPC_STAT, semun);
3937     if (ret == -1)
3938         return get_errno(ret);
3939 
3940     nsems = semid_ds.sem_nsems;
3941 
3942     array = lock_user(VERIFY_WRITE, target_addr,
3943                       nsems*sizeof(unsigned short), 0);
3944     if (!array)
3945         return -TARGET_EFAULT;
3946 
3947     for(i=0; i<nsems; i++) {
3948         __put_user((*host_array)[i], &array[i]);
3949     }
3950     g_free(*host_array);
3951     unlock_user(array, target_addr, 1);
3952 
3953     return 0;
3954 }
3955 
/*
 * Emulate semctl(2).  'target_arg' carries the guest's union semun by
 * value; how it is interpreted (int, guest array pointer, guest buf
 * pointer) depends on cmd.  Returns the semctl result or a target errno.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Keep only the low byte: targets may OR flag bits (presumably
     * IPC_64 and friends) into cmd — the host semctl() wants the
     * plain command. */
    cmd &= 0xff;

    switch( cmd ) {
	case GETVAL:
	case SETVAL:
            /* In 64 bit cross-endian situations, we will erroneously pick up
             * the wrong half of the union for the "val" element.  To rectify
             * this, the entire 8-byte structure is byteswapped, followed by
	     * a swap of the 4 byte val field. In other cases, the data is
	     * already in proper host byte order. */
	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
		target_su.buf = tswapal(target_su.buf);
		arg.val = tswap32(target_su.val);
	    } else {
		arg.val = target_su.val;
	    }
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            break;
	case GETALL:
	case SETALL:
            /* Marshal the full value array in, run the op, marshal it out. */
            err = target_to_host_semarray(semid, &array, target_su.array);
            if (err)
                return err;
            arg.array = array;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semarray(semid, target_su.array, &array);
            if (err)
                return err;
            break;
	case IPC_STAT:
	case IPC_SET:
	case SEM_STAT:
            /* Round-trip a semid_ds through the host call. */
            err = target_to_host_semid_ds(&dsarg, target_su.buf);
            if (err)
                return err;
            arg.buf = &dsarg;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semid_ds(target_su.buf, &dsarg);
            if (err)
                return err;
            break;
	case IPC_INFO:
	case SEM_INFO:
            arg.__buf = &seminfo;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_seminfo(target_su.__buf, &seminfo);
            if (err)
                return err;
            break;
	case IPC_RMID:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
            /* These commands ignore the semun argument entirely. */
            ret = get_errno(semctl(semid, semnum, cmd, NULL));
            break;
    }

    return ret;
}
4025 
/* Guest layout of struct sembuf (one semop(2) operation). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index within the set */
    short sem_op;            /* operation value */
    short sem_flg;           /* operation flags (IPC_NOWAIT, SEM_UNDO) */
};
4031 
4032 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4033                                              abi_ulong target_addr,
4034                                              unsigned nsops)
4035 {
4036     struct target_sembuf *target_sembuf;
4037     int i;
4038 
4039     target_sembuf = lock_user(VERIFY_READ, target_addr,
4040                               nsops*sizeof(struct target_sembuf), 1);
4041     if (!target_sembuf)
4042         return -TARGET_EFAULT;
4043 
4044     for(i=0; i<nsops; i++) {
4045         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4046         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4047         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4048     }
4049 
4050     unlock_user(target_sembuf, target_addr, 0);
4051 
4052     return 0;
4053 }
4054 
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/*
 * Emulate semop()/semtimedop().  'timeout', when non-zero, is a guest
 * pointer to a timespec (64-bit layout when time64 is true).  Falls
 * back to the multiplexed ipc() syscall when the host kernel has no
 * dedicated semtimedop syscall.  Returns a target errno.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* Bound the allocation below by the SEMOPM limit, as the kernel does. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
4117 
/* Guest layout of struct msqid_ds (msgctl IPC_STAT/IPC_SET buffer). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;             /* last msgsnd() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;             /* reserved (pads the time field on 32-bit ABIs) */
#endif
    abi_ulong msg_rtime;             /* last msgrcv() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;             /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;          /* current bytes in queue */
    abi_ulong msg_qnum;              /* messages currently queued */
    abi_ulong msg_qbytes;            /* max bytes allowed in queue */
    abi_ulong msg_lspid;             /* pid of last msgsnd() */
    abi_ulong msg_lrpid;             /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4141 
4142 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4143                                                abi_ulong target_addr)
4144 {
4145     struct target_msqid_ds *target_md;
4146 
4147     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4148         return -TARGET_EFAULT;
4149     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4150         return -TARGET_EFAULT;
4151     host_md->msg_stime = tswapal(target_md->msg_stime);
4152     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4153     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4154     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4155     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4156     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4157     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4158     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4159     unlock_user_struct(target_md, target_addr, 0);
4160     return 0;
4161 }
4162 
4163 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4164                                                struct msqid_ds *host_md)
4165 {
4166     struct target_msqid_ds *target_md;
4167 
4168     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4169         return -TARGET_EFAULT;
4170     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4171         return -TARGET_EFAULT;
4172     target_md->msg_stime = tswapal(host_md->msg_stime);
4173     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4174     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4175     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4176     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4177     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4178     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4179     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4180     unlock_user_struct(target_md, target_addr, 1);
4181     return 0;
4182 }
4183 
/* Guest layout of struct msginfo (msgctl IPC_INFO/MSG_INFO results). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4194 
4195 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4196                                               struct msginfo *host_msginfo)
4197 {
4198     struct target_msginfo *target_msginfo;
4199     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4200         return -TARGET_EFAULT;
4201     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4202     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4203     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4204     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4205     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4206     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4207     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4208     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4209     unlock_user_struct(target_msginfo, target_addr, 1);
4210     return 0;
4211 }
4212 
/*
 * Emulate msgctl(2).  'ptr' is the guest address of the command-specific
 * buffer (msqid_ds or msginfo).  Returns the msgctl result or a target
 * errno.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip flag bits the target may OR into cmd (presumably IPC_64);
     * the host msgctl() wants the plain command. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Round-trip a msqid_ds through the host call. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel returns a msginfo here despite the msqid_ds type. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
4244 
/* Guest layout of struct msgbuf: type tag followed by the payload
 * (mtext[1] is the traditional declaration for variable-length text). */
struct target_msgbuf {
    abi_long mtype;
    char	mtext[1];
};
4249 
/*
 * Emulate msgsnd(2).  'msgp' is the guest address of a target_msgbuf
 * holding mtype plus msgsz payload bytes.  Falls back to the
 * multiplexed ipc() syscall when the host has no msgsnd syscall.
 * Returns 0 or a target errno.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host msgbuf is a long mtype followed by the payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        /* s390's sys_ipc takes one fewer argument than the generic form. */
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4290 
#ifdef __NR_ipc
/*
 * Build the trailing sys_ipc() arguments for IPCOP_msgrcv.  The generic
 * ABI passes a pointer to a two-element {msgp, msgtyp} array plus a
 * dummy fifth argument; SPARC and s390x differ as noted below.
 */
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
4304 
/*
 * Emulate msgrcv(2).  Receives into a host bounce buffer, then copies
 * the message text and (swapped) mtype back to the guest's
 * target_msgbuf at 'msgp'.  Falls back to the multiplexed ipc()
 * syscall when the host has no msgrcv syscall.  Returns the number of
 * bytes received or a target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host msgbuf: a long mtype followed by up to msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* ret is the number of payload bytes received. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    /* NOTE(review): on error paths host_mb->mtype is still uninitialized
     * here, so indeterminate data is written to the guest's mtype field —
     * TODO confirm this is intentional. */
    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always non-NULL once lock_user_struct succeeded above;
     * the check is defensive. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4356 
4357 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4358                                                abi_ulong target_addr)
4359 {
4360     struct target_shmid_ds *target_sd;
4361 
4362     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4363         return -TARGET_EFAULT;
4364     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4365         return -TARGET_EFAULT;
4366     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4367     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4368     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4369     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4370     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4371     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4372     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4373     unlock_user_struct(target_sd, target_addr, 0);
4374     return 0;
4375 }
4376 
4377 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4378                                                struct shmid_ds *host_sd)
4379 {
4380     struct target_shmid_ds *target_sd;
4381 
4382     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4383         return -TARGET_EFAULT;
4384     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4385         return -TARGET_EFAULT;
4386     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4387     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4388     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4389     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4390     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4391     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4392     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4393     unlock_user_struct(target_sd, target_addr, 1);
4394     return 0;
4395 }
4396 
/*
 * Guest-ABI layout of struct shminfo as returned by shmctl(IPC_INFO).
 * Field order and abi_ulong sizing must match the target kernel ABI;
 * do not reorder.
 */
struct  target_shminfo {
    abi_ulong shmmax;  /* max segment size */
    abi_ulong shmmin;  /* min segment size */
    abi_ulong shmmni;  /* max number of segments */
    abi_ulong shmseg;  /* max segments per process */
    abi_ulong shmall;  /* max total shared memory (pages) */
};
4404 
4405 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4406                                               struct shminfo *host_shminfo)
4407 {
4408     struct target_shminfo *target_shminfo;
4409     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4410         return -TARGET_EFAULT;
4411     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4412     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4413     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4414     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4415     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4416     unlock_user_struct(target_shminfo, target_addr, 1);
4417     return 0;
4418 }
4419 
/*
 * Guest-ABI layout of struct shm_info as returned by shmctl(SHM_INFO).
 * Field order and sizing must match the target kernel ABI; do not reorder.
 */
struct target_shm_info {
    int used_ids;               /* number of existing segments */
    abi_ulong shm_tot;          /* total shared memory, in pages */
    abi_ulong shm_rss;          /* resident shared memory, in pages */
    abi_ulong shm_swp;          /* swapped shared memory, in pages */
    abi_ulong swap_attempts;    /* unused since Linux 2.4 */
    abi_ulong swap_successes;   /* unused since Linux 2.4 */
};
4428 
4429 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4430                                                struct shm_info *host_shm_info)
4431 {
4432     struct target_shm_info *target_shm_info;
4433     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4434         return -TARGET_EFAULT;
4435     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4436     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4437     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4438     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4439     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4440     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4441     unlock_user_struct(target_shm_info, target_addr, 1);
4442     return 0;
4443 }
4444 
4445 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4446 {
4447     struct shmid_ds dsarg;
4448     struct shminfo shminfo;
4449     struct shm_info shm_info;
4450     abi_long ret = -TARGET_EINVAL;
4451 
4452     cmd &= 0xff;
4453 
4454     switch(cmd) {
4455     case IPC_STAT:
4456     case IPC_SET:
4457     case SHM_STAT:
4458         if (target_to_host_shmid_ds(&dsarg, buf))
4459             return -TARGET_EFAULT;
4460         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4461         if (host_to_target_shmid_ds(buf, &dsarg))
4462             return -TARGET_EFAULT;
4463         break;
4464     case IPC_INFO:
4465         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4466         if (host_to_target_shminfo(buf, &shminfo))
4467             return -TARGET_EFAULT;
4468         break;
4469     case SHM_INFO:
4470         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4471         if (host_to_target_shm_info(buf, &shm_info))
4472             return -TARGET_EFAULT;
4473         break;
4474     case IPC_RMID:
4475     case SHM_LOCK:
4476     case SHM_UNLOCK:
4477         ret = get_errno(shmctl(shmid, cmd, NULL));
4478         break;
4479     }
4480 
4481     return ret;
4482 }
4483 
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    /* Default alignment requirement for shmat(): one guest page. */
    return TARGET_PAGE_SIZE;
}
#endif
4503 
/*
 * Emulate shmat(2): attach the SysV shared memory segment shmid at guest
 * address shmaddr (or pick an address if shmaddr is 0).
 *
 * Returns the guest attach address, or a -TARGET_Exxx error code
 * (negative values are distinguished from addresses by is_error() at
 * the call sites).  Must update QEMU's page flags and the shm_regions[]
 * table so a later shmdt() can undo the mapping.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce target SHMLBA alignment: round down with SHM_RND, else fail. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: the reserved VMA is replaced by the attachment. */
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the attached range valid (and writable unless SHM_RDONLY). */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    /* Record the attachment so do_shmdt() knows the segment size. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
4588 
4589 static inline abi_long do_shmdt(abi_ulong shmaddr)
4590 {
4591     int i;
4592     abi_long rv;
4593 
4594     /* shmdt pointers are always untagged */
4595 
4596     mmap_lock();
4597 
4598     for (i = 0; i < N_SHM_REGIONS; ++i) {
4599         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4600             shm_regions[i].in_use = false;
4601             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4602             break;
4603         }
4604     }
4605     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4606 
4607     mmap_unlock();
4608 
4609     return rv;
4610 }
4611 
4612 #ifdef TARGET_NR_ipc
4613 /* ??? This only works with linear mappings.  */
4614 /* do_ipc() must return target values and target errnos. */
4615 static abi_long do_ipc(CPUArchState *cpu_env,
4616                        unsigned int call, abi_long first,
4617                        abi_long second, abi_long third,
4618                        abi_long ptr, abi_long fifth)
4619 {
4620     int version;
4621     abi_long ret = 0;
4622 
4623     version = call >> 16;
4624     call &= 0xffff;
4625 
4626     switch (call) {
4627     case IPCOP_semop:
4628         ret = do_semtimedop(first, ptr, second, 0, false);
4629         break;
4630     case IPCOP_semtimedop:
4631     /*
4632      * The s390 sys_ipc variant has only five parameters instead of six
4633      * (as for default variant) and the only difference is the handling of
4634      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4635      * to a struct timespec where the generic variant uses fifth parameter.
4636      */
4637 #if defined(TARGET_S390X)
4638         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4639 #else
4640         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4641 #endif
4642         break;
4643 
4644     case IPCOP_semget:
4645         ret = get_errno(semget(first, second, third));
4646         break;
4647 
4648     case IPCOP_semctl: {
4649         /* The semun argument to semctl is passed by value, so dereference the
4650          * ptr argument. */
4651         abi_ulong atptr;
4652         get_user_ual(atptr, ptr);
4653         ret = do_semctl(first, second, third, atptr);
4654         break;
4655     }
4656 
4657     case IPCOP_msgget:
4658         ret = get_errno(msgget(first, second));
4659         break;
4660 
4661     case IPCOP_msgsnd:
4662         ret = do_msgsnd(first, ptr, second, third);
4663         break;
4664 
4665     case IPCOP_msgctl:
4666         ret = do_msgctl(first, second, ptr);
4667         break;
4668 
4669     case IPCOP_msgrcv:
4670         switch (version) {
4671         case 0:
4672             {
4673                 struct target_ipc_kludge {
4674                     abi_long msgp;
4675                     abi_long msgtyp;
4676                 } *tmp;
4677 
4678                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4679                     ret = -TARGET_EFAULT;
4680                     break;
4681                 }
4682 
4683                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4684 
4685                 unlock_user_struct(tmp, ptr, 0);
4686                 break;
4687             }
4688         default:
4689             ret = do_msgrcv(first, ptr, second, fifth, third);
4690         }
4691         break;
4692 
4693     case IPCOP_shmat:
4694         switch (version) {
4695         default:
4696         {
4697             abi_ulong raddr;
4698             raddr = do_shmat(cpu_env, first, ptr, second);
4699             if (is_error(raddr))
4700                 return get_errno(raddr);
4701             if (put_user_ual(raddr, third))
4702                 return -TARGET_EFAULT;
4703             break;
4704         }
4705         case 1:
4706             ret = -TARGET_EINVAL;
4707             break;
4708         }
4709 	break;
4710     case IPCOP_shmdt:
4711         ret = do_shmdt(ptr);
4712 	break;
4713 
4714     case IPCOP_shmget:
4715 	/* IPC_* flag values are the same on all linux platforms */
4716 	ret = get_errno(shmget(first, second, third));
4717 	break;
4718 
4719 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4720     case IPCOP_shmctl:
4721         ret = do_shmctl(first, second, ptr);
4722         break;
4723     default:
4724         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4725                       call, version);
4726 	ret = -TARGET_ENOSYS;
4727 	break;
4728     }
4729     return ret;
4730 }
4731 #endif
4732 
4733 /* kernel structure types definitions */
4734 
/*
 * X-macro expansion of syscall_types.h, pass 1: build an enum of
 * STRUCT_<name> identifiers (one per struct description, ending in
 * STRUCT_MAX) used as indices by the thunk conversion machinery.
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/*
 * Pass 2: emit one struct_<name>_def[] argtype table per ordinary struct
 * description.  STRUCT_SPECIAL entries expand to nothing here: their
 * conversion is hand-coded elsewhere.
 */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
4749 
4750 #define MAX_STRUCT_SIZE 4096
4751 
4752 #ifdef CONFIG_FIEMAP
4753 /* So fiemap access checks don't overflow on 32 bit systems.
4754  * This is very slightly smaller than the limit imposed by
4755  * the underlying kernel.
4756  */
4757 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4758                             / sizeof(struct fiemap_extent))
4759 
/*
 * Handle FS_IOC_FIEMAP.
 *
 * The argument is a struct fiemap immediately followed by
 * fm_extent_count struct fiemap_extent slots that the kernel fills in.
 * Because the total size is variable, the fixed-size buf_temp may be
 * too small, in which case a temporary heap buffer is used instead.
 *
 * Returns the ioctl result as a target errno value.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the guest's fixed fiemap header into buf_temp. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Cap fm_extent_count so the size computation below cannot overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
4839 #endif
4840 
4841 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4842                                 int fd, int cmd, abi_long arg)
4843 {
4844     const argtype *arg_type = ie->arg_type;
4845     int target_size;
4846     void *argptr;
4847     int ret;
4848     struct ifconf *host_ifconf;
4849     uint32_t outbufsz;
4850     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4851     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4852     int target_ifreq_size;
4853     int nb_ifreq;
4854     int free_buf = 0;
4855     int i;
4856     int target_ifc_len;
4857     abi_long target_ifc_buf;
4858     int host_ifc_len;
4859     char *host_ifc_buf;
4860 
4861     assert(arg_type[0] == TYPE_PTR);
4862     assert(ie->access == IOC_RW);
4863 
4864     arg_type++;
4865     target_size = thunk_type_size(arg_type, 0);
4866 
4867     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4868     if (!argptr)
4869         return -TARGET_EFAULT;
4870     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4871     unlock_user(argptr, arg, 0);
4872 
4873     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4874     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4875     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4876 
4877     if (target_ifc_buf != 0) {
4878         target_ifc_len = host_ifconf->ifc_len;
4879         nb_ifreq = target_ifc_len / target_ifreq_size;
4880         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4881 
4882         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4883         if (outbufsz > MAX_STRUCT_SIZE) {
4884             /*
4885              * We can't fit all the extents into the fixed size buffer.
4886              * Allocate one that is large enough and use it instead.
4887              */
4888             host_ifconf = g_try_malloc(outbufsz);
4889             if (!host_ifconf) {
4890                 return -TARGET_ENOMEM;
4891             }
4892             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4893             free_buf = 1;
4894         }
4895         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4896 
4897         host_ifconf->ifc_len = host_ifc_len;
4898     } else {
4899       host_ifc_buf = NULL;
4900     }
4901     host_ifconf->ifc_buf = host_ifc_buf;
4902 
4903     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4904     if (!is_error(ret)) {
4905 	/* convert host ifc_len to target ifc_len */
4906 
4907         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4908         target_ifc_len = nb_ifreq * target_ifreq_size;
4909         host_ifconf->ifc_len = target_ifc_len;
4910 
4911 	/* restore target ifc_buf */
4912 
4913         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4914 
4915 	/* copy struct ifconf to target user */
4916 
4917         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4918         if (!argptr)
4919             return -TARGET_EFAULT;
4920         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4921         unlock_user(argptr, arg, target_size);
4922 
4923         if (target_ifc_buf != 0) {
4924             /* copy ifreq[] to target user */
4925             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4926             for (i = 0; i < nb_ifreq ; i++) {
4927                 thunk_convert(argptr + i * target_ifreq_size,
4928                               host_ifc_buf + i * sizeof(struct ifreq),
4929                               ifreq_arg_type, THUNK_TARGET);
4930             }
4931             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4932         }
4933     }
4934 
4935     if (free_buf) {
4936         g_free(host_ifconf);
4937     }
4938 
4939     return ret;
4940 }
4941 
4942 #if defined(CONFIG_USBFS)
4943 #if HOST_LONG_BITS > 64
4944 #error USBDEVFS thunks do not support >64 bit hosts yet.
4945 #endif
/*
 * Bookkeeping for one in-flight USBDEVFS_SUBMITURB request.  The reap
 * path recovers this struct from the kernel-returned &host_urb pointer
 * via offsetof(struct live_urb, host_urb), so host_urb must stay a
 * direct member of this struct.
 */
struct live_urb {
    uint64_t target_urb_adr;        /* guest address of the guest urb (hash key) */
    uint64_t target_buf_adr;        /* guest address of the data buffer */
    char *target_buf_ptr;           /* host pointer to the locked guest buffer */
    struct usbdevfs_urb host_urb;   /* urb actually handed to the host kernel */
};
4952 
4953 static GHashTable *usbdevfs_urb_hashtable(void)
4954 {
4955     static GHashTable *urb_hashtable;
4956 
4957     if (!urb_hashtable) {
4958         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4959     }
4960     return urb_hashtable;
4961 }
4962 
/*
 * Register a submitted urb.  The live_urb starts with its 64-bit guest
 * address, so the struct pointer doubles as the hash key.
 */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
4968 
4969 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4970 {
4971     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4972     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4973 }
4974 
/* Drop a urb from the tracking table (does not free the live_urb itself). */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
4980 
/*
 * Handle USBDEVFS_REAPURB / USBDEVFS_REAPURBNDELAY.
 *
 * The kernel returns a pointer to the host urb we submitted; from it we
 * recover the enclosing struct live_urb (host_urb is embedded in it),
 * copy the completed urb and its data buffer back to the guest, write
 * the guest's own urb address into the guest result pointer at 'arg',
 * and release the tracking entry.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    /* The ioctl fills buf_temp with a host pointer to the reaped urb. */
    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    /* Recover the live_urb that embeds the returned host_urb. */
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Unlock the data buffer locked at submit time, copying it back. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
5040 
5041 static abi_long
5042 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5043                              uint8_t *buf_temp __attribute__((unused)),
5044                              int fd, int cmd, abi_long arg)
5045 {
5046     struct live_urb *lurb;
5047 
5048     /* map target address back to host URB with metadata. */
5049     lurb = urb_hashtable_lookup(arg);
5050     if (!lurb) {
5051         return -TARGET_EFAULT;
5052     }
5053     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5054 }
5055 
/*
 * Handle USBDEVFS_SUBMITURB.
 *
 * Allocates a struct live_urb holding a host-format copy of the guest
 * urb, locks the guest data buffer for the duration of the transfer,
 * and submits the urb to the host kernel.  On success the live_urb is
 * recorded in the hash table keyed by the guest urb address; the locked
 * buffer and the allocation are released by the reap path.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Remember the guest addresses so the reap path can copy back. */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submission failed: drop the buffer lock without copy-back. */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
5116 #endif /* CONFIG_USBFS */
5117 
5118 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5119                             int cmd, abi_long arg)
5120 {
5121     void *argptr;
5122     struct dm_ioctl *host_dm;
5123     abi_long guest_data;
5124     uint32_t guest_data_size;
5125     int target_size;
5126     const argtype *arg_type = ie->arg_type;
5127     abi_long ret;
5128     void *big_buf = NULL;
5129     char *host_data;
5130 
5131     arg_type++;
5132     target_size = thunk_type_size(arg_type, 0);
5133     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5134     if (!argptr) {
5135         ret = -TARGET_EFAULT;
5136         goto out;
5137     }
5138     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5139     unlock_user(argptr, arg, 0);
5140 
5141     /* buf_temp is too small, so fetch things into a bigger buffer */
5142     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5143     memcpy(big_buf, buf_temp, target_size);
5144     buf_temp = big_buf;
5145     host_dm = big_buf;
5146 
5147     guest_data = arg + host_dm->data_start;
5148     if ((guest_data - arg) < 0) {
5149         ret = -TARGET_EINVAL;
5150         goto out;
5151     }
5152     guest_data_size = host_dm->data_size - host_dm->data_start;
5153     host_data = (char*)host_dm + host_dm->data_start;
5154 
5155     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5156     if (!argptr) {
5157         ret = -TARGET_EFAULT;
5158         goto out;
5159     }
5160 
5161     switch (ie->host_cmd) {
5162     case DM_REMOVE_ALL:
5163     case DM_LIST_DEVICES:
5164     case DM_DEV_CREATE:
5165     case DM_DEV_REMOVE:
5166     case DM_DEV_SUSPEND:
5167     case DM_DEV_STATUS:
5168     case DM_DEV_WAIT:
5169     case DM_TABLE_STATUS:
5170     case DM_TABLE_CLEAR:
5171     case DM_TABLE_DEPS:
5172     case DM_LIST_VERSIONS:
5173         /* no input data */
5174         break;
5175     case DM_DEV_RENAME:
5176     case DM_DEV_SET_GEOMETRY:
5177         /* data contains only strings */
5178         memcpy(host_data, argptr, guest_data_size);
5179         break;
5180     case DM_TARGET_MSG:
5181         memcpy(host_data, argptr, guest_data_size);
5182         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5183         break;
5184     case DM_TABLE_LOAD:
5185     {
5186         void *gspec = argptr;
5187         void *cur_data = host_data;
5188         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5189         int spec_size = thunk_type_size(arg_type, 0);
5190         int i;
5191 
5192         for (i = 0; i < host_dm->target_count; i++) {
5193             struct dm_target_spec *spec = cur_data;
5194             uint32_t next;
5195             int slen;
5196 
5197             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5198             slen = strlen((char*)gspec + spec_size) + 1;
5199             next = spec->next;
5200             spec->next = sizeof(*spec) + slen;
5201             strcpy((char*)&spec[1], gspec + spec_size);
5202             gspec += next;
5203             cur_data += spec->next;
5204         }
5205         break;
5206     }
5207     default:
5208         ret = -TARGET_EINVAL;
5209         unlock_user(argptr, guest_data, 0);
5210         goto out;
5211     }
5212     unlock_user(argptr, guest_data, 0);
5213 
5214     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5215     if (!is_error(ret)) {
5216         guest_data = arg + host_dm->data_start;
5217         guest_data_size = host_dm->data_size - host_dm->data_start;
5218         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5219         switch (ie->host_cmd) {
5220         case DM_REMOVE_ALL:
5221         case DM_DEV_CREATE:
5222         case DM_DEV_REMOVE:
5223         case DM_DEV_RENAME:
5224         case DM_DEV_SUSPEND:
5225         case DM_DEV_STATUS:
5226         case DM_TABLE_LOAD:
5227         case DM_TABLE_CLEAR:
5228         case DM_TARGET_MSG:
5229         case DM_DEV_SET_GEOMETRY:
5230             /* no return data */
5231             break;
5232         case DM_LIST_DEVICES:
5233         {
5234             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5235             uint32_t remaining_data = guest_data_size;
5236             void *cur_data = argptr;
5237             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5238             int nl_size = 12; /* can't use thunk_size due to alignment */
5239 
5240             while (1) {
5241                 uint32_t next = nl->next;
5242                 if (next) {
5243                     nl->next = nl_size + (strlen(nl->name) + 1);
5244                 }
5245                 if (remaining_data < nl->next) {
5246                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5247                     break;
5248                 }
5249                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5250                 strcpy(cur_data + nl_size, nl->name);
5251                 cur_data += nl->next;
5252                 remaining_data -= nl->next;
5253                 if (!next) {
5254                     break;
5255                 }
5256                 nl = (void*)nl + next;
5257             }
5258             break;
5259         }
5260         case DM_DEV_WAIT:
5261         case DM_TABLE_STATUS:
5262         {
5263             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5264             void *cur_data = argptr;
5265             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5266             int spec_size = thunk_type_size(arg_type, 0);
5267             int i;
5268 
5269             for (i = 0; i < host_dm->target_count; i++) {
5270                 uint32_t next = spec->next;
5271                 int slen = strlen((char*)&spec[1]) + 1;
5272                 spec->next = (cur_data - argptr) + spec_size + slen;
5273                 if (guest_data_size < spec->next) {
5274                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5275                     break;
5276                 }
5277                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5278                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5279                 cur_data = argptr + spec->next;
5280                 spec = (void*)host_dm + host_dm->data_start + next;
5281             }
5282             break;
5283         }
5284         case DM_TABLE_DEPS:
5285         {
5286             void *hdata = (void*)host_dm + host_dm->data_start;
5287             int count = *(uint32_t*)hdata;
5288             uint64_t *hdev = hdata + 8;
5289             uint64_t *gdev = argptr + 8;
5290             int i;
5291 
5292             *(uint32_t*)argptr = tswap32(count);
5293             for (i = 0; i < count; i++) {
5294                 *gdev = tswap64(*hdev);
5295                 gdev++;
5296                 hdev++;
5297             }
5298             break;
5299         }
5300         case DM_LIST_VERSIONS:
5301         {
5302             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5303             uint32_t remaining_data = guest_data_size;
5304             void *cur_data = argptr;
5305             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5306             int vers_size = thunk_type_size(arg_type, 0);
5307 
5308             while (1) {
5309                 uint32_t next = vers->next;
5310                 if (next) {
5311                     vers->next = vers_size + (strlen(vers->name) + 1);
5312                 }
5313                 if (remaining_data < vers->next) {
5314                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5315                     break;
5316                 }
5317                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5318                 strcpy(cur_data + vers_size, vers->name);
5319                 cur_data += vers->next;
5320                 remaining_data -= vers->next;
5321                 if (!next) {
5322                     break;
5323                 }
5324                 vers = (void*)vers + next;
5325             }
5326             break;
5327         }
5328         default:
5329             unlock_user(argptr, guest_data, 0);
5330             ret = -TARGET_EINVAL;
5331             goto out;
5332         }
5333         unlock_user(argptr, guest_data, guest_data_size);
5334 
5335         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5336         if (!argptr) {
5337             ret = -TARGET_EFAULT;
5338             goto out;
5339         }
5340         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5341         unlock_user(argptr, arg, target_size);
5342     }
5343 out:
5344     g_free(big_buf);
5345     return ret;
5346 }
5347 
5348 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5349                                int cmd, abi_long arg)
5350 {
5351     void *argptr;
5352     int target_size;
5353     const argtype *arg_type = ie->arg_type;
5354     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5355     abi_long ret;
5356 
5357     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5358     struct blkpg_partition host_part;
5359 
5360     /* Read and convert blkpg */
5361     arg_type++;
5362     target_size = thunk_type_size(arg_type, 0);
5363     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5364     if (!argptr) {
5365         ret = -TARGET_EFAULT;
5366         goto out;
5367     }
5368     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5369     unlock_user(argptr, arg, 0);
5370 
5371     switch (host_blkpg->op) {
5372     case BLKPG_ADD_PARTITION:
5373     case BLKPG_DEL_PARTITION:
5374         /* payload is struct blkpg_partition */
5375         break;
5376     default:
5377         /* Unknown opcode */
5378         ret = -TARGET_EINVAL;
5379         goto out;
5380     }
5381 
5382     /* Read and convert blkpg->data */
5383     arg = (abi_long)(uintptr_t)host_blkpg->data;
5384     target_size = thunk_type_size(part_arg_type, 0);
5385     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5386     if (!argptr) {
5387         ret = -TARGET_EFAULT;
5388         goto out;
5389     }
5390     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5391     unlock_user(argptr, arg, 0);
5392 
5393     /* Swizzle the data pointer to our local copy and call! */
5394     host_blkpg->data = &host_part;
5395     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5396 
5397 out:
5398     return ret;
5399 }
5400 
/*
 * Handler for routing-table ioctls (struct rtentry payload).
 *
 * The rtentry's rt_dev member is a pointer to a device-name string in
 * guest memory, which the generic thunk machinery cannot follow.  The
 * struct is therefore converted field by field here: every field goes
 * through thunk_convert() except rt_dev, whose guest string is locked
 * into host memory and the host pointer stored directly.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    /* This handler is only registered for write-only rtentry ioctls. */
    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    /* rtentry must not have a custom converter; we convert it inline. */
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            /* rt_dev: lock the guest string and store the host pointer
               instead of converting the pointer value itself. */
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                /* NULL rt_dev means "no device name" — pass through. */
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* The loop above must have seen the rt_dev field exactly once. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        /* Release the locked device-name string (read-only: len 0). */
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5466 
5467 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5468                                      int fd, int cmd, abi_long arg)
5469 {
5470     int sig = target_to_host_signal(arg);
5471     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5472 }
5473 
5474 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5475                                     int fd, int cmd, abi_long arg)
5476 {
5477     struct timeval tv;
5478     abi_long ret;
5479 
5480     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5481     if (is_error(ret)) {
5482         return ret;
5483     }
5484 
5485     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5486         if (copy_to_user_timeval(arg, &tv)) {
5487             return -TARGET_EFAULT;
5488         }
5489     } else {
5490         if (copy_to_user_timeval64(arg, &tv)) {
5491             return -TARGET_EFAULT;
5492         }
5493     }
5494 
5495     return ret;
5496 }
5497 
5498 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5499                                       int fd, int cmd, abi_long arg)
5500 {
5501     struct timespec ts;
5502     abi_long ret;
5503 
5504     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5505     if (is_error(ret)) {
5506         return ret;
5507     }
5508 
5509     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5510         if (host_to_target_timespec(arg, &ts)) {
5511             return -TARGET_EFAULT;
5512         }
5513     } else{
5514         if (host_to_target_timespec64(arg, &ts)) {
5515             return -TARGET_EFAULT;
5516         }
5517     }
5518 
5519     return ret;
5520 }
5521 
5522 #ifdef TIOCGPTPEER
5523 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5524                                      int fd, int cmd, abi_long arg)
5525 {
5526     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5527     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5528 }
5529 #endif
5530 
5531 #ifdef HAVE_DRM_H
5532 
5533 static void unlock_drm_version(struct drm_version *host_ver,
5534                                struct target_drm_version *target_ver,
5535                                bool copy)
5536 {
5537     unlock_user(host_ver->name, target_ver->name,
5538                                 copy ? host_ver->name_len : 0);
5539     unlock_user(host_ver->date, target_ver->date,
5540                                 copy ? host_ver->date_len : 0);
5541     unlock_user(host_ver->desc, target_ver->desc,
5542                                 copy ? host_ver->desc_len : 0);
5543 }
5544 
5545 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5546                                           struct target_drm_version *target_ver)
5547 {
5548     memset(host_ver, 0, sizeof(*host_ver));
5549 
5550     __get_user(host_ver->name_len, &target_ver->name_len);
5551     if (host_ver->name_len) {
5552         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5553                                    target_ver->name_len, 0);
5554         if (!host_ver->name) {
5555             return -EFAULT;
5556         }
5557     }
5558 
5559     __get_user(host_ver->date_len, &target_ver->date_len);
5560     if (host_ver->date_len) {
5561         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5562                                    target_ver->date_len, 0);
5563         if (!host_ver->date) {
5564             goto err;
5565         }
5566     }
5567 
5568     __get_user(host_ver->desc_len, &target_ver->desc_len);
5569     if (host_ver->desc_len) {
5570         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5571                                    target_ver->desc_len, 0);
5572         if (!host_ver->desc) {
5573             goto err;
5574         }
5575     }
5576 
5577     return 0;
5578 err:
5579     unlock_drm_version(host_ver, target_ver, false);
5580     return -EFAULT;
5581 }
5582 
/*
 * Copy the scalar results of a successful DRM_IOCTL_VERSION back to the
 * guest struct and release the string buffers, flushing the
 * kernel-written name/date/desc contents to guest memory (copy=true).
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
5595 
5596 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5597                              int fd, int cmd, abi_long arg)
5598 {
5599     struct drm_version *ver;
5600     struct target_drm_version *target_ver;
5601     abi_long ret;
5602 
5603     switch (ie->host_cmd) {
5604     case DRM_IOCTL_VERSION:
5605         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5606             return -TARGET_EFAULT;
5607         }
5608         ver = (struct drm_version *)buf_temp;
5609         ret = target_to_host_drmversion(ver, target_ver);
5610         if (!is_error(ret)) {
5611             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5612             if (is_error(ret)) {
5613                 unlock_drm_version(ver, target_ver, false);
5614             } else {
5615                 host_to_target_drmversion(target_ver, ver);
5616             }
5617         }
5618         unlock_user_struct(target_ver, arg, 0);
5619         return ret;
5620     }
5621     return -TARGET_ENOSYS;
5622 }
5623 
5624 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5625                                            struct drm_i915_getparam *gparam,
5626                                            int fd, abi_long arg)
5627 {
5628     abi_long ret;
5629     int value;
5630     struct target_drm_i915_getparam *target_gparam;
5631 
5632     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5633         return -TARGET_EFAULT;
5634     }
5635 
5636     __get_user(gparam->param, &target_gparam->param);
5637     gparam->value = &value;
5638     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5639     put_user_s32(value, target_gparam->value);
5640 
5641     unlock_user_struct(target_gparam, arg, 0);
5642     return ret;
5643 }
5644 
5645 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5646                                   int fd, int cmd, abi_long arg)
5647 {
5648     switch (ie->host_cmd) {
5649     case DRM_IOCTL_I915_GETPARAM:
5650         return do_ioctl_drm_i915_getparam(ie,
5651                                           (struct drm_i915_getparam *)buf_temp,
5652                                           fd, arg);
5653     default:
5654         return -TARGET_ENOSYS;
5655     }
5656 }
5657 
5658 #endif
5659 
5660 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5661                                         int fd, int cmd, abi_long arg)
5662 {
5663     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5664     struct tun_filter *target_filter;
5665     char *target_addr;
5666 
5667     assert(ie->access == IOC_W);
5668 
5669     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5670     if (!target_filter) {
5671         return -TARGET_EFAULT;
5672     }
5673     filter->flags = tswap16(target_filter->flags);
5674     filter->count = tswap16(target_filter->count);
5675     unlock_user(target_filter, arg, 0);
5676 
5677     if (filter->count) {
5678         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5679             MAX_STRUCT_SIZE) {
5680             return -TARGET_EFAULT;
5681         }
5682 
5683         target_addr = lock_user(VERIFY_READ,
5684                                 arg + offsetof(struct tun_filter, addr),
5685                                 filter->count * ETH_ALEN, 1);
5686         if (!target_addr) {
5687             return -TARGET_EFAULT;
5688         }
5689         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5690         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5691     }
5692 
5693     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5694 }
5695 
/*
 * Master ioctl translation table, populated from ioctls.h:
 *   IOCTL         - generic entry, converted via the argtype description
 *   IOCTL_SPECIAL - handled by a dedicated do_ioctl_*() callback (dofn)
 *   IOCTL_IGNORE  - recognized target command with no host equivalent
 *                   (host_cmd == 0)
 */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, }, /* sentinel: terminates the lookup loop in do_ioctl() */
};
5706 
5707 /* ??? Implement proper locking for ioctls.  */
5708 /* do_ioctl() Must return target values and target errnos. */
5709 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5710 {
5711     const IOCTLEntry *ie;
5712     const argtype *arg_type;
5713     abi_long ret;
5714     uint8_t buf_temp[MAX_STRUCT_SIZE];
5715     int target_size;
5716     void *argptr;
5717 
5718     ie = ioctl_entries;
5719     for(;;) {
5720         if (ie->target_cmd == 0) {
5721             qemu_log_mask(
5722                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5723             return -TARGET_ENOSYS;
5724         }
5725         if (ie->target_cmd == cmd)
5726             break;
5727         ie++;
5728     }
5729     arg_type = ie->arg_type;
5730     if (ie->do_ioctl) {
5731         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5732     } else if (!ie->host_cmd) {
5733         /* Some architectures define BSD ioctls in their headers
5734            that are not implemented in Linux.  */
5735         return -TARGET_ENOSYS;
5736     }
5737 
5738     switch(arg_type[0]) {
5739     case TYPE_NULL:
5740         /* no argument */
5741         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5742         break;
5743     case TYPE_PTRVOID:
5744     case TYPE_INT:
5745     case TYPE_LONG:
5746     case TYPE_ULONG:
5747         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5748         break;
5749     case TYPE_PTR:
5750         arg_type++;
5751         target_size = thunk_type_size(arg_type, 0);
5752         switch(ie->access) {
5753         case IOC_R:
5754             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5755             if (!is_error(ret)) {
5756                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5757                 if (!argptr)
5758                     return -TARGET_EFAULT;
5759                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5760                 unlock_user(argptr, arg, target_size);
5761             }
5762             break;
5763         case IOC_W:
5764             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5765             if (!argptr)
5766                 return -TARGET_EFAULT;
5767             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5768             unlock_user(argptr, arg, 0);
5769             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5770             break;
5771         default:
5772         case IOC_RW:
5773             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5774             if (!argptr)
5775                 return -TARGET_EFAULT;
5776             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5777             unlock_user(argptr, arg, 0);
5778             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5779             if (!is_error(ret)) {
5780                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5781                 if (!argptr)
5782                     return -TARGET_EFAULT;
5783                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5784                 unlock_user(argptr, arg, target_size);
5785             }
5786             break;
5787         }
5788         break;
5789     default:
5790         qemu_log_mask(LOG_UNIMP,
5791                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5792                       (long)cmd, arg_type[0]);
5793         ret = -TARGET_ENOSYS;
5794         break;
5795     }
5796     return ret;
5797 }
5798 
/* Input-mode (c_iflag) bits: target <-> host bit translation table. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
        { 0, 0, 0, 0 }
};
5817 
5818 static const bitmask_transtbl oflag_tbl[] = {
5819 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5820 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5821 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5822 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5823 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5824 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5825 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5826 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5827 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5828 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5829 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5830 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5831 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5832 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5833 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5834 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5835 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5836 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5837 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5838 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5839 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5840 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5841 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5842 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5843 	{ 0, 0, 0, 0 }
5844 };
5845 
5846 static const bitmask_transtbl cflag_tbl[] = {
5847 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5848 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5849 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5850 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5851 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5852 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5853 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5854 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5855 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5856 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5857 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5858 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5859 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5860 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5861 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5862 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5863 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5864 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5865 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5866 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5867 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5868 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5869 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5870 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5871 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5872 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5873 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5874 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5875 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5876 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5877 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5878 	{ 0, 0, 0, 0 }
5879 };
5880 
5881 static const bitmask_transtbl lflag_tbl[] = {
5882   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5883   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5884   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5885   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5886   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5887   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5888   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5889   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5890   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5891   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5892   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5893   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5894   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5895   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5896   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5897   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5898   { 0, 0, 0, 0 }
5899 };
5900 
5901 static void target_to_host_termios (void *dst, const void *src)
5902 {
5903     struct host_termios *host = dst;
5904     const struct target_termios *target = src;
5905 
5906     host->c_iflag =
5907         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5908     host->c_oflag =
5909         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5910     host->c_cflag =
5911         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5912     host->c_lflag =
5913         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5914     host->c_line = target->c_line;
5915 
5916     memset(host->c_cc, 0, sizeof(host->c_cc));
5917     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5918     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5919     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5920     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5921     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5922     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5923     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5924     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5925     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5926     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5927     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5928     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5929     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5930     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5931     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5932     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5933     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5934 }
5935 
5936 static void host_to_target_termios (void *dst, const void *src)
5937 {
5938     struct target_termios *target = dst;
5939     const struct host_termios *host = src;
5940 
5941     target->c_iflag =
5942         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5943     target->c_oflag =
5944         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5945     target->c_cflag =
5946         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5947     target->c_lflag =
5948         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5949     target->c_line = host->c_line;
5950 
5951     memset(target->c_cc, 0, sizeof(target->c_cc));
5952     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5953     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5954     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5955     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5956     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5957     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5958     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5959     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5960     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5961     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5962     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5963     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5964     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5965     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5966     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5967     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5968     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5969 }
5970 
/*
 * Thunk description for struct termios: conversion is performed by the
 * custom callbacks above rather than by per-field type descriptions.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
5977 
/* mmap(2) flag bits: target <-> host bit translation table. */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
6000 
6001 /*
6002  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6003  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6004  */
6005 #if defined(TARGET_I386)
6006 
6007 /* NOTE: there is really one LDT for all the threads */
6008 static uint8_t *ldt_table;
6009 
6010 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6011 {
6012     int size;
6013     void *p;
6014 
6015     if (!ldt_table)
6016         return 0;
6017     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6018     if (size > bytecount)
6019         size = bytecount;
6020     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6021     if (!p)
6022         return -TARGET_EFAULT;
6023     /* ??? Should this by byteswapped?  */
6024     memcpy(p, ldt_table, size);
6025     unlock_user(p, ptr, size);
6026     return size;
6027 }
6028 
/* XXX: add locking support */
/*
 * Handle the "write" flavours of modify_ldt(2): install (or clear) one
 * descriptor in the emulated LDT, lazily allocating the table on first
 * use.  'oldmode' != 0 selects the legacy func==1 semantics (no
 * 'useable' bit, 64-bit code segments rejected).
 * Returns 0 on success or a negative target errno.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    /* Convert the guest's descriptor request into host byte order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flag bits (same bit layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 is only accepted via the new interface and only
       when the entry is marked not-present. */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the fields into x86 descriptor format: base and limit are
       split across both words, the attribute bits live in entry_2. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    /* Each descriptor is 8 bytes, hence entry_number << 3. */
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6120 
6121 /* specific and weird i386 syscalls */
6122 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6123                               unsigned long bytecount)
6124 {
6125     abi_long ret;
6126 
6127     switch (func) {
6128     case 0:
6129         ret = read_ldt(ptr, bytecount);
6130         break;
6131     case 1:
6132         ret = write_ldt(env, ptr, bytecount, 1);
6133         break;
6134     case 0x11:
6135         ret = write_ldt(env, ptr, bytecount, 0);
6136         break;
6137     default:
6138         ret = -TARGET_ENOSYS;
6139         break;
6140     }
6141     return ret;
6142 }
6143 
6144 #if defined(TARGET_ABI32)
/*
 * Emulate set_thread_area(2): install a TLS descriptor into the GDT.
 * When the guest passes entry_number == -1, find a free TLS slot and
 * write the chosen index back into the guest structure.
 * Returns 0 on success or a negative target errno.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    /* Convert the guest's descriptor request into host byte order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Caller asked us to allocate: take the first empty TLS slot
           and report the index back to the guest. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the flag bits (same bit layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the fields into x86 descriptor format. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6229 
/*
 * Emulate get_thread_area(2): read the TLS descriptor selected by
 * entry_number out of the GDT and unpack it back into the guest's
 * user_desc-style structure.  Returns 0 or a negative target errno.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Reverse of the packing done in do_set_thread_area(): pull the
       individual attribute bits back out of the descriptor words. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    /* Copy the unpacked fields back to the guest in target byte order. */
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6276 
/* arch_prctl() is not available on 32-bit x86; the FS/GS bases are
   managed with set_thread_area() there instead. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6281 #else
6282 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6283 {
6284     abi_long ret = 0;
6285     abi_ulong val;
6286     int idx;
6287 
6288     switch(code) {
6289     case TARGET_ARCH_SET_GS:
6290     case TARGET_ARCH_SET_FS:
6291         if (code == TARGET_ARCH_SET_GS)
6292             idx = R_GS;
6293         else
6294             idx = R_FS;
6295         cpu_x86_load_seg(env, idx, 0);
6296         env->segs[idx].base = addr;
6297         break;
6298     case TARGET_ARCH_GET_GS:
6299     case TARGET_ARCH_GET_FS:
6300         if (code == TARGET_ARCH_GET_GS)
6301             idx = R_GS;
6302         else
6303             idx = R_FS;
6304         val = env->segs[idx].base;
6305         if (put_user(val, addr, abi_ulong))
6306             ret = -TARGET_EFAULT;
6307         break;
6308     default:
6309         ret = -TARGET_EINVAL;
6310         break;
6311     }
6312     return ret;
6313 }
#endif /* defined(TARGET_ABI32) */
6315 #endif /* defined(TARGET_I386) */
6316 
6317 /*
6318  * These constants are generic.  Supply any that are missing from the host.
6319  */
6320 #ifndef PR_SET_NAME
6321 # define PR_SET_NAME    15
6322 # define PR_GET_NAME    16
6323 #endif
6324 #ifndef PR_SET_FP_MODE
6325 # define PR_SET_FP_MODE 45
6326 # define PR_GET_FP_MODE 46
6327 # define PR_FP_MODE_FR   (1 << 0)
6328 # define PR_FP_MODE_FRE  (1 << 1)
6329 #endif
6330 #ifndef PR_SVE_SET_VL
6331 # define PR_SVE_SET_VL  50
6332 # define PR_SVE_GET_VL  51
6333 # define PR_SVE_VL_LEN_MASK  0xffff
6334 # define PR_SVE_VL_INHERIT   (1 << 17)
6335 #endif
6336 #ifndef PR_PAC_RESET_KEYS
6337 # define PR_PAC_RESET_KEYS  54
6338 # define PR_PAC_APIAKEY   (1 << 0)
6339 # define PR_PAC_APIBKEY   (1 << 1)
6340 # define PR_PAC_APDAKEY   (1 << 2)
6341 # define PR_PAC_APDBKEY   (1 << 3)
6342 # define PR_PAC_APGAKEY   (1 << 4)
6343 #endif
6344 #ifndef PR_SET_TAGGED_ADDR_CTRL
6345 # define PR_SET_TAGGED_ADDR_CTRL 55
6346 # define PR_GET_TAGGED_ADDR_CTRL 56
6347 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6348 #endif
6349 #ifndef PR_MTE_TCF_SHIFT
6350 # define PR_MTE_TCF_SHIFT       1
6351 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6352 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6353 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6354 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6355 # define PR_MTE_TAG_SHIFT       3
6356 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6357 #endif
6358 #ifndef PR_SET_IO_FLUSHER
6359 # define PR_SET_IO_FLUSHER 57
6360 # define PR_GET_IO_FLUSHER 58
6361 #endif
6362 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6363 # define PR_SET_SYSCALL_USER_DISPATCH 59
6364 #endif
6365 
6366 #include "target_prctl.h"
6367 
/* Default handler for zero-argument prctl options a target does not
   implement; used via the #define fallbacks below. */
static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}
6372 
/* Default handler for one-argument prctl options a target does not
   implement; used via the #define fallbacks below. */
static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}
6377 
6378 #ifndef do_prctl_get_fp_mode
6379 #define do_prctl_get_fp_mode do_prctl_inval0
6380 #endif
6381 #ifndef do_prctl_set_fp_mode
6382 #define do_prctl_set_fp_mode do_prctl_inval1
6383 #endif
6384 #ifndef do_prctl_get_vl
6385 #define do_prctl_get_vl do_prctl_inval0
6386 #endif
6387 #ifndef do_prctl_set_vl
6388 #define do_prctl_set_vl do_prctl_inval1
6389 #endif
6390 #ifndef do_prctl_reset_keys
6391 #define do_prctl_reset_keys do_prctl_inval1
6392 #endif
6393 #ifndef do_prctl_set_tagged_addr_ctrl
6394 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6395 #endif
6396 #ifndef do_prctl_get_tagged_addr_ctrl
6397 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6398 #endif
6399 #ifndef do_prctl_get_unalign
6400 #define do_prctl_get_unalign do_prctl_inval1
6401 #endif
6402 #ifndef do_prctl_set_unalign
6403 #define do_prctl_set_unalign do_prctl_inval1
6404 #endif
6405 
/*
 * Emulate prctl(2).  Options fall into four groups: those needing
 * pointer/signal translation (handled inline), those delegated to
 * per-target hooks (do_prctl_*), those safe to pass straight through
 * to the host, and those explicitly refused with -TARGET_EINVAL.
 * Returns a target errno or the host result.
 */
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            /* Translate the host signal number before handing it back. */
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            /* The thread name buffer is a fixed 16 bytes. */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    /* Per-target hooks; default to -TARGET_EINVAL via the
       do_prctl_inval* fallbacks defined above. */
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
    case PR_GET_TID_ADDRESS:
        /* TODO */
        return -TARGET_EINVAL;

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}
6533 
6534 #define NEW_STACK_SIZE 0x40000
6535 
6536 
6537 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6538 typedef struct {
6539     CPUArchState *env;
6540     pthread_mutex_t mutex;
6541     pthread_cond_t cond;
6542     pthread_t thread;
6543     uint32_t tid;
6544     abi_ulong child_tidptr;
6545     abi_ulong parent_tidptr;
6546     sigset_t sigmask;
6547 } new_thread_info;
6548 
/*
 * Start routine for a guest thread created by do_fork() with CLONE_VM.
 * Runs on the new host pthread: registers with RCU and TCG, publishes
 * the TID to the requested locations, signals the parent that setup is
 * done, then waits for the parent to finish before entering cpu_loop().
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    /* Honor CLONE_CHILD_SETTID / CLONE_PARENT_SETTID requests. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6582 
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests become host
 * pthreads running a fresh CPU copy; anything else is emulated with a
 * plain fork().  Returns the child TID/PID on success, -1 on pthread
 * failure, or a negative target errno for invalid flag combinations.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Hand the thread-setup parameters to clone_func() via 'info';
           the condvar handshake below keeps it alive long enough. */
        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        /* Restore our signal mask now that the child inherited the
           fully-blocked one. */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
6727 
/* warning : doesn't handle linux specific flags... */
/*
 * Translate a target fcntl(2) command to its host equivalent.
 * Lock commands are mapped to the 64-bit host variants so that
 * 'struct flock64' can be used uniformly.  Returns -TARGET_EINVAL
 * for commands we do not translate.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    /* These commands share the same value on target and host. */
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
6834 
6835 #define FLOCK_TRANSTBL \
6836     switch (type) { \
6837     TRANSTBL_CONVERT(F_RDLCK); \
6838     TRANSTBL_CONVERT(F_WRLCK); \
6839     TRANSTBL_CONVERT(F_UNLCK); \
6840     }
6841 
/* Translate a target flock l_type (F_RDLCK/F_WRLCK/F_UNLCK) to the
   host value; unknown types yield -TARGET_EINVAL. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}
6849 
/* Translate a host flock l_type back to the target value. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6860 
6861 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6862                                             abi_ulong target_flock_addr)
6863 {
6864     struct target_flock *target_fl;
6865     int l_type;
6866 
6867     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6868         return -TARGET_EFAULT;
6869     }
6870 
6871     __get_user(l_type, &target_fl->l_type);
6872     l_type = target_to_host_flock(l_type);
6873     if (l_type < 0) {
6874         return l_type;
6875     }
6876     fl->l_type = l_type;
6877     __get_user(fl->l_whence, &target_fl->l_whence);
6878     __get_user(fl->l_start, &target_fl->l_start);
6879     __get_user(fl->l_len, &target_fl->l_len);
6880     __get_user(fl->l_pid, &target_fl->l_pid);
6881     unlock_user_struct(target_fl, target_flock_addr, 0);
6882     return 0;
6883 }
6884 
6885 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6886                                           const struct flock64 *fl)
6887 {
6888     struct target_flock *target_fl;
6889     short l_type;
6890 
6891     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6892         return -TARGET_EFAULT;
6893     }
6894 
6895     l_type = host_to_target_flock(fl->l_type);
6896     __put_user(l_type, &target_fl->l_type);
6897     __put_user(fl->l_whence, &target_fl->l_whence);
6898     __put_user(fl->l_start, &target_fl->l_start);
6899     __put_user(fl->l_len, &target_fl->l_len);
6900     __put_user(fl->l_pid, &target_fl->l_pid);
6901     unlock_user_struct(target_fl, target_flock_addr, 1);
6902     return 0;
6903 }
6904 
6905 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6906 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6907 
6908 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6909 struct target_oabi_flock64 {
6910     abi_short l_type;
6911     abi_short l_whence;
6912     abi_llong l_start;
6913     abi_llong l_len;
6914     abi_int   l_pid;
6915 } QEMU_PACKED;
6916 
6917 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6918                                                    abi_ulong target_flock_addr)
6919 {
6920     struct target_oabi_flock64 *target_fl;
6921     int l_type;
6922 
6923     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6924         return -TARGET_EFAULT;
6925     }
6926 
6927     __get_user(l_type, &target_fl->l_type);
6928     l_type = target_to_host_flock(l_type);
6929     if (l_type < 0) {
6930         return l_type;
6931     }
6932     fl->l_type = l_type;
6933     __get_user(fl->l_whence, &target_fl->l_whence);
6934     __get_user(fl->l_start, &target_fl->l_start);
6935     __get_user(fl->l_len, &target_fl->l_len);
6936     __get_user(fl->l_pid, &target_fl->l_pid);
6937     unlock_user_struct(target_fl, target_flock_addr, 0);
6938     return 0;
6939 }
6940 
6941 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6942                                                  const struct flock64 *fl)
6943 {
6944     struct target_oabi_flock64 *target_fl;
6945     short l_type;
6946 
6947     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6948         return -TARGET_EFAULT;
6949     }
6950 
6951     l_type = host_to_target_flock(fl->l_type);
6952     __put_user(l_type, &target_fl->l_type);
6953     __put_user(fl->l_whence, &target_fl->l_whence);
6954     __put_user(fl->l_start, &target_fl->l_start);
6955     __put_user(fl->l_len, &target_fl->l_len);
6956     __put_user(fl->l_pid, &target_fl->l_pid);
6957     unlock_user_struct(target_fl, target_flock_addr, 1);
6958     return 0;
6959 }
6960 #endif
6961 
6962 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6963                                               abi_ulong target_flock_addr)
6964 {
6965     struct target_flock64 *target_fl;
6966     int l_type;
6967 
6968     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6969         return -TARGET_EFAULT;
6970     }
6971 
6972     __get_user(l_type, &target_fl->l_type);
6973     l_type = target_to_host_flock(l_type);
6974     if (l_type < 0) {
6975         return l_type;
6976     }
6977     fl->l_type = l_type;
6978     __get_user(fl->l_whence, &target_fl->l_whence);
6979     __get_user(fl->l_start, &target_fl->l_start);
6980     __get_user(fl->l_len, &target_fl->l_len);
6981     __get_user(fl->l_pid, &target_fl->l_pid);
6982     unlock_user_struct(target_fl, target_flock_addr, 0);
6983     return 0;
6984 }
6985 
6986 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6987                                             const struct flock64 *fl)
6988 {
6989     struct target_flock64 *target_fl;
6990     short l_type;
6991 
6992     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6993         return -TARGET_EFAULT;
6994     }
6995 
6996     l_type = host_to_target_flock(fl->l_type);
6997     __put_user(l_type, &target_fl->l_type);
6998     __put_user(fl->l_whence, &target_fl->l_whence);
6999     __put_user(fl->l_start, &target_fl->l_start);
7000     __put_user(fl->l_len, &target_fl->l_len);
7001     __put_user(fl->l_pid, &target_fl->l_pid);
7002     unlock_user_struct(target_fl, target_flock_addr, 1);
7003     return 0;
7004 }
7005 
7006 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7007 {
7008     struct flock64 fl64;
7009 #ifdef F_GETOWN_EX
7010     struct f_owner_ex fox;
7011     struct target_f_owner_ex *target_fox;
7012 #endif
7013     abi_long ret;
7014     int host_cmd = target_to_host_fcntl_cmd(cmd);
7015 
7016     if (host_cmd == -TARGET_EINVAL)
7017 	    return host_cmd;
7018 
7019     switch(cmd) {
7020     case TARGET_F_GETLK:
7021         ret = copy_from_user_flock(&fl64, arg);
7022         if (ret) {
7023             return ret;
7024         }
7025         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7026         if (ret == 0) {
7027             ret = copy_to_user_flock(arg, &fl64);
7028         }
7029         break;
7030 
7031     case TARGET_F_SETLK:
7032     case TARGET_F_SETLKW:
7033         ret = copy_from_user_flock(&fl64, arg);
7034         if (ret) {
7035             return ret;
7036         }
7037         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7038         break;
7039 
7040     case TARGET_F_GETLK64:
7041     case TARGET_F_OFD_GETLK:
7042         ret = copy_from_user_flock64(&fl64, arg);
7043         if (ret) {
7044             return ret;
7045         }
7046         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7047         if (ret == 0) {
7048             ret = copy_to_user_flock64(arg, &fl64);
7049         }
7050         break;
7051     case TARGET_F_SETLK64:
7052     case TARGET_F_SETLKW64:
7053     case TARGET_F_OFD_SETLK:
7054     case TARGET_F_OFD_SETLKW:
7055         ret = copy_from_user_flock64(&fl64, arg);
7056         if (ret) {
7057             return ret;
7058         }
7059         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7060         break;
7061 
7062     case TARGET_F_GETFL:
7063         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7064         if (ret >= 0) {
7065             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7066         }
7067         break;
7068 
7069     case TARGET_F_SETFL:
7070         ret = get_errno(safe_fcntl(fd, host_cmd,
7071                                    target_to_host_bitmask(arg,
7072                                                           fcntl_flags_tbl)));
7073         break;
7074 
7075 #ifdef F_GETOWN_EX
7076     case TARGET_F_GETOWN_EX:
7077         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7078         if (ret >= 0) {
7079             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7080                 return -TARGET_EFAULT;
7081             target_fox->type = tswap32(fox.type);
7082             target_fox->pid = tswap32(fox.pid);
7083             unlock_user_struct(target_fox, arg, 1);
7084         }
7085         break;
7086 #endif
7087 
7088 #ifdef F_SETOWN_EX
7089     case TARGET_F_SETOWN_EX:
7090         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7091             return -TARGET_EFAULT;
7092         fox.type = tswap32(target_fox->type);
7093         fox.pid = tswap32(target_fox->pid);
7094         unlock_user_struct(target_fox, arg, 0);
7095         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7096         break;
7097 #endif
7098 
7099     case TARGET_F_SETSIG:
7100         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7101         break;
7102 
7103     case TARGET_F_GETSIG:
7104         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7105         break;
7106 
7107     case TARGET_F_SETOWN:
7108     case TARGET_F_GETOWN:
7109     case TARGET_F_SETLEASE:
7110     case TARGET_F_GETLEASE:
7111     case TARGET_F_SETPIPE_SZ:
7112     case TARGET_F_GETPIPE_SZ:
7113     case TARGET_F_ADD_SEALS:
7114     case TARGET_F_GET_SEALS:
7115         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7116         break;
7117 
7118     default:
7119         ret = get_errno(safe_fcntl(fd, cmd, arg));
7120         break;
7121     }
7122     return ret;
7123 }
7124 
7125 #ifdef USE_UID16
7126 
/* Narrow a 32-bit UID to the 16-bit ABI: values that do not fit are
 * clamped to the kernel's overflow UID, 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}
7134 
/* Narrow a 32-bit GID to the 16-bit ABI: values that do not fit are
 * clamped to the kernel's overflow GID, 65534. */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}
7142 
/* Widen a 16-bit UID: 0xffff is the "no change" sentinel and must be
 * sign-extended to the full-width -1; other values pass through. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}
7150 
/* Widen a 16-bit GID: 0xffff is the "no change" sentinel and must be
 * sign-extended to the full-width -1; other values pass through. */
static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}
/* Under USE_UID16 guest IDs are 16 bits wide, so only two bytes are
 * byte-swapped and IDs are stored with the 16-bit accessor. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7164 
#else /* !USE_UID16 */
/* With full 32-bit UID/GID syscalls there is nothing to narrow or
 * widen: all four conversions are identity functions, kept so callers
 * can be written once for both configurations. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* IDs are full 32-bit values here, so all four bytes are swapped. */
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
7190 
7191 /* We must do direct syscalls for setting UID/GID, because we want to
7192  * implement the Linux system call semantics of "change only for this thread",
7193  * not the libc/POSIX semantics of "change for all threads in process".
7194  * (See http://ewontfix.com/17/ for more details.)
7195  * We use the 32-bit version of the syscalls if present; if it is not
7196  * then either the host architecture supports 32-bit UIDs natively with
7197  * the standard syscall, or the 16-bit UID is the best we can do.
7198  */
7199 #ifdef __NR_setuid32
7200 #define __NR_sys_setuid __NR_setuid32
7201 #else
7202 #define __NR_sys_setuid __NR_setuid
7203 #endif
7204 #ifdef __NR_setgid32
7205 #define __NR_sys_setgid __NR_setgid32
7206 #else
7207 #define __NR_sys_setgid __NR_setgid
7208 #endif
7209 #ifdef __NR_setresuid32
7210 #define __NR_sys_setresuid __NR_setresuid32
7211 #else
7212 #define __NR_sys_setresuid __NR_setresuid
7213 #endif
7214 #ifdef __NR_setresgid32
7215 #define __NR_sys_setresgid __NR_setresgid32
7216 #else
7217 #define __NR_sys_setresgid __NR_setresgid
7218 #endif
7219 
/* Raw syscall wrappers so ID changes apply to the calling thread only
 * (see the block comment above on Linux vs POSIX set*id semantics). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7224 
/*
 * One-time initialization of syscall emulation state:
 *  - register the marshalling descriptions of all ioctl argument
 *    structs with the thunk layer (table driven by syscall_types.h);
 *  - patch ioctl numbers whose size field is a placeholder (all bits
 *    set) with the real struct size computed from the thunk type.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            /* Only pointer arguments have a struct whose size we can
             * substitute into the ioctl encoding. */
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
7269 
7270 #ifdef TARGET_NR_truncate64
7271 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7272                                          abi_long arg2,
7273                                          abi_long arg3,
7274                                          abi_long arg4)
7275 {
7276     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7277         arg2 = arg3;
7278         arg3 = arg4;
7279     }
7280     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7281 }
7282 #endif
7283 
7284 #ifdef TARGET_NR_ftruncate64
7285 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7286                                           abi_long arg2,
7287                                           abi_long arg3,
7288                                           abi_long arg4)
7289 {
7290     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7291         arg2 = arg3;
7292         arg3 = arg4;
7293     }
7294     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7295 }
7296 #endif
7297 
7298 #if defined(TARGET_NR_timer_settime) || \
7299     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7300 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7301                                                  abi_ulong target_addr)
7302 {
7303     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7304                                 offsetof(struct target_itimerspec,
7305                                          it_interval)) ||
7306         target_to_host_timespec(&host_its->it_value, target_addr +
7307                                 offsetof(struct target_itimerspec,
7308                                          it_value))) {
7309         return -TARGET_EFAULT;
7310     }
7311 
7312     return 0;
7313 }
7314 #endif
7315 
7316 #if defined(TARGET_NR_timer_settime64) || \
7317     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7318 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7319                                                    abi_ulong target_addr)
7320 {
7321     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7322                                   offsetof(struct target__kernel_itimerspec,
7323                                            it_interval)) ||
7324         target_to_host_timespec64(&host_its->it_value, target_addr +
7325                                   offsetof(struct target__kernel_itimerspec,
7326                                            it_value))) {
7327         return -TARGET_EFAULT;
7328     }
7329 
7330     return 0;
7331 }
7332 #endif
7333 
7334 #if ((defined(TARGET_NR_timerfd_gettime) || \
7335       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7336       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7337 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7338                                                  struct itimerspec *host_its)
7339 {
7340     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7341                                                        it_interval),
7342                                 &host_its->it_interval) ||
7343         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7344                                                        it_value),
7345                                 &host_its->it_value)) {
7346         return -TARGET_EFAULT;
7347     }
7348     return 0;
7349 }
7350 #endif
7351 
7352 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7353       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7354       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7355 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7356                                                    struct itimerspec *host_its)
7357 {
7358     if (host_to_target_timespec64(target_addr +
7359                                   offsetof(struct target__kernel_itimerspec,
7360                                            it_interval),
7361                                   &host_its->it_interval) ||
7362         host_to_target_timespec64(target_addr +
7363                                   offsetof(struct target__kernel_itimerspec,
7364                                            it_value),
7365                                   &host_its->it_value)) {
7366         return -TARGET_EFAULT;
7367     }
7368     return 0;
7369 }
7370 #endif
7371 
7372 #if defined(TARGET_NR_adjtimex) || \
7373     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Read a guest struct timex (adjtimex / clock_adjtime argument) into
 * the host representation.  Returns 0 on success or -TARGET_EFAULT if
 * the guest address cannot be locked.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* Field-by-field copy; __get_user performs the byte-swapping. */
    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7408 
/*
 * Write a host struct timex back to the guest (results of adjtimex /
 * clock_adjtime).  Returns 0 on success or -TARGET_EFAULT if the
 * guest address cannot be locked.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    /* Field-by-field copy; __put_user performs the byte-swapping. */
    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7443 #endif
7444 
7445 
7446 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a guest struct __kernel_timex (clock_adjtime64) into the host
 * struct timex.  The embedded 64-bit timeval is converted via its own
 * helper before the remaining fields are copied.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    /* The time member uses 64-bit fields even on 32-bit guests, so it
     * has a dedicated conversion helper. */
    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* Field-by-field copy; __get_user performs the byte-swapping. */
    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7485 
7486 static inline abi_long host_to_target_timex64(abi_long target_addr,
7487                                               struct timex *host_tx)
7488 {
7489     struct target__kernel_timex *target_tx;
7490 
7491    if (copy_to_user_timeval64(target_addr +
7492                               offsetof(struct target__kernel_timex, time),
7493                               &host_tx->time)) {
7494         return -TARGET_EFAULT;
7495     }
7496 
7497     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7498         return -TARGET_EFAULT;
7499     }
7500 
7501     __put_user(host_tx->modes, &target_tx->modes);
7502     __put_user(host_tx->offset, &target_tx->offset);
7503     __put_user(host_tx->freq, &target_tx->freq);
7504     __put_user(host_tx->maxerror, &target_tx->maxerror);
7505     __put_user(host_tx->esterror, &target_tx->esterror);
7506     __put_user(host_tx->status, &target_tx->status);
7507     __put_user(host_tx->constant, &target_tx->constant);
7508     __put_user(host_tx->precision, &target_tx->precision);
7509     __put_user(host_tx->tolerance, &target_tx->tolerance);
7510     __put_user(host_tx->tick, &target_tx->tick);
7511     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7512     __put_user(host_tx->jitter, &target_tx->jitter);
7513     __put_user(host_tx->shift, &target_tx->shift);
7514     __put_user(host_tx->stabil, &target_tx->stabil);
7515     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7516     __put_user(host_tx->calcnt, &target_tx->calcnt);
7517     __put_user(host_tx->errcnt, &target_tx->errcnt);
7518     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7519     __put_user(host_tx->tai, &target_tx->tai);
7520 
7521     unlock_user_struct(target_tx, target_addr, 1);
7522     return 0;
7523 }
7524 #endif
7525 
7526 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7527 #define sigev_notify_thread_id _sigev_un._tid
7528 #endif
7529 
/*
 * Convert a guest struct sigevent to the host representation.  Only
 * the fields this file forwards (value, signo, notify, notify thread
 * id) are converted.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    /* NOTE(review): this is a VERIFY_READ lock but unlocks with copy=1,
     * writing the unmodified struct back to the guest; copy=0 looks
     * sufficient — confirm before changing. */
    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
7555 
7556 #if defined(TARGET_NR_mlockall)
7557 static inline int target_to_host_mlockall_arg(int arg)
7558 {
7559     int result = 0;
7560 
7561     if (arg & TARGET_MCL_CURRENT) {
7562         result |= MCL_CURRENT;
7563     }
7564     if (arg & TARGET_MCL_FUTURE) {
7565         result |= MCL_FUTURE;
7566     }
7567 #ifdef MCL_ONFAULT
7568     if (arg & TARGET_MCL_ONFAULT) {
7569         result |= MCL_ONFAULT;
7570     }
7571 #endif
7572 
7573     return result;
7574 }
7575 #endif
7576 
7577 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7578      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7579      defined(TARGET_NR_newfstatat))
/*
 * Write a host struct stat out to the guest in the layout expected by
 * the stat64 family of syscalls.  On 32-bit ARM the struct layout
 * differs between EABI and the old ABI, so the EABI case is handled
 * with its own target struct.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding/unused fields don't leak host data. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        /* Nanosecond timestamp parts, when the host stat provides them. */
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding/unused fields don't leak host data. */
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7652 #endif
7653 
7654 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Write statx() results out to the guest.  host_stx already uses the
 * target_statx layout (filled from the host statx syscall); this just
 * byte-swaps each field into guest memory.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
        return -TARGET_EFAULT;
    }
    /* Zero first so spare/padding fields don't leak host data. */
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
7693 #endif
7694 
/*
 * Invoke the host futex syscall, choosing between the classic futex
 * and futex_time64 variants.  On 64-bit hosts tv_sec is always 64-bit
 * so plain futex suffices; on 32-bit hosts the _time64 variant is
 * used when struct timespec carries a 64-bit tv_sec.  Unreachable if
 * the host defines neither syscall.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7719 
/*
 * As do_sys_futex(), but through the safe_ syscall wrappers (which
 * interact correctly with guest signal delivery) and with the result
 * converted to a guest errno.  Returns -TARGET_ENOSYS if the host has
 * no usable futex syscall.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7744 
7745 /* ??? Using host futex calls even when target atomic operations
7746    are not really atomic probably breaks things.  However implementing
7747    futexes locally would make futexes shared between multiple processes
7748    tricky.  However they're probably useless because guest atomic
7749    operations won't work either.  */
7750 #if defined(TARGET_NR_futex)
7751 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7752                     target_ulong timeout, target_ulong uaddr2, int val3)
7753 {
7754     struct timespec ts, *pts;
7755     int base_op;
7756 
7757     /* ??? We assume FUTEX_* constants are the same on both host
7758        and target.  */
7759 #ifdef FUTEX_CMD_MASK
7760     base_op = op & FUTEX_CMD_MASK;
7761 #else
7762     base_op = op;
7763 #endif
7764     switch (base_op) {
7765     case FUTEX_WAIT:
7766     case FUTEX_WAIT_BITSET:
7767         if (timeout) {
7768             pts = &ts;
7769             target_to_host_timespec(pts, timeout);
7770         } else {
7771             pts = NULL;
7772         }
7773         return do_safe_futex(g2h(cpu, uaddr),
7774                              op, tswap32(val), pts, NULL, val3);
7775     case FUTEX_WAKE:
7776         return do_safe_futex(g2h(cpu, uaddr),
7777                              op, val, NULL, NULL, 0);
7778     case FUTEX_FD:
7779         return do_safe_futex(g2h(cpu, uaddr),
7780                              op, val, NULL, NULL, 0);
7781     case FUTEX_REQUEUE:
7782     case FUTEX_CMP_REQUEUE:
7783     case FUTEX_WAKE_OP:
7784         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7785            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7786            But the prototype takes a `struct timespec *'; insert casts
7787            to satisfy the compiler.  We do not need to tswap TIMEOUT
7788            since it's not compared to guest memory.  */
7789         pts = (struct timespec *)(uintptr_t) timeout;
7790         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7791                              (base_op == FUTEX_CMP_REQUEUE
7792                               ? tswap32(val3) : val3));
7793     default:
7794         return -TARGET_ENOSYS;
7795     }
7796 }
7797 #endif
7798 
7799 #if defined(TARGET_NR_futex_time64)
7800 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7801                            int val, target_ulong timeout,
7802                            target_ulong uaddr2, int val3)
7803 {
7804     struct timespec ts, *pts;
7805     int base_op;
7806 
7807     /* ??? We assume FUTEX_* constants are the same on both host
7808        and target.  */
7809 #ifdef FUTEX_CMD_MASK
7810     base_op = op & FUTEX_CMD_MASK;
7811 #else
7812     base_op = op;
7813 #endif
7814     switch (base_op) {
7815     case FUTEX_WAIT:
7816     case FUTEX_WAIT_BITSET:
7817         if (timeout) {
7818             pts = &ts;
7819             if (target_to_host_timespec64(pts, timeout)) {
7820                 return -TARGET_EFAULT;
7821             }
7822         } else {
7823             pts = NULL;
7824         }
7825         return do_safe_futex(g2h(cpu, uaddr), op,
7826                              tswap32(val), pts, NULL, val3);
7827     case FUTEX_WAKE:
7828         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7829     case FUTEX_FD:
7830         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7831     case FUTEX_REQUEUE:
7832     case FUTEX_CMP_REQUEUE:
7833     case FUTEX_WAKE_OP:
7834         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7835            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7836            But the prototype takes a `struct timespec *'; insert casts
7837            to satisfy the compiler.  We do not need to tswap TIMEOUT
7838            since it's not compared to guest memory.  */
7839         pts = (struct timespec *)(uintptr_t) timeout;
7840         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7841                              (base_op == FUTEX_CMP_REQUEUE
7842                               ? tswap32(val3) : val3));
7843     default:
7844         return -TARGET_ENOSYS;
7845     }
7846 }
7847 #endif
7848 
7849 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7850 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7851                                      abi_long handle, abi_long mount_id,
7852                                      abi_long flags)
7853 {
7854     struct file_handle *target_fh;
7855     struct file_handle *fh;
7856     int mid = 0;
7857     abi_long ret;
7858     char *name;
7859     unsigned int size, total_size;
7860 
7861     if (get_user_s32(size, handle)) {
7862         return -TARGET_EFAULT;
7863     }
7864 
7865     name = lock_user_string(pathname);
7866     if (!name) {
7867         return -TARGET_EFAULT;
7868     }
7869 
7870     total_size = sizeof(struct file_handle) + size;
7871     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7872     if (!target_fh) {
7873         unlock_user(name, pathname, 0);
7874         return -TARGET_EFAULT;
7875     }
7876 
7877     fh = g_malloc0(total_size);
7878     fh->handle_bytes = size;
7879 
7880     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7881     unlock_user(name, pathname, 0);
7882 
7883     /* man name_to_handle_at(2):
7884      * Other than the use of the handle_bytes field, the caller should treat
7885      * the file_handle structure as an opaque data type
7886      */
7887 
7888     memcpy(target_fh, fh, total_size);
7889     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7890     target_fh->handle_type = tswap32(fh->handle_type);
7891     g_free(fh);
7892     unlock_user(target_fh, handle, total_size);
7893 
7894     if (put_user_s32(mid, mount_id)) {
7895         return -TARGET_EFAULT;
7896     }
7897 
7898     return ret;
7899 
7900 }
7901 #endif
7902 
7903 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7904 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7905                                      abi_long flags)
7906 {
7907     struct file_handle *target_fh;
7908     struct file_handle *fh;
7909     unsigned int size, total_size;
7910     abi_long ret;
7911 
7912     if (get_user_s32(size, handle)) {
7913         return -TARGET_EFAULT;
7914     }
7915 
7916     total_size = sizeof(struct file_handle) + size;
7917     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7918     if (!target_fh) {
7919         return -TARGET_EFAULT;
7920     }
7921 
7922     fh = g_memdup(target_fh, total_size);
7923     fh->handle_bytes = size;
7924     fh->handle_type = tswap32(target_fh->handle_type);
7925 
7926     ret = get_errno(open_by_handle_at(mount_fd, fh,
7927                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7928 
7929     g_free(fh);
7930 
7931     unlock_user(target_fh, handle, total_size);
7932 
7933     return ret;
7934 }
7935 #endif
7936 
7937 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7938 
7939 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7940 {
7941     int host_flags;
7942     target_sigset_t *target_mask;
7943     sigset_t host_mask;
7944     abi_long ret;
7945 
7946     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7947         return -TARGET_EINVAL;
7948     }
7949     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7950         return -TARGET_EFAULT;
7951     }
7952 
7953     target_to_host_sigset(&host_mask, target_mask);
7954 
7955     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7956 
7957     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7958     if (ret >= 0) {
7959         fd_trans_register(ret, &target_signalfd_trans);
7960     }
7961 
7962     unlock_user_struct(target_mask, mask, 0);
7963 
7964     return ret;
7965 }
7966 #endif
7967 
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    int sig;

    if (WIFSIGNALED(status)) {
        /* The terminating signal occupies the low 7 bits. */
        sig = host_to_target_signal(WTERMSIG(status));
        return sig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* The stop signal is reported in the second byte. */
        sig = host_to_target_signal(WSTOPSIG(status));
        return (sig << 8) | (status & 0xff);
    }
    return status;
}
7981 
7982 static int open_self_cmdline(void *cpu_env, int fd)
7983 {
7984     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7985     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7986     int i;
7987 
7988     for (i = 0; i < bprm->argc; i++) {
7989         size_t len = strlen(bprm->argv[i]) + 1;
7990 
7991         if (write(fd, bprm->argv[i], len) != len) {
7992             return -1;
7993         }
7994     }
7995 
7996     return 0;
7997 }
7998 
/*
 * Emulate /proc/self/maps: walk the host process's own mappings and
 * print only the ranges that fall inside the guest address space,
 * translated to guest addresses and guest page permissions.
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            /* permissions as the guest sees them, not the host's */
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* clamp mappings that extend past the end of guest space */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : 's',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* pad so the path column lines up, procfs style */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
#endif

    return 0;
}
8060 
/*
 * Emulate /proc/self/stat for the guest.  Emits 44 space-separated
 * fields; only pid, comm, ppid, starttime and the stack-bottom field
 * carry real values — every other field is reported as 0.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            /* %.15s: comm is truncated to 15 chars, like the kernel's */
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 21) {
            /* starttime */
            g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
8098 
/*
 * Emulate /proc/self/auxv: copy the guest's ELF auxiliary vector, which
 * was saved on the target stack at exec time, out to the given fd.
 * Always returns 0; a failing write simply truncates the output.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        /* rewind so subsequent reads see the file from the start */
        lseek(fd, 0, SEEK_SET);
        /*
         * NOTE(review): ptr and len were advanced/decremented by the loop
         * above, so this unlock uses the adjusted values rather than the
         * originals — presumably safe with a copy=0 unlock, but confirm
         * against lock_user()'s bounce-buffer semantics before changing.
         */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
8128 
/*
 * Return 1 if FILENAME names ENTRY inside this process's own /proc
 * directory (either "/proc/self/ENTRY" or "/proc/<our pid>/ENTRY"),
 * 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        char myself[80];

        /* numeric path: only our own pid counts as "myself" */
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
8152 
8153 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8154     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator used for fixed /proc paths in the fakes table. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
8159 #endif
8160 
8161 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8162 static int open_net_route(void *cpu_env, int fd)
8163 {
8164     FILE *fp;
8165     char *line = NULL;
8166     size_t len = 0;
8167     ssize_t read;
8168 
8169     fp = fopen("/proc/net/route", "r");
8170     if (fp == NULL) {
8171         return -1;
8172     }
8173 
8174     /* read header */
8175 
8176     read = getline(&line, &len, fp);
8177     dprintf(fd, "%s", line);
8178 
8179     /* read routes */
8180 
8181     while ((read = getline(&line, &len, fp)) != -1) {
8182         char iface[16];
8183         uint32_t dest, gw, mask;
8184         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8185         int fields;
8186 
8187         fields = sscanf(line,
8188                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8189                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8190                         &mask, &mtu, &window, &irtt);
8191         if (fields != 11) {
8192             continue;
8193         }
8194         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8195                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8196                 metric, tswap32(mask), mtu, window, irtt);
8197     }
8198 
8199     free(line);
8200     fclose(fp);
8201 
8202     return 0;
8203 }
8204 #endif
8205 
8206 #if defined(TARGET_SPARC)
static int open_cpuinfo(void *cpu_env, int fd)
{
    /* Minimal /proc/cpuinfo for sparc guests: report a sun4u machine. */
    static const char contents[] = "type\t\t: sun4u\n";

    dprintf(fd, "%s", contents);
    return 0;
}
8212 #endif
8213 
8214 #if defined(TARGET_HPPA)
static int open_cpuinfo(void *cpu_env, int fd)
{
    /* Minimal /proc/cpuinfo describing the emulated B160L machine. */
    static const char *const lines[] = {
        "cpu family\t: PA-RISC 1.1e\n",
        "cpu\t\t: PA7300LC (PCX-L2)\n",
        "capabilities\t: os32\n",
        "model\t\t: 9000/778/B160L\n",
        "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n",
    };
    size_t i;

    for (i = 0; i < sizeof(lines) / sizeof(lines[0]); i++) {
        dprintf(fd, "%s", lines[i]);
    }
    return 0;
}
8224 #endif
8225 
8226 #if defined(TARGET_M68K)
static int open_hardware(void *cpu_env, int fd)
{
    /* Minimal /proc/hardware for m68k guests. */
    static const char model_line[] = "Model:\t\tqemu-m68k\n";

    dprintf(fd, "%s", model_line);
    return 0;
}
8232 #endif
8233 
/*
 * openat(2) emulation.  A handful of /proc paths that would otherwise
 * expose host state are intercepted and replaced by synthetic contents
 * generated into an anonymous temp file; everything else is forwarded
 * to the host openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;  /* entry name (or full path) to intercept */
        int (*fill)(void *cpu_env, int fd);  /* writes synthetic contents */
        int (*cmp)(const char *s1, const char *s2);  /* matcher to use */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe must resolve to the emulated binary, not QEMU */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* unlink immediately: the open fd keeps the anonymous file alive */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* rewind so the guest reads from the beginning */
        lseek(fd, 0, SEEK_SET);

        /*
         * NOTE(review): the synthetic fd does not honor the requested
         * flags/mode (e.g. it is always read-write); confirm whether any
         * guest relies on the access mode of these /proc files.
         */
        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
8299 
8300 #define TIMER_MAGIC 0x0caf0000
8301 #define TIMER_MAGIC_MASK 0xffff0000
8302 
8303 /* Convert QEMU provided timer ID back to internal 16bit index format */
8304 static target_timer_t get_timer_id(abi_long arg)
8305 {
8306     target_timer_t timerid = arg;
8307 
8308     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8309         return -TARGET_EINVAL;
8310     }
8311 
8312     timerid &= 0xffff;
8313 
8314     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8315         return -TARGET_EINVAL;
8316     }
8317 
8318     return timerid;
8319 }
8320 
/*
 * Convert a guest CPU-affinity bitmask (array of abi_ulong, as used by
 * sched_setaffinity) into the host's unsigned long bitmap layout.
 * Returns 0 on success or -TARGET_EFAULT if the guest buffer is bad.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* the caller must supply a host buffer at least as big as the guest's */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        /* replay every set guest bit into the host bitmap */
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
8354 
/*
 * Inverse of target_to_host_cpu_mask(): repack a host CPU-affinity
 * bitmap into the guest's abi_ulong array representation.
 * Returns 0 on success or -TARGET_EFAULT if the guest buffer is bad.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* the host bitmap must cover at least the guest-visible range */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        /* gather the host bits backing this guest word */
        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
8387 
8388 #ifdef TARGET_NR_getdents
/*
 * Emulate getdents(2): fetch host directory entries and repack them
 * into the target's struct target_dirent layout in guest memory.
 * Returns the number of bytes stored in the guest buffer, or a
 * negative target errno.
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* walk host records and emit target records in parallel */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        /* +2: NUL terminator plus the trailing d_type byte */
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
8473 #endif /* TARGET_NR_getdents */
8474 
8475 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Emulate getdents64(2): fetch host 64-bit directory entries and repack
 * them into the target's struct target_dirent64 layout in guest memory.
 * Returns the number of bytes stored in the guest buffer, or a
 * negative target errno.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* walk host records and emit target records in parallel */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* namelen here includes the NUL terminator */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
8539 #endif /* TARGET_NR_getdents64 */
8540 
8541 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8542 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8543 #endif
8544 
8545 /* This is an internal helper for do_syscall so that it is easier
8546  * to have a single return point, so that actions, such as logging
8547  * of syscall results, can be performed.
8548  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8549  */
8550 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8551                             abi_long arg2, abi_long arg3, abi_long arg4,
8552                             abi_long arg5, abi_long arg6, abi_long arg7,
8553                             abi_long arg8)
8554 {
8555     CPUState *cpu = env_cpu(cpu_env);
8556     abi_long ret;
8557 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8558     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8559     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8560     || defined(TARGET_NR_statx)
8561     struct stat st;
8562 #endif
8563 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8564     || defined(TARGET_NR_fstatfs)
8565     struct statfs stfs;
8566 #endif
8567     void *p;
8568 
8569     switch(num) {
8570     case TARGET_NR_exit:
8571         /* In old applications this may be used to implement _exit(2).
8572            However in threaded applications it is used for thread termination,
8573            and _exit_group is used for application termination.
8574            Do thread termination if we have more then one thread.  */
8575 
8576         if (block_signals()) {
8577             return -QEMU_ERESTARTSYS;
8578         }
8579 
8580         pthread_mutex_lock(&clone_lock);
8581 
8582         if (CPU_NEXT(first_cpu)) {
8583             TaskState *ts = cpu->opaque;
8584 
8585             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8586             object_unref(OBJECT(cpu));
8587             /*
8588              * At this point the CPU should be unrealized and removed
8589              * from cpu lists. We can clean-up the rest of the thread
8590              * data without the lock held.
8591              */
8592 
8593             pthread_mutex_unlock(&clone_lock);
8594 
8595             if (ts->child_tidptr) {
8596                 put_user_u32(0, ts->child_tidptr);
8597                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8598                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8599             }
8600             thread_cpu = NULL;
8601             g_free(ts);
8602             rcu_unregister_thread();
8603             pthread_exit(NULL);
8604         }
8605 
8606         pthread_mutex_unlock(&clone_lock);
8607         preexit_cleanup(cpu_env, arg1);
8608         _exit(arg1);
8609         return 0; /* avoid warning */
8610     case TARGET_NR_read:
8611         if (arg2 == 0 && arg3 == 0) {
8612             return get_errno(safe_read(arg1, 0, 0));
8613         } else {
8614             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8615                 return -TARGET_EFAULT;
8616             ret = get_errno(safe_read(arg1, p, arg3));
8617             if (ret >= 0 &&
8618                 fd_trans_host_to_target_data(arg1)) {
8619                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8620             }
8621             unlock_user(p, arg2, ret);
8622         }
8623         return ret;
8624     case TARGET_NR_write:
8625         if (arg2 == 0 && arg3 == 0) {
8626             return get_errno(safe_write(arg1, 0, 0));
8627         }
8628         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8629             return -TARGET_EFAULT;
8630         if (fd_trans_target_to_host_data(arg1)) {
8631             void *copy = g_malloc(arg3);
8632             memcpy(copy, p, arg3);
8633             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8634             if (ret >= 0) {
8635                 ret = get_errno(safe_write(arg1, copy, ret));
8636             }
8637             g_free(copy);
8638         } else {
8639             ret = get_errno(safe_write(arg1, p, arg3));
8640         }
8641         unlock_user(p, arg2, 0);
8642         return ret;
8643 
#ifdef TARGET_NR_open
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        /* open(2) is implemented as openat(AT_FDCWD, ...) with the open
         * flags remapped from target to host bit positions. */
        ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
                                  target_to_host_bitmask(arg2, fcntl_flags_tbl),
                                  arg3));
        /* A newly returned fd number may still carry a translator left
         * over from a previously closed fd; clear it. */
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(do_openat(cpu_env, arg1, p,
                                  target_to_host_bitmask(arg3, fcntl_flags_tbl),
                                  arg4));
        fd_trans_unregister(ret);
        unlock_user(p, arg2, 0);
        return ret;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_name_to_handle_at:
        ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
        return ret;
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_open_by_handle_at:
        ret = do_open_by_handle_at(arg1, arg2, arg3);
        fd_trans_unregister(ret);
        return ret;
#endif
    case TARGET_NR_close:
        /* Drop any fd translator before the fd number can be reused. */
        fd_trans_unregister(arg1);
        return get_errno(close(arg1));
8678 
    case TARGET_NR_brk:
        return do_brk(arg1);
#ifdef TARGET_NR_fork
    case TARGET_NR_fork:
        return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
#endif
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
        {
            int status;
            ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
            /* Only write the status back if a pid was actually reaped
             * (ret != 0) and the guest supplied a status pointer. */
            if (!is_error(ret) && arg2 && ret
                && put_user_s32(host_to_target_waitstatus(status), arg2))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
        {
            siginfo_t info;
            /* si_pid == 0 afterwards means "nothing reaped" (WNOHANG),
             * in which case the guest siginfo is left untouched. */
            info.si_pid = 0;
            ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
            if (!is_error(ret) && arg3 && info.si_pid != 0) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_siginfo(p, &info);
                unlock_user(p, arg3, sizeof(target_siginfo_t));
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(creat(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_link
    case TARGET_NR_link:
        {
            void * p2;
            /* Lock both path strings before checking either, so the
             * unlock calls below are unconditionally safe (unlock_user
             * tolerates the NULL from a failed lock). */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(link(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_linkat)
    case TARGET_NR_linkat:
        {
            void * p2 = NULL;
            /* NULL path pointers are rejected up front. */
            if (!arg2 || !arg4)
                return -TARGET_EFAULT;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
            unlock_user(p, arg2, 0);
            unlock_user(p2, arg4, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_unlink
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_execve:
        {
            char **argp, **envp;
            int argc, envc;
            abi_ulong gp;
            abi_ulong guest_argp;
            abi_ulong guest_envp;
            abi_ulong addr;
            char **q;

            /* First pass: count the argv entries (NULL-terminated array
             * of guest pointers). */
            argc = 0;
            guest_argp = arg2;
            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    return -TARGET_EFAULT;
                if (!addr)
                    break;
                argc++;
            }
            /* Same for envp. */
            envc = 0;
            guest_envp = arg3;
            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    return -TARGET_EFAULT;
                if (!addr)
                    break;
                envc++;
            }

            /* g_new0 zero-fills, so partially-populated arrays stay
             * NULL-terminated for the cleanup loops below. */
            argp = g_new0(char *, argc + 1);
            envp = g_new0(char *, envc + 1);

            /* Second pass: lock each guest string into host memory. */
            for (gp = guest_argp, q = argp; gp;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
            }
            *q = NULL;

            for (gp = guest_envp, q = envp; gp;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
            }
            *q = NULL;

            if (!(p = lock_user_string(arg1)))
                goto execve_efault;
            /* Although execve() is not an interruptible syscall it is
             * a special case where we must use the safe_syscall wrapper:
             * if we allow a signal to happen before we make the host
             * syscall then we will 'lose' it, because at the point of
             * execve the process leaves QEMU's control. So we use the
             * safe syscall wrapper to ensure that we either take the
             * signal as a guest signal, or else it does not happen
             * before the execve completes and makes it the other
             * program's problem.
             */
            ret = get_errno(safe_execve(p, argp, envp));
            unlock_user(p, arg1, 0);

            goto execve_end;

        execve_efault:
            ret = -TARGET_EFAULT;

        execve_end:
            /* Unlock every string that was successfully locked; the
             * loops stop at the first NULL host pointer. */
            for (gp = guest_argp, q = argp; *q;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }
            for (gp = guest_envp, q = envp; *q;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }

            g_free(argp);
            g_free(envp);
        }
        return ret;
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        {
            time_t host_time;
            ret = get_errno(time(&host_time));
            /* The result pointer is optional; only store through it if
             * the guest passed one. */
            if (!is_error(ret)
                && arg1
                && put_user_sal(host_time, arg1))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_mknod
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_chmod
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_lseek
    case TARGET_NR_lseek:
        return get_errno(lseek(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific: getxpid returns the pid and leaves the parent
     * pid in the second return register (IR_A4). */
    case TARGET_NR_getxpid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
        return get_errno(getpid());
#endif
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        return get_errno(getpid());
#endif
    case TARGET_NR_mount:
        {
            /* need to look at the data field */
            void *p2, *p3;

            /* source (arg1) and filesystemtype (arg3) may legitimately
             * be NULL for some mount operations; target (arg2) may not. */
            if (arg1) {
                p = lock_user_string(arg1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
            } else {
                p = NULL;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                if (arg1) {
                    unlock_user(p, arg1, 0);
                }
                return -TARGET_EFAULT;
            }

            if (arg3) {
                p3 = lock_user_string(arg3);
                if (!p3) {
                    if (arg1) {
                        unlock_user(p, arg1, 0);
                    }
                    unlock_user(p2, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                p3 = NULL;
            }

            /* FIXME - arg5 should be locked, but it isn't clear how to
             * do that since it's not guaranteed to be a NULL-terminated
             * string.
             */
            if (!arg5) {
                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
            } else {
                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
            }
            ret = get_errno(ret);

            if (arg1) {
                unlock_user(p, arg1, 0);
            }
            unlock_user(p2, arg2, 0);
            if (arg3) {
                unlock_user(p3, arg3, 0);
            }
        }
        return ret;
#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
#if defined(TARGET_NR_umount)
    case TARGET_NR_umount:
#endif
#if defined(TARGET_NR_oldumount)
    case TARGET_NR_oldumount:
#endif
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            /* stime(2) is emulated via clock_settime(CLOCK_REALTIME)
             * with a whole-second timespec. */
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        /* alarm(2) cannot fail; its return value is the number of
         * seconds remaining on any previous alarm, not an errno. */
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        /* If a guest signal is already pending, block_signals() returns
         * non-zero and we skip the suspend; either way pause() always
         * reports EINTR to the guest. */
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        }
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            /* A NULL times pointer means "set to current time". */
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            /* tv[0] = access time, tv[1] = modification time. */
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            /* path() may redirect the lookup into QEMU's sysroot prefix. */
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        /* NOTE(review): the guest 'flags' argument is not forwarded here
         * (host flags are hardwired to 0) -- confirm this is intentional
         * for the 4-argument faccessat variant. */
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        /* sync(2) is declared void; it always succeeds. */
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            /* Lock both paths before checking; unlock_user accepts the
             * NULL produced by a failed lock. */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                /* sys_renameat2 wraps the raw syscall since libc may not
                 * provide a renameat2() wrapper. */
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            /* Propagate any fd translator to the duplicate. */
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            /* The tms buffer is optional. */
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            /* times(2) returns elapsed clock ticks, which must also be
             * rescaled for the guest. */
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        /* A NULL path disables process accounting. */
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            /* Propagate any fd translator to the new fd number. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        /* The kernel rejects any flag other than O_CLOEXEC.
         * NOTE(review): this returns the host -EINVAL rather than
         * -TARGET_EINVAL like neighbouring cases -- confirm whether that
         * matters for targets whose EINVAL differs from the host's. */
        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            return -EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    /* Old-style sigaction: converts the target's legacy sigaction layout
     * to/from the internal target_sigaction used by do_sigaction().
     * MIPS uses a distinct old-ABI structure, hence the split below. */
    case TARGET_NR_sigaction:
        {
#if defined(TARGET_MIPS)
	    struct target_sigaction act, oact, *pact, *old_act;

	    if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
		act._sa_handler = old_act->_sa_handler;
		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
		act.sa_flags = old_act->sa_flags;
		unlock_user_struct(old_act, arg2, 0);
		pact = &act;
	    } else {
		pact = NULL;
	    }

        ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

	    if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
		old_act->_sa_handler = oact._sa_handler;
		old_act->sa_flags = oact.sa_flags;
		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
		old_act->sa_mask.sig[1] = 0;
		old_act->sa_mask.sig[2] = 0;
		old_act->sa_mask.sig[3] = 0;
		unlock_user_struct(old_act, arg3, 1);
	    }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                /* The old ABI only carries the first word of the mask. */
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            /* The kernel requires an exact sigset size match. */
            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            /* 'act' was only read; no copy-back needed. */
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            /* Query the current mask (how is ignored when set is NULL)
             * and return it in the old single-word format. */
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            /* On success the syscall returns the previous mask. */
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            /* Alpha's osf_sigprocmask passes the mask by value in arg2
             * and returns the old mask in the syscall result. */
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            /* A NULL new-set pointer means "query only"; 'how' is then
             * irrelevant and passed as 0. */
            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            /* The kernel requires an exact sigset size match. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            /* NULL new-set pointer means "query only". */
            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            /* Query the host pending set and write it back to the guest
               (arg1) in the old-style sigset layout. */
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
            /* Record the requested mask in the per-thread TaskState, then
               perform the wait via the host rt_sigsuspend. */
#if defined(TARGET_ALPHA)
            /* Alpha passes the old-style mask by value in arg1. */
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -QEMU_ERESTARTSYS) {
                /* NOTE(review): in_sigsuspend is presumably consumed by the
                   signal-delivery path to restore the saved mask — confirm. */
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;

            /* The kernel insists on the exact sigset size in arg2. */
            if (arg2 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -QEMU_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            /* arg4 is the guest sigset size and must match exactly. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            /* A NULL guest timeout (arg3 == 0) means wait indefinitely. */
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                /* Copy the siginfo out only if the guest asked for it. */
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                /* On success ret is the host signal number; convert it. */
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            /* Same as rt_sigtimedwait above, except the timeout uses the
               64-bit time_t timespec layout. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            /* Convert the guest siginfo (arg3) and queue signal arg2 for
               pid arg1. */
            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            /* As above, but for thread arg2 in thread-group arg1; the
               signal is arg3 and the guest siginfo pointer is arg4. */
            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Block host signal delivery while the guest signal frame is
           being torn down; retry the syscall if that fails. */
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        /* arg1: guest hostname string, arg2: its length. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            /* Read and convert the guest rlimit structure at arg2. */
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                /* Memory limits: pretend success (see comment above). */
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            /* Fetch the host limit, then convert it to the guest layout. */
            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                /* Convert and copy the rusage out to guest memory (arg2). */
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                /* Either guest output pointer may be NULL. */
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            /*
             * settimeofday(tv, tz): either guest pointer may be NULL, in
             * which case the corresponding host argument stays NULL too.
             */
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                ptv = &tv;
                if (copy_from_user_timeval(ptv, arg1)) {
                    return -TARGET_EFAULT;
                }
            }

            if (arg2) {
                ptz = &tz;
                if (copy_from_user_timezone(ptz, arg2)) {
                    return -TARGET_EFAULT;
                }
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* Old-style select takes a single pointer to an argument block. */
        ret = do_old_select(arg1);
#else
        /* Modern select(nfds, readfds, writefds, exceptfds, timeout). */
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        /* Last flag selects 32-bit (false) vs 64-bit (true) time_t. */
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            /* arg1: link target string, arg2: new link path. */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            /* Both unlocks run even when one lock failed; presumably
               unlock_user() is a no-op for a NULL host pointer. */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            /* arg1: target string, arg2: dirfd, arg3: new link path. */
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            /* arg1: pathname, arg2: output buffer, arg3: buffer size. */
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* Emulate readlink("/proc/self/exe"): report the resolved
                   path of the binary actually being run. */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            /* arg1: dirfd, arg2: pathname, arg3: buffer, arg4: bufsiz. */
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* Emulate readlinkat of /proc/self/exe exactly like the
                   TARGET_NR_readlink case above. */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /*
                     * Copy at most bufsiz bytes, without NUL termination,
                     * and report the number of bytes written.  The previous
                     * snprintf()-based code read 'real' even when realpath()
                     * had failed, and could return a length larger than the
                     * guest buffer that was locked above.
                     */
                    ret = MIN(strlen(real), arg4);
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        /* arg1: swap file path, arg2: swap flags (passed through). */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               return -TARGET_EFAULT;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            /* On these targets old-style mmap passes a single pointer
               (arg1) to a block of six arguments in guest memory. */
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            /* Guest MAP_* flags are translated via mmap_flags_tbl. */
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2 passes the file offset in units of 2^MMAP_SHIFT bytes. */
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        /* mmap-family addresses are always untagged. */
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Drop the flag and widen the range down to the recorded
                   stack limit ourselves. */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        /* g2h() translates the guest address to the host address. */
        return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        /* Guest MCL_* flags are translated to host values. */
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        /* arg1: path string, arg2: new length. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        /* Final flags argument is hard-coded to 0 (no AT_* flags). */
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        /* Shared with fstatfs below: convert the host statfs result in
           'stfs' into the guest layout at arg2. */
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            /* Host libc does not expose f_flags: report none. */
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        /* Shared with fstatfs64 below.  NOTE(review): the output buffer
           is arg3 here, unlike statfs above — presumably arg2 carries the
           structure size on these targets; confirm against the ABI. */
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
    /*
     * Socket-family syscalls: thin dispatchers to do_* helpers that
     * perform the guest/host sockaddr and msghdr conversions.
     */
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* Plain accept() is accept4() with no flags. */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv() is recvfrom() with a NULL source address. */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        /* Final argument: 0 = receive, 1 = send (cf. sendmsg below). */
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        /* arg1: guest buffer, arg2: length, arg3: GRND_* flags. */
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        /* On success, ret is the number of bytes to copy back out. */
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            /*
             * syslog(type, bufp, len): arg1 is the action, arg2 the guest
             * buffer and arg3 its length, matching the kernel's do_syslog().
             * 'len' was previously initialised from arg2 (the buffer
             * pointer), so the validation below checked the wrong value.
             */
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                /* These actions do not use the buffer argument. */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    /* Mirror the kernel: negative length is EINVAL and a
                       zero length reads nothing. */
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                /* Guest itimerval is two consecutive target_timevals:
                   it_interval followed by it_value. */
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            /* Copy the previous timer value out if the guest asked. */
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
10313 #ifdef TARGET_NR_stat
10314     case TARGET_NR_stat:
          /* stat(2): resolve the guest path (path() applies the sysroot
           * prefix), do the host stat into the shared 'st', then fall
           * through to the common conversion code at do_stat. */
10315         if (!(p = lock_user_string(arg1))) {
10316             return -TARGET_EFAULT;
10317         }
10318         ret = get_errno(stat(path(p), &st));
10319         unlock_user(p, arg1, 0);
10320         goto do_stat;
10321 #endif
10322 #ifdef TARGET_NR_lstat
10323     case TARGET_NR_lstat:
          /* lstat(2): same as stat but does not follow a trailing symlink. */
10324         if (!(p = lock_user_string(arg1))) {
10325             return -TARGET_EFAULT;
10326         }
10327         ret = get_errno(lstat(path(p), &st));
10328         unlock_user(p, arg1, 0);
10329         goto do_stat;
10330 #endif
10331 #ifdef TARGET_NR_fstat
10332     case TARGET_NR_fstat:
10333         {
10334             ret = get_errno(fstat(arg1, &st));
10335 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
              /* Shared tail: stat and lstat jump here so all three cases
               * use one host-to-target struct stat conversion. */
10336         do_stat:
10337 #endif
10338             if (!is_error(ret)) {
10339                 struct target_stat *target_st;
10340
10341                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10342                     return -TARGET_EFAULT;
                  /* Zero first so padding/unset fields don't leak host data
                   * into the guest. */
10343                 memset(target_st, 0, sizeof(*target_st));
10344                 __put_user(st.st_dev, &target_st->st_dev);
10345                 __put_user(st.st_ino, &target_st->st_ino);
10346                 __put_user(st.st_mode, &target_st->st_mode);
10347                 __put_user(st.st_uid, &target_st->st_uid);
10348                 __put_user(st.st_gid, &target_st->st_gid);
10349                 __put_user(st.st_nlink, &target_st->st_nlink);
10350                 __put_user(st.st_rdev, &target_st->st_rdev);
10351                 __put_user(st.st_size, &target_st->st_size);
10352                 __put_user(st.st_blksize, &target_st->st_blksize);
10353                 __put_user(st.st_blocks, &target_st->st_blocks);
10354                 __put_user(st.st_atime, &target_st->target_st_atime);
10355                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10356                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10357 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
              /* Nanosecond timestamps only when both host and target
               * support them. */
10358                 __put_user(st.st_atim.tv_nsec,
10359                            &target_st->target_st_atime_nsec);
10360                 __put_user(st.st_mtim.tv_nsec,
10361                            &target_st->target_st_mtime_nsec);
10362                 __put_user(st.st_ctim.tv_nsec,
10363                            &target_st->target_st_ctime_nsec);
10364 #endif
10365                 unlock_user_struct(target_st, arg2, 1);
10366             }
10367         }
10368         return ret;
10369 #endif
10370     case TARGET_NR_vhangup:
10371         return get_errno(vhangup());
10372 #ifdef TARGET_NR_syscall
10373     case TARGET_NR_syscall:
          /* Indirect syscall: re-dispatch with the number from arg1 and
           * the remaining args shifted down by one. */
10374         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10375                           arg6, arg7, arg8, 0);
10376 #endif
10377 #if defined(TARGET_NR_wait4)
10378     case TARGET_NR_wait4:
          /* wait4(2): arg2 = guest int *status, arg4 = guest struct rusage *.
           * Both out-parameters are optional. */
10379         {
10380             int status;
10381             abi_long status_ptr = arg2;
10382             struct rusage rusage, *rusage_ptr;
10383             abi_ulong target_rusage = arg4;
10384             abi_long rusage_err;
10385             if (target_rusage)
10386                 rusage_ptr = &rusage;
10387             else
10388                 rusage_ptr = NULL;
10389             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10390             if (!is_error(ret)) {
                  /* Only write status back when a child was actually
                   * reaped (ret != 0). */
10391                 if (status_ptr && ret) {
10392                     status = host_to_target_waitstatus(status);
10393                     if (put_user_s32(status, status_ptr))
10394                         return -TARGET_EFAULT;
10395                 }
10396                 if (target_rusage) {
10397                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10398                     if (rusage_err) {
10399                         ret = rusage_err;
10400                     }
10401                 }
10402             }
10403         }
10404         return ret;
10405 #endif
10406 #ifdef TARGET_NR_swapoff
10407     case TARGET_NR_swapoff:
          /* swapoff(2): arg1 = guest path string. */
10408         if (!(p = lock_user_string(arg1)))
10409             return -TARGET_EFAULT;
10410         ret = get_errno(swapoff(p));
10411         unlock_user(p, arg1, 0);
10412         return ret;
10413 #endif
10414     case TARGET_NR_sysinfo:
          /* sysinfo(2): run the host call first, then convert the result
           * field by field (byte-swapping as needed) into the guest's
           * struct target_sysinfo at arg1. */
10415         {
10416             struct target_sysinfo *target_value;
10417             struct sysinfo value;
10418             ret = get_errno(sysinfo(&value));
10419             if (!is_error(ret) && arg1)
10420             {
10421                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10422                     return -TARGET_EFAULT;
10423                 __put_user(value.uptime, &target_value->uptime);
10424                 __put_user(value.loads[0], &target_value->loads[0]);
10425                 __put_user(value.loads[1], &target_value->loads[1]);
10426                 __put_user(value.loads[2], &target_value->loads[2]);
10427                 __put_user(value.totalram, &target_value->totalram);
10428                 __put_user(value.freeram, &target_value->freeram);
10429                 __put_user(value.sharedram, &target_value->sharedram);
10430                 __put_user(value.bufferram, &target_value->bufferram);
10431                 __put_user(value.totalswap, &target_value->totalswap);
10432                 __put_user(value.freeswap, &target_value->freeswap);
10433                 __put_user(value.procs, &target_value->procs);
10434                 __put_user(value.totalhigh, &target_value->totalhigh);
10435                 __put_user(value.freehigh, &target_value->freehigh);
10436                 __put_user(value.mem_unit, &target_value->mem_unit);
10437                 unlock_user_struct(target_value, arg1, 1);
10438             }
10439         }
10440         return ret;
          /*
           * System V IPC: thin dispatch to the do_*() helpers, which handle
           * all guest<->host argument conversion.  Architectures either
           * provide the multiplexed ipc(2) entry point or the individual
           * sem*/msg*/shm* syscalls, hence the per-case #ifdefs.
           */
10441 #ifdef TARGET_NR_ipc
10442     case TARGET_NR_ipc:
10443         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10444 #endif
10445 #ifdef TARGET_NR_semget
10446     case TARGET_NR_semget:
10447         return get_errno(semget(arg1, arg2, arg3));
10448 #endif
10449 #ifdef TARGET_NR_semop
10450     case TARGET_NR_semop:
          /* semop is semtimedop with no timeout; the final bool selects
           * 64-bit vs native target timespec conversion. */
10451         return do_semtimedop(arg1, arg2, arg3, 0, false);
10452 #endif
10453 #ifdef TARGET_NR_semtimedop
10454     case TARGET_NR_semtimedop:
10455         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10456 #endif
10457 #ifdef TARGET_NR_semtimedop_time64
10458     case TARGET_NR_semtimedop_time64:
10459         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10460 #endif
10461 #ifdef TARGET_NR_semctl
10462     case TARGET_NR_semctl:
10463         return do_semctl(arg1, arg2, arg3, arg4);
10464 #endif
10465 #ifdef TARGET_NR_msgctl
10466     case TARGET_NR_msgctl:
10467         return do_msgctl(arg1, arg2, arg3);
10468 #endif
10469 #ifdef TARGET_NR_msgget
10470     case TARGET_NR_msgget:
10471         return get_errno(msgget(arg1, arg2));
10472 #endif
10473 #ifdef TARGET_NR_msgrcv
10474     case TARGET_NR_msgrcv:
10475         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10476 #endif
10477 #ifdef TARGET_NR_msgsnd
10478     case TARGET_NR_msgsnd:
10479         return do_msgsnd(arg1, arg2, arg3, arg4);
10480 #endif
10481 #ifdef TARGET_NR_shmget
10482     case TARGET_NR_shmget:
10483         return get_errno(shmget(arg1, arg2, arg3));
10484 #endif
10485 #ifdef TARGET_NR_shmctl
10486     case TARGET_NR_shmctl:
10487         return do_shmctl(arg1, arg2, arg3);
10488 #endif
10489 #ifdef TARGET_NR_shmat
10490     case TARGET_NR_shmat:
10491         return do_shmat(cpu_env, arg1, arg2, arg3);
10492 #endif
10493 #ifdef TARGET_NR_shmdt
10494     case TARGET_NR_shmdt:
10495         return do_shmdt(arg1);
10496 #endif
10497     case TARGET_NR_fsync:
10498         return get_errno(fsync(arg1));
10499     case TARGET_NR_clone:
10500         /* Linux manages to have three different orderings for its
10501          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10502          * match the kernel's CONFIG_CLONE_* settings.
10503          * Microblaze is further special in that it uses a sixth
10504          * implicit argument to clone for the TLS pointer.
10505          */
10506 #if defined(TARGET_MICROBLAZE)
10507         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10508 #elif defined(TARGET_CLONE_BACKWARDS)
10509         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10510 #elif defined(TARGET_CLONE_BACKWARDS2)
10511         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10512 #else
10513         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10514 #endif
10515         return ret;
10516 #ifdef __NR_exit_group
10517         /* new thread calls */
10518     case TARGET_NR_exit_group:
          /* Run QEMU-side teardown (preexit_cleanup) before terminating
           * every thread in the process. */
10519         preexit_cleanup(cpu_env, arg1);
10520         return get_errno(exit_group(arg1));
10521 #endif
10522     case TARGET_NR_setdomainname:
10523         if (!(p = lock_user_string(arg1)))
10524             return -TARGET_EFAULT;
10525         ret = get_errno(setdomainname(p, arg2));
10526         unlock_user(p, arg1, 0);
10527         return ret;
10528     case TARGET_NR_uname:
10529         /* no need to transcode because we use the linux syscall */
10530         {
10531             struct new_utsname * buf;
10532
10533             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10534                 return -TARGET_EFAULT;
10535             ret = get_errno(sys_uname(buf));
10536             if (!is_error(ret)) {
10537                 /* Overwrite the native machine name with whatever is being
10538                    emulated. */
10539                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10540                           sizeof(buf->machine));
10541                 /* Allow the user to override the reported release.  */
10542                 if (qemu_uname_release && *qemu_uname_release) {
10543                     g_strlcpy(buf->release, qemu_uname_release,
10544                               sizeof(buf->release));
10545                 }
10546             }
10547             unlock_user_struct(buf, arg1, 1);
10548         }
10549         return ret;
10550 #ifdef TARGET_I386
10551     case TARGET_NR_modify_ldt:
10552         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10553 #if !defined(TARGET_X86_64)
10554     case TARGET_NR_vm86:
          /* vm86 mode only exists for 32-bit x86 guests. */
10555         return do_vm86(cpu_env, arg1, arg2);
10556 #endif
10557 #endif
10558 #if defined(TARGET_NR_adjtimex)
10559     case TARGET_NR_adjtimex:
          /* adjtimex(2): struct timex is both an in- and out-parameter,
           * so it is converted in both directions around the host call. */
10560         {
10561             struct timex host_buf;
10562
10563             if (target_to_host_timex(&host_buf, arg1) != 0) {
10564                 return -TARGET_EFAULT;
10565             }
10566             ret = get_errno(adjtimex(&host_buf));
10567             if (!is_error(ret)) {
10568                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10569                     return -TARGET_EFAULT;
10570                 }
10571             }
10572         }
10573         return ret;
10574 #endif
10575 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10576     case TARGET_NR_clock_adjtime:
10577         {
10578             struct timex htx, *phtx = &htx;
10579
10580             if (target_to_host_timex(phtx, arg2) != 0) {
10581                 return -TARGET_EFAULT;
10582             }
10583             ret = get_errno(clock_adjtime(arg1, phtx));
              /* NOTE(review): phtx always points at htx, so the "&& phtx"
               * test below is always true (harmless dead condition). */
10584             if (!is_error(ret) && phtx) {
10585                 if (host_to_target_timex(arg2, phtx) != 0) {
10586                     return -TARGET_EFAULT;
10587                 }
10588             }
10589         }
10590         return ret;
10591 #endif
10592 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10593     case TARGET_NR_clock_adjtime64:
          /* time64 variant: identical shape, but uses the 64-bit timex
           * conversion helpers. */
10594         {
10595             struct timex htx;
10596
10597             if (target_to_host_timex64(&htx, arg2) != 0) {
10598                 return -TARGET_EFAULT;
10599             }
10600             ret = get_errno(clock_adjtime(arg1, &htx));
10601             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10602                     return -TARGET_EFAULT;
10603             }
10604         }
10605         return ret;
10606 #endif
10607     case TARGET_NR_getpgid:
10608         return get_errno(getpgid(arg1));
10609     case TARGET_NR_fchdir:
10610         return get_errno(fchdir(arg1));
10611     case TARGET_NR_personality:
10612         return get_errno(personality(arg1));
10613 #ifdef TARGET_NR__llseek /* Not on alpha */
10614     case TARGET_NR__llseek:
          /* _llseek(2): arg2:arg3 = offset high:low halves, arg4 = guest
           * int64 result pointer, arg5 = whence. */
10615         {
10616             int64_t res;
10617 #if !defined(__NR_llseek)
              /* Host lacks llseek (64-bit host): emulate with a plain
               * lseek on the reassembled 64-bit offset. */
10618             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10619             if (res == -1) {
10620                 ret = get_errno(res);
10621             } else {
10622                 ret = 0;
10623             }
10624 #else
10625             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10626 #endif
10627             if ((ret == 0) && put_user_s64(res, arg4)) {
10628                 return -TARGET_EFAULT;
10629             }
10630         }
10631         return ret;
10632 #endif
10633 #ifdef TARGET_NR_getdents
10634     case TARGET_NR_getdents:
10635         return do_getdents(arg1, arg2, arg3);
10636 #endif /* TARGET_NR_getdents */
10637 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10638     case TARGET_NR_getdents64:
10639         return do_getdents64(arg1, arg2, arg3);
10640 #endif /* TARGET_NR_getdents64 */
10641 #if defined(TARGET_NR__newselect)
10642     case TARGET_NR__newselect:
10643         return do_select(arg1, arg2, arg3, arg4, arg5);
10644 #endif
10645 #ifdef TARGET_NR_poll
          /* poll family: the two bools select (have sigmask?, time64?). */
10646     case TARGET_NR_poll:
10647         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10648 #endif
10649 #ifdef TARGET_NR_ppoll
10650     case TARGET_NR_ppoll:
10651         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10652 #endif
10653 #ifdef TARGET_NR_ppoll_time64
10654     case TARGET_NR_ppoll_time64:
10655         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10656 #endif
10657     case TARGET_NR_flock:
10658         /* NOTE: the flock constant seems to be the same for every
10659            Linux platform */
10660         return get_errno(safe_flock(arg1, arg2));
10661     case TARGET_NR_readv:
          /* readv(2): lock_iovec converts the guest iovec array; on failure
           * it sets errno, which is translated to a target errno here. */
10662         {
10663             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10664             if (vec != NULL) {
10665                 ret = get_errno(safe_readv(arg1, vec, arg3));
10666                 unlock_iovec(vec, arg2, arg3, 1);
10667             } else {
10668                 ret = -host_to_target_errno(errno);
10669             }
10670         }
10671         return ret;
10672     case TARGET_NR_writev:
10673         {
10674             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10675             if (vec != NULL) {
10676                 ret = get_errno(safe_writev(arg1, vec, arg3));
10677                 unlock_iovec(vec, arg2, arg3, 0);
10678             } else {
10679                 ret = -host_to_target_errno(errno);
10680             }
10681         }
10682         return ret;
10683 #if defined(TARGET_NR_preadv)
10684     case TARGET_NR_preadv:
          /* preadv(2): arg4/arg5 are the split 64-bit offset; the helper
           * maps them to the host's low/high calling convention. */
10685         {
10686             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10687             if (vec != NULL) {
10688                 unsigned long low, high;
10689
10690                 target_to_host_low_high(arg4, arg5, &low, &high);
10691                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10692                 unlock_iovec(vec, arg2, arg3, 1);
10693             } else {
10694                 ret = -host_to_target_errno(errno);
10695            }
10696         }
10697         return ret;
10698 #endif
10699 #if defined(TARGET_NR_pwritev)
10700     case TARGET_NR_pwritev:
10701         {
10702             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10703             if (vec != NULL) {
10704                 unsigned long low, high;
10705
10706                 target_to_host_low_high(arg4, arg5, &low, &high);
10707                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10708                 unlock_iovec(vec, arg2, arg3, 0);
10709             } else {
10710                 ret = -host_to_target_errno(errno);
10711            }
10712         }
10713         return ret;
10714 #endif
10715     case TARGET_NR_getsid:
10716         return get_errno(getsid(arg1));
10717 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10718     case TARGET_NR_fdatasync:
10719         return get_errno(fdatasync(arg1));
10720 #endif
10721     case TARGET_NR_sched_getaffinity:
10722         {
10723             unsigned int mask_size;
10724             unsigned long *mask;
10725
10726             /*
10727              * sched_getaffinity needs multiples of ulong, so need to take
10728              * care of mismatches between target ulong and host ulong sizes.
10729              */
10730             if (arg2 & (sizeof(abi_ulong) - 1)) {
10731                 return -TARGET_EINVAL;
10732             }
              /* Round the guest's byte count up to a whole number of host
               * ulongs for the host syscall. */
10733             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10734
10735             mask = alloca(mask_size);
10736             memset(mask, 0, mask_size);
10737             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10738
10739             if (!is_error(ret)) {
10740                 if (ret > arg2) {
10741                     /* More data returned than the caller's buffer will fit.
10742                      * This only happens if sizeof(abi_long) < sizeof(long)
10743                      * and the caller passed us a buffer holding an odd number
10744                      * of abi_longs. If the host kernel is actually using the
10745                      * extra 4 bytes then fail EINVAL; otherwise we can just
10746                      * ignore them and only copy the interesting part.
10747                      */
10748                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10749                     if (numcpus > arg2 * 8) {
10750                         return -TARGET_EINVAL;
10751                     }
10752                     ret = arg2;
10753                 }
10754
10755                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10756                     return -TARGET_EFAULT;
10757                 }
10758             }
10759         }
10760         return ret;
10761     case TARGET_NR_sched_setaffinity:
10762         {
10763             unsigned int mask_size;
10764             unsigned long *mask;
10765
10766             /*
10767              * sched_setaffinity needs multiples of ulong, so need to take
10768              * care of mismatches between target ulong and host ulong sizes.
10769              */
10770             if (arg2 & (sizeof(abi_ulong) - 1)) {
10771                 return -TARGET_EINVAL;
10772             }
10773             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10774             mask = alloca(mask_size);
10775
              /* Convert the guest CPU mask into host layout before the
               * host syscall. */
10776             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10777             if (ret) {
10778                 return ret;
10779             }
10780
10781             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10782         }
10783     case TARGET_NR_getcpu:
          /* getcpu(2): both out-pointers are optional; only dereference the
           * ones the guest supplied. */
10784         {
10785             unsigned cpu, node;
10786             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10787                                        arg2 ? &node : NULL,
10788                                        NULL));
10789             if (is_error(ret)) {
10790                 return ret;
10791             }
10792             if (arg1 && put_user_u32(cpu, arg1)) {
10793                 return -TARGET_EFAULT;
10794             }
10795             if (arg2 && put_user_u32(node, arg2)) {
10796                 return -TARGET_EFAULT;
10797             }
10798         }
10799         return ret;
10800     case TARGET_NR_sched_setparam:
          /* sched_setparam(2): copy the single sched_priority field in
           * from the guest (byte-swapped) and pass it to the host. */
10801         {
10802             struct target_sched_param *target_schp;
10803             struct sched_param schp;
10804
10805             if (arg2 == 0) {
10806                 return -TARGET_EINVAL;
10807             }
10808             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10809                 return -TARGET_EFAULT;
10810             }
10811             schp.sched_priority = tswap32(target_schp->sched_priority);
10812             unlock_user_struct(target_schp, arg2, 0);
10813             return get_errno(sys_sched_setparam(arg1, &schp));
10814         }
10815     case TARGET_NR_sched_getparam:
10816         {
10817             struct target_sched_param *target_schp;
10818             struct sched_param schp;
10819
10820             if (arg2 == 0) {
10821                 return -TARGET_EINVAL;
10822             }
10823             ret = get_errno(sys_sched_getparam(arg1, &schp));
10824             if (!is_error(ret)) {
10825                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10826                     return -TARGET_EFAULT;
10827                 }
10828                 target_schp->sched_priority = tswap32(schp.sched_priority);
10829                 unlock_user_struct(target_schp, arg2, 1);
10830             }
10831         }
10832         return ret;
10833     case TARGET_NR_sched_setscheduler:
          /* sched_setscheduler(2): like setparam but with a policy (arg2)
           * and the param struct at arg3. */
10834         {
10835             struct target_sched_param *target_schp;
10836             struct sched_param schp;
10837             if (arg3 == 0) {
10838                 return -TARGET_EINVAL;
10839             }
10840             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10841                 return -TARGET_EFAULT;
10842             }
10843             schp.sched_priority = tswap32(target_schp->sched_priority);
10844             unlock_user_struct(target_schp, arg3, 0);
10845             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10846         }
10847     case TARGET_NR_sched_getscheduler:
10848         return get_errno(sys_sched_getscheduler(arg1));
10849     case TARGET_NR_sched_getattr:
          /* sched_getattr(2): arg2 = guest sched_attr buffer, arg3 = its
           * size, arg4 = flags.  The request size is clamped to our local
           * struct; util fields are copied only when the kernel reported a
           * size large enough to contain them. */
10850         {
10851             struct target_sched_attr *target_scha;
10852             struct sched_attr scha;
10853             if (arg2 == 0) {
10854                 return -TARGET_EINVAL;
10855             }
10856             if (arg3 > sizeof(scha)) {
10857                 arg3 = sizeof(scha);
10858             }
10859             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10860             if (!is_error(ret)) {
10861                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10862                 if (!target_scha) {
10863                     return -TARGET_EFAULT;
10864                 }
10865                 target_scha->size = tswap32(scha.size);
10866                 target_scha->sched_policy = tswap32(scha.sched_policy);
10867                 target_scha->sched_flags = tswap64(scha.sched_flags);
10868                 target_scha->sched_nice = tswap32(scha.sched_nice);
10869                 target_scha->sched_priority = tswap32(scha.sched_priority);
10870                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10871                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10872                 target_scha->sched_period = tswap64(scha.sched_period);
10873                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10874                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10875                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10876                 }
10877                 unlock_user(target_scha, arg2, arg3);
10878             }
10879             return ret;
10880         }
10881     case TARGET_NR_sched_setattr:
          /*
           * sched_setattr(2): the guest declares its struct size in the
           * first u32.  Mirror the kernel's contract: size 0 means the
           * v1 struct; a size smaller than v1, or one with non-zero bytes
           * beyond what we understand, fails E2BIG after writing back the
           * size we do support.
           */
10882         {
10883             struct target_sched_attr *target_scha;
10884             struct sched_attr scha;
10885             uint32_t size;
10886             int zeroed;
10887             if (arg2 == 0) {
10888                 return -TARGET_EINVAL;
10889             }
10890             if (get_user_u32(size, arg2)) {
10891                 return -TARGET_EFAULT;
10892             }
10893             if (!size) {
10894                 size = offsetof(struct target_sched_attr, sched_util_min);
10895             }
10896             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10897                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10898                     return -TARGET_EFAULT;
10899                 }
10900                 return -TARGET_E2BIG;
10901             }
10902
              /* Reject if the guest set bits in fields newer than we know. */
10903             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
10904             if (zeroed < 0) {
10905                 return zeroed;
10906             } else if (zeroed == 0) {
10907                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10908                     return -TARGET_EFAULT;
10909                 }
10910                 return -TARGET_E2BIG;
10911             }
10912             if (size > sizeof(struct target_sched_attr)) {
10913                 size = sizeof(struct target_sched_attr);
10914             }
10915
10916             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
10917             if (!target_scha) {
10918                 return -TARGET_EFAULT;
10919             }
10920             scha.size = size;
10921             scha.sched_policy = tswap32(target_scha->sched_policy);
10922             scha.sched_flags = tswap64(target_scha->sched_flags);
10923             scha.sched_nice = tswap32(target_scha->sched_nice);
10924             scha.sched_priority = tswap32(target_scha->sched_priority);
10925             scha.sched_runtime = tswap64(target_scha->sched_runtime);
10926             scha.sched_deadline = tswap64(target_scha->sched_deadline);
10927             scha.sched_period = tswap64(target_scha->sched_period);
10928             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
10929                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
10930                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
10931             }
10932             unlock_user(target_scha, arg2, 0);
10933             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
10934         }
10935     case TARGET_NR_sched_yield:
10936         return get_errno(sched_yield());
10937     case TARGET_NR_sched_get_priority_max:
10938         return get_errno(sched_get_priority_max(arg1));
10939     case TARGET_NR_sched_get_priority_min:
10940         return get_errno(sched_get_priority_min(arg1));
10941 #ifdef TARGET_NR_sched_rr_get_interval
10942     case TARGET_NR_sched_rr_get_interval:
          /* sched_rr_get_interval(2): on success the host timespec is
           * converted out; a conversion fault replaces ret. */
10943         {
10944             struct timespec ts;
10945             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10946             if (!is_error(ret)) {
10947                 ret = host_to_target_timespec(arg2, &ts);
10948             }
10949         }
10950         return ret;
10951 #endif
10952 #ifdef TARGET_NR_sched_rr_get_interval_time64
10953     case TARGET_NR_sched_rr_get_interval_time64:
          /* Same as above with a 64-bit target timespec. */
10954         {
10955             struct timespec ts;
10956             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10957             if (!is_error(ret)) {
10958                 ret = host_to_target_timespec64(arg2, &ts);
10959             }
10960         }
10961         return ret;
10962 #endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        /*
         * nanosleep(2): arg1 = guest requested interval, arg2 = optional
         * guest remainder out-pointer (filled on interruption).
         * Bug fix: the conversion helpers' return values were ignored, so
         * a faulting guest pointer would sleep on an uninitialized
         * timespec (or silently drop the remainder) instead of returning
         * EFAULT as the kernel does.
         */
        {
            struct timespec req, rem;

            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2 &&
                host_to_target_timespec(arg2, &rem)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
10975     case TARGET_NR_prctl:
10976         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10977         break;
10978 #ifdef TARGET_NR_arch_prctl
10979     case TARGET_NR_arch_prctl:
10980         return do_arch_prctl(cpu_env, arg1, arg2);
10981 #endif
10982 #ifdef TARGET_NR_pread64
10983     case TARGET_NR_pread64:
          /* pread64(2): on ABIs that align 64-bit syscall arguments to
           * even register pairs, the offset halves arrive one slot later,
           * so shift them down first. */
10984         if (regpairs_aligned(cpu_env, num)) {
10985             arg4 = arg5;
10986             arg5 = arg6;
10987         }
10988         if (arg2 == 0 && arg3 == 0) {
10989             /* Special-case NULL buffer and zero length, which should succeed */
10990             p = 0;
10991         } else {
10992             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10993             if (!p) {
10994                 return -TARGET_EFAULT;
10995             }
10996         }
10997         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10998         unlock_user(p, arg2, ret);
10999         return ret;
11000     case TARGET_NR_pwrite64:
11001         if (regpairs_aligned(cpu_env, num)) {
11002             arg4 = arg5;
11003             arg5 = arg6;
11004         }
11005         if (arg2 == 0 && arg3 == 0) {
11006             /* Special-case NULL buffer and zero length, which should succeed */
11007             p = 0;
11008         } else {
11009             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11010             if (!p) {
11011                 return -TARGET_EFAULT;
11012             }
11013         }
11014         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11015         unlock_user(p, arg2, 0);
11016         return ret;
11017 #endif
11018     case TARGET_NR_getcwd:
          /* getcwd(2): write the path straight into the locked guest
           * buffer; ret is the number of bytes produced. */
11019         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11020             return -TARGET_EFAULT;
11021         ret = get_errno(sys_getcwd1(p, arg2));
11022         unlock_user(p, arg1, ret);
11023         return ret;
11024     case TARGET_NR_capget:
11025     case TARGET_NR_capset:
          /*
           * capget(2)/capset(2) share one body; 'num' distinguishes them.
           * arg1 = guest cap_user_header, arg2 = optional guest
           * cap_user_data array (1 entry for _LINUX_CAPABILITY_VERSION,
           * 2 entries for every later version).
           */
11026     {
11027         struct target_user_cap_header *target_header;
11028         struct target_user_cap_data *target_data = NULL;
11029         struct __user_cap_header_struct header;
11030         struct __user_cap_data_struct data[2];
11031         struct __user_cap_data_struct *dataptr = NULL;
11032         int i, target_datalen;
11033         int data_items = 1;
11034
11035         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11036             return -TARGET_EFAULT;
11037         }
11038         header.version = tswap32(target_header->version);
11039         header.pid = tswap32(target_header->pid);
11040
11041         if (header.version != _LINUX_CAPABILITY_VERSION) {
11042             /* Version 2 and up takes pointer to two user_data structs */
11043             data_items = 2;
11044         }
11045
11046         target_datalen = sizeof(*target_data) * data_items;
11047
11048         if (arg2) {
              /* capget writes the data array, capset reads it. */
11049             if (num == TARGET_NR_capget) {
11050                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11051             } else {
11052                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11053             }
11054             if (!target_data) {
11055                 unlock_user_struct(target_header, arg1, 0);
11056                 return -TARGET_EFAULT;
11057             }
11058
11059             if (num == TARGET_NR_capset) {
11060                 for (i = 0; i < data_items; i++) {
11061                     data[i].effective = tswap32(target_data[i].effective);
11062                     data[i].permitted = tswap32(target_data[i].permitted);
11063                     data[i].inheritable = tswap32(target_data[i].inheritable);
11064                 }
11065             }
11066
11067             dataptr = data;
11068         }
11069
11070         if (num == TARGET_NR_capget) {
11071             ret = get_errno(capget(&header, dataptr));
11072         } else {
11073             ret = get_errno(capset(&header, dataptr));
11074         }
11075
11076         /* The kernel always updates version for both capget and capset */
11077         target_header->version = tswap32(header.version);
11078         unlock_user_struct(target_header, arg1, 1);
11079
11080         if (arg2) {
11081             if (num == TARGET_NR_capget) {
11082                 for (i = 0; i < data_items; i++) {
11083                     target_data[i].effective = tswap32(data[i].effective);
11084                     target_data[i].permitted = tswap32(data[i].permitted);
11085                     target_data[i].inheritable = tswap32(data[i].inheritable);
11086                 }
11087                 unlock_user(target_data, arg2, target_datalen);
11088             } else {
11089                 unlock_user(target_data, arg2, 0);
11090             }
11091         }
11092         return ret;
11093     }
    case TARGET_NR_sigaltstack:
        /* Signal alternate stack: delegated entirely to the common helper. */
        return do_sigaltstack(arg1, arg2, cpu_env);
11096 
#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        /*
         * sendfile(out_fd, in_fd, *offset, count): arg3 may be 0, in which
         * case the host is given a NULL offset pointer.  On success the
         * updated offset is written back to the guest.  get_user_sal /
         * put_user_sal transfer an abi_long-sized offset.
         */
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            /* Write-back failure overrides an otherwise successful result. */
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        /* As sendfile above, but the guest offset is always 64-bit wide. */
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* vfork: routed through the common do_fork helper with the
           CLONE_VFORK | CLONE_VM flags plus the target's SIGCHLD value. */
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        /*
         * "Unsigned" getrlimit: query the host limit for the translated
         * resource and, on success, store it in target layout at arg2.
         */
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);

        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;

            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) {
                return -TARGET_EFAULT;
            }
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        /* 64-bit truncate: per-arch helper decodes the length arguments. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        /* 64-bit ftruncate: see target_ftruncate64 for per-arch argument
           decoding (register pairs on 32-bit ABIs). */
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        /* stat64: stat the guest path (via the path() remapper), then
           convert the host struct stat to the target 64-bit layout. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        /* As stat64, but does not follow a trailing symlink. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        /* As stat64, but on an already-open file descriptor. */
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        /* Directory-relative stat: arg1=dirfd, arg2=path, arg3=statbuf,
           arg4=flags, all passed through to host fstatat(). */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            /*
             * statx(dirfd, path, flags, mask, statxbuf): try the host statx
             * syscall when the host kernel provides it; otherwise (or on
             * host ENOSYS) fall back to fstatat() and fill in the subset of
             * statx fields that a plain struct stat can supply.
             */
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                /* Only fall through to the fstatat() emulation below when the
                   host kernel itself lacks statx (ENOSYS). */
                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                /* Unavailable statx fields (mask, attributes, nsec parts of
                   the timestamps, btime) are left zeroed by this memset. */
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        /* chown on the link itself.  These are the legacy 16-bit id
           syscalls: target ids are widened with low2high{uid,gid}. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        /* Legacy 16-bit variants: host ids are narrowed on the way out. */
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11312     case TARGET_NR_getgroups:
11313         {
11314             int gidsetsize = arg1;
11315             target_id *target_grouplist;
11316             gid_t *grouplist;
11317             int i;
11318 
11319             grouplist = alloca(gidsetsize * sizeof(gid_t));
11320             ret = get_errno(getgroups(gidsetsize, grouplist));
11321             if (gidsetsize == 0)
11322                 return ret;
11323             if (!is_error(ret)) {
11324                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11325                 if (!target_grouplist)
11326                     return -TARGET_EFAULT;
11327                 for(i = 0;i < ret; i++)
11328                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11329                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11330             }
11331         }
11332         return ret;
11333     case TARGET_NR_setgroups:
11334         {
11335             int gidsetsize = arg1;
11336             target_id *target_grouplist;
11337             gid_t *grouplist = NULL;
11338             int i;
11339             if (gidsetsize) {
11340                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11341                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11342                 if (!target_grouplist) {
11343                     return -TARGET_EFAULT;
11344                 }
11345                 for (i = 0; i < gidsetsize; i++) {
11346                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11347                 }
11348                 unlock_user(target_grouplist, arg2, 0);
11349             }
11350             return get_errno(setgroups(gidsetsize, grouplist));
11351         }
    case TARGET_NR_fchown:
        /* 16-bit id variant: ids widened before the host call. */
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        /* Directory-relative chown; arg5 (AT_* flags) passed through. */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        /* 16-bit id variant; sys_* wrappers invoke the raw syscalls. */
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Store real/effective/saved uids to the three guest
                   pointers, narrowed to the target's 16-bit id width. */
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /*
         * 16-bit id variant.  Guard fixed: it previously tested
         * TARGET_NR_getresgid (copy-paste from the case below), which
         * would mishandle a target defining setresgid without getresgid.
         */
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                /* Store real/effective/saved gids, narrowed to the
                   target's 16-bit id width. */
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        /* 16-bit id variant; follows symlinks (contrast lchown). */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        /* setfsuid/setfsgid never fail; the previous id is returned. */
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
11419 
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        /* 32-bit id variants: ids pass through without the 16-bit
           low2high/high2low conversions. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
11432 
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxuid:
         {
            /* getxuid returns two values: the real uid in the normal
               return register and the effective uid in a4. */
            uid_t euid;
            euid=geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
         }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxgid:
         {
            /* getxgid returns two values: the real gid in the normal
               return register and the effective gid in a4. */
            gid_t egid;   /* was mistyped uid_t; use the matching gid type */
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
         }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        /* Unhandled info codes fall through to EOPNOTSUPP. */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                /* The FP status bits are kept only in FPCR (see the
                   osf_setsysinfo handler); refresh them into the software
                   control word before reporting it to the guest. */
                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                        return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        /* Unhandled info codes fall through to EOPNOTSUPP. */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                /* Install a new software FP control word and mirror its
                   trap-enable bits into the hardware FPCR. */
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                /* Rebuild FPCR: keep only the dynamic rounding mode, then
                   fold in the bits implied by the new software word. */
                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                /* Raise the given IEEE exception bits as if the FPU had
                   set them, delivering SIGFPE for newly-signaled, enabled
                   exceptions. */
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    /* Later tests override earlier ones, so INV has the
                       highest priority and DNO the lowest. */
                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    /* Queue a SIGFPE faulting at the current guest PC. */
                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            /* Old-style sigprocmask: the mask is passed by value in arg2
               and the previous mask is returned in the result register. */
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                /* Success: hand the old mask back as the return value. */
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
11609 
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        /* 32-bit id variants: no 16-bit narrowing/widening needed. */
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            /* getgroups(size, list): 32-bit id variant; ids only need a
               byte swap.  size == 0 queries the number of groups. */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /*
             * Bound the guest-controlled size before it feeds the alloca
             * below (a huge or negative value would smash the stack).
             * The kernel rejects such sizes with EINVAL anyway.
             */
            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            /* setgroups(size, list): 32-bit id variant. */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /*
             * Reject sizes the kernel refuses (> NGROUPS_MAX, or negative)
             * before the guest-controlled alloca below can smash the stack.
             */
            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for(i = 0;i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        /* 32-bit id variant: ids pass through unconverted. */
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Store real/effective/saved uids as full 32-bit values. */
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                /* Store real/effective/saved gids as full 32-bit values. */
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        /* 32-bit id variants: ids pass through unconverted. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* mincore(addr, length, vec): arg1/arg2 describe the guest
               address range, arg3 is the per-page output byte vector. */
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                /* Unmapped range: mirror the kernel's ENOMEM. */
                return -TARGET_ENOMEM;
            }
            /* NOTE(review): arg3 is an *output* byte vector, yet it is
               locked with lock_user_string() and unlocked below with
               len=ret (0 on success).  This only works because the default
               lock_user maps guest memory in place; verify against copying
               (DEBUG_REMAP-style) implementations. */
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        /* posix_fadvise returns the error number directly (not via errno),
           hence the explicit negate-and-translate here. */
        return -host_to_target_errno(ret);
#endif
11767 
#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        /* Rotate the arguments into the canonical fd, offset, len, advice
           order, using ret as scratch for the advice value. */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            /* ABIs that align 64-bit register pairs insert a pad after the
               fd, shifting everything down by one slot. */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        /* posix_fadvise returns the error directly rather than via errno. */
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x uses different advice constants; remap them to the host's. */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
11830 
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        /* Deliberate no-op: always report success to the guest. */
        return 0;
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        /*
         * 64-bit file-locking fcntl.  Lock structures are converted between
         * guest and host layouts by the copy_in/copy_out helpers; any
         * command other than the *LK64 ones is delegated to do_fcntl().
         */
        struct flock64 lock;
        int host_cmd;
        from_flock64_fn *copy_in = copy_from_user_flock64;
        to_flock64_fn *copy_out = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* Old-ABI ARM guests use a different struct flock64 layout. */
        if (!((CPUARMState *)cpu_env)->eabi) {
            copy_in = copy_from_user_oabi_flock64;
            copy_out = copy_to_user_oabi_flock64;
        }
#endif

        host_cmd = target_to_host_fcntl_cmd(arg2);
        if (host_cmd == -TARGET_EINVAL) {
            return host_cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copy_in(&lock, arg3);
            if (!ret) {
                ret = get_errno(safe_fcntl(arg1, host_cmd, &lock));
                if (ret == 0) {
                    /* Report the conflicting lock back to the guest. */
                    ret = copy_out(arg3, &lock);
                }
            }
            break;
        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copy_in(&lock, arg3);
            if (!ret) {
                ret = get_errno(safe_fcntl(arg1, host_cmd, &lock));
            }
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the emulated target's page size, not the host's. */
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        /* sys_gettid wraps the raw syscall (no glibc wrapper historically). */
        return get_errno(sys_gettid());
11897 #ifdef TARGET_NR_readahead
11898     case TARGET_NR_readahead:
11899 #if TARGET_ABI_BITS == 32
11900         if (regpairs_aligned(cpu_env, num)) {
11901             arg2 = arg3;
11902             arg3 = arg4;
11903             arg4 = arg5;
11904         }
11905         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11906 #else
11907         ret = get_errno(readahead(arg1, arg2, arg3));
11908 #endif
11909         return ret;
11910 #endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    /*
     * Extended attribute syscalls: list/set/get/remove, each with
     * path-based, symlink (l*) and fd-based (f*) variants.  Guest
     * buffers are locked, the host call is made, and results are
     * written back on unlock.
     */
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        /* b stays NULL when arg2 == 0 so the guest can query the size. */
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            /* p = path, n = attribute name, v = value (may be NULL). */
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            /* Copy the fetched value back to the guest buffer. */
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
      /* MIPS keeps the TLS pointer in the CP0 UserLocal register. */
      ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_CRIS)
      /* CRIS requires the low 8 bits of the TLS value to be clear. */
      if (arg1 & 0xff)
          ret = -TARGET_EINVAL;
      else {
          ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
          ret = 0;
      }
      return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          /* m68k has no TLS register; emulate it in per-task state. */
          TaskState *ts = cpu->opaque;
          ts->tp_value = arg1;
          return 0;
      }
#else
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        /* Deliberately unimplemented; glibc falls back to uname(). */
        return -TARGET_ENOSYS;
#endif
12115 
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        /* Convert the guest timespec, then set the host clock. */
        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        /* Same as clock_settime but with a 64-bit time_t guest layout. */
        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            /* Write the result back in the guest's timespec layout. */
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        /*
         * A NULL result pointer is valid (the resolution is simply not
         * reported); for a non-NULL pointer a failed write-back must
         * surface as EFAULT rather than being silently dropped.
         */
        if (!is_error(ret) && arg2 &&
            host_to_target_timespec(arg2, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        /* As above, with the 64-bit time_t guest layout. */
        if (!is_error(ret) && arg2 &&
            host_to_target_timespec64(arg2, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        /* ts doubles as the remaining-time output when arg4 is set. */
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
              return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        /* As clock_nanosleep but with a 64-bit time_t guest layout. */
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif

#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* The kernel writes the exit-time TID clear directly to guest memory. */
        return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
#endif
12230 
    case TARGET_NR_tkill:
        /* Guest signal numbers must be translated to host numbering. */
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
12255 
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            /* ts[0] is the access time, ts[1] the modification time. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                /* NULL times means "set both timestamps to now". */
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            /* As utimensat but with the 64-bit time_t guest layout. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        /* All futex handling lives in the do_futex* helpers. */
        return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            /* Events read from this fd need byte-order translation. */
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        /* Fault on a bad pathname rather than passing NULL to the host. */
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
#endif
12351 
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /*
             * NOTE(review): the queue name is locked starting at arg1 - 1,
             * one byte before the guest pointer.  This looks suspicious;
             * presumably historical — verify against callers/ABI.
             */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        /* Same arg1 - 1 name-locking quirk as mq_open above. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
12384 
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            /* Message payload; fault early if the guest buffer is bad. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    unlock_user (p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user (p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            /* As mq_timedsend but with the 64-bit time_t guest layout. */
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
12427 
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            /*
             * Buffer that receives the message; fault early if bad.
             * NOTE(review): locked with VERIFY_READ although the host call
             * writes into it — the write-back happens via unlock_user()
             * below.  Verify whether VERIFY_WRITE is intended here.
             */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    unlock_user (p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user (p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            /* As mq_timedreceive but with the 64-bit time_t guest layout. */
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif
12481 
12482     /* Not implemented for now... */
12483 /*     case TARGET_NR_mq_notify: */
12484 /*         break; */
12485 
12486     case TARGET_NR_mq_getsetattr:
12487         {
12488             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12489             ret = 0;
12490             if (arg2 != 0) {
12491                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12492                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12493                                            &posix_mq_attr_out));
12494             } else if (arg3 != 0) {
12495                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12496             }
12497             if (ret == 0 && arg3 != 0) {
12498                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12499             }
12500         }
12501         return ret;
12502 #endif
12503 
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            /* Duplicate up to arg3 bytes between two pipe fds. */
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            /* Write the (possibly advanced) offsets back to the guest. */
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec reports its failure reason via errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            /* The 64-bit counter read/written needs byte swapping. */
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Translate the guest's O_NONBLOCK/O_CLOEXEC bits individually. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD  */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        /* 32-bit ABIs pass offset and length as register pairs. */
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS o32 inserts a padding register before the offset pair. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Legacy signalfd is signalfd4 with no flags. */
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             *
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
12675 
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound maxevents so the host-side allocation stays sane. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        /* Host-format event array; converted back to guest layout below. */
        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            /* epoll_wait is epoll_pwait with no signal mask. */
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the number of ready events; swap each back to guest. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
12760 #endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        /*
         * Only copy in a new limit for resources other than AS/DATA/STACK.
         * NOTE(review): those three are deliberately left with rnewp == NULL
         * (query-only), presumably so guest-requested address-space limits
         * cannot interfere with QEMU's own host memory usage — confirm
         * against the sibling setrlimit handling.
         */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            /* Byte-swap the 64-bit limits from guest to host order. */
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        /* Old limits are only fetched when the caller asked for them (arg4). */
        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            /* Copy the previous limits back out in guest byte order. */
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /*
         * args: char *name, size_t len.  Map the guest buffer writable,
         * let the host fill it, then copy the result back (unlock with
         * length arg2 marks the region dirty).
         */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /*
         * args: new value (arg1), expected value (arg2), address (arg6).
         * Compare the 32-bit word at arg6 with arg2 and, if equal, store
         * arg1; the old memory value is returned either way.
         * NOTE(review): not actually atomic — should use start_exclusive
         * from main.c.
         */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* The guest address is unmapped: deliver SIGSEGV. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            /*
             * Bug fix: previously we fell through and compared/returned the
             * *uninitialized* mem_value (undefined behavior).  Return the
             * sentinel instead; the queued signal supersedes the value.
             */
            return 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
12833 
12834 #ifdef TARGET_NR_timer_create
12835     case TARGET_NR_timer_create:
12836     {
12837         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12838 
12839         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12840 
12841         int clkid = arg1;
12842         int timer_index = next_free_host_timer();
12843 
12844         if (timer_index < 0) {
12845             ret = -TARGET_EAGAIN;
12846         } else {
12847             timer_t *phtimer = g_posix_timers  + timer_index;
12848 
12849             if (arg2) {
12850                 phost_sevp = &host_sevp;
12851                 ret = target_to_host_sigevent(phost_sevp, arg2);
12852                 if (ret != 0) {
12853                     return ret;
12854                 }
12855             }
12856 
12857             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12858             if (ret) {
12859                 phtimer = NULL;
12860             } else {
12861                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12862                     return -TARGET_EFAULT;
12863                 }
12864             }
12865         }
12866         return ret;
12867     }
12868 #endif
12869 
12870 #ifdef TARGET_NR_timer_settime
12871     case TARGET_NR_timer_settime:
12872     {
12873         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12874          * struct itimerspec * old_value */
12875         target_timer_t timerid = get_timer_id(arg1);
12876 
12877         if (timerid < 0) {
12878             ret = timerid;
12879         } else if (arg3 == 0) {
12880             ret = -TARGET_EINVAL;
12881         } else {
12882             timer_t htimer = g_posix_timers[timerid];
12883             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12884 
12885             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12886                 return -TARGET_EFAULT;
12887             }
12888             ret = get_errno(
12889                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12890             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12891                 return -TARGET_EFAULT;
12892             }
12893         }
12894         return ret;
12895     }
12896 #endif
12897 
12898 #ifdef TARGET_NR_timer_settime64
12899     case TARGET_NR_timer_settime64:
12900     {
12901         target_timer_t timerid = get_timer_id(arg1);
12902 
12903         if (timerid < 0) {
12904             ret = timerid;
12905         } else if (arg3 == 0) {
12906             ret = -TARGET_EINVAL;
12907         } else {
12908             timer_t htimer = g_posix_timers[timerid];
12909             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12910 
12911             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12912                 return -TARGET_EFAULT;
12913             }
12914             ret = get_errno(
12915                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12916             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12917                 return -TARGET_EFAULT;
12918             }
12919         }
12920         return ret;
12921     }
12922 #endif
12923 
12924 #ifdef TARGET_NR_timer_gettime
12925     case TARGET_NR_timer_gettime:
12926     {
12927         /* args: timer_t timerid, struct itimerspec *curr_value */
12928         target_timer_t timerid = get_timer_id(arg1);
12929 
12930         if (timerid < 0) {
12931             ret = timerid;
12932         } else if (!arg2) {
12933             ret = -TARGET_EFAULT;
12934         } else {
12935             timer_t htimer = g_posix_timers[timerid];
12936             struct itimerspec hspec;
12937             ret = get_errno(timer_gettime(htimer, &hspec));
12938 
12939             if (host_to_target_itimerspec(arg2, &hspec)) {
12940                 ret = -TARGET_EFAULT;
12941             }
12942         }
12943         return ret;
12944     }
12945 #endif
12946 
12947 #ifdef TARGET_NR_timer_gettime64
12948     case TARGET_NR_timer_gettime64:
12949     {
12950         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12951         target_timer_t timerid = get_timer_id(arg1);
12952 
12953         if (timerid < 0) {
12954             ret = timerid;
12955         } else if (!arg2) {
12956             ret = -TARGET_EFAULT;
12957         } else {
12958             timer_t htimer = g_posix_timers[timerid];
12959             struct itimerspec hspec;
12960             ret = get_errno(timer_gettime(htimer, &hspec));
12961 
12962             if (host_to_target_itimerspec64(arg2, &hspec)) {
12963                 ret = -TARGET_EFAULT;
12964             }
12965         }
12966         return ret;
12967     }
12968 #endif
12969 
12970 #ifdef TARGET_NR_timer_getoverrun
12971     case TARGET_NR_timer_getoverrun:
12972     {
12973         /* args: timer_t timerid */
12974         target_timer_t timerid = get_timer_id(arg1);
12975 
12976         if (timerid < 0) {
12977             ret = timerid;
12978         } else {
12979             timer_t htimer = g_posix_timers[timerid];
12980             ret = get_errno(timer_getoverrun(htimer));
12981         }
12982         return ret;
12983     }
12984 #endif
12985 
12986 #ifdef TARGET_NR_timer_delete
12987     case TARGET_NR_timer_delete:
12988     {
12989         /* args: timer_t timerid */
12990         target_timer_t timerid = get_timer_id(arg1);
12991 
12992         if (timerid < 0) {
12993             ret = timerid;
12994         } else {
12995             timer_t htimer = g_posix_timers[timerid];
12996             ret = get_errno(timer_delete(htimer));
12997             g_posix_timers[timerid] = 0;
12998         }
12999         return ret;
13000     }
13001 #endif
13002 
13003 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13004     case TARGET_NR_timerfd_create:
13005         return get_errno(timerfd_create(arg1,
13006                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13007 #endif
13008 
13009 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13010     case TARGET_NR_timerfd_gettime:
13011         {
13012             struct itimerspec its_curr;
13013 
13014             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13015 
13016             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13017                 return -TARGET_EFAULT;
13018             }
13019         }
13020         return ret;
13021 #endif
13022 
13023 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13024     case TARGET_NR_timerfd_gettime64:
13025         {
13026             struct itimerspec its_curr;
13027 
13028             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13029 
13030             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13031                 return -TARGET_EFAULT;
13032             }
13033         }
13034         return ret;
13035 #endif
13036 
13037 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13038     case TARGET_NR_timerfd_settime:
13039         {
13040             struct itimerspec its_new, its_old, *p_new;
13041 
13042             if (arg3) {
13043                 if (target_to_host_itimerspec(&its_new, arg3)) {
13044                     return -TARGET_EFAULT;
13045                 }
13046                 p_new = &its_new;
13047             } else {
13048                 p_new = NULL;
13049             }
13050 
13051             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13052 
13053             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13054                 return -TARGET_EFAULT;
13055             }
13056         }
13057         return ret;
13058 #endif
13059 
13060 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13061     case TARGET_NR_timerfd_settime64:
13062         {
13063             struct itimerspec its_new, its_old, *p_new;
13064 
13065             if (arg3) {
13066                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13067                     return -TARGET_EFAULT;
13068                 }
13069                 p_new = &its_new;
13070             } else {
13071                 p_new = NULL;
13072             }
13073 
13074             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13075 
13076             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13077                 return -TARGET_EFAULT;
13078             }
13079         }
13080         return ret;
13081 #endif
13082 
13083 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13084     case TARGET_NR_ioprio_get:
13085         return get_errno(ioprio_get(arg1, arg2));
13086 #endif
13087 
13088 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13089     case TARGET_NR_ioprio_set:
13090         return get_errno(ioprio_set(arg1, arg2, arg3));
13091 #endif
13092 
13093 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13094     case TARGET_NR_setns:
13095         return get_errno(setns(arg1, arg2));
13096 #endif
13097 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13098     case TARGET_NR_unshare:
13099         return get_errno(unshare(arg1));
13100 #endif
13101 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13102     case TARGET_NR_kcmp:
13103         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13104 #endif
13105 #ifdef TARGET_NR_swapcontext
13106     case TARGET_NR_swapcontext:
13107         /* PowerPC specific.  */
13108         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13109 #endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        /* args: const char *name, unsigned int flags */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        /* Drop any stale fd translator registered for this fd number. */
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        /* Command and flags need no guest/host translation. */
        return get_errno(membarrier(arg1, arg2));
#endif
13125 
13126 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13127     case TARGET_NR_copy_file_range:
13128         {
13129             loff_t inoff, outoff;
13130             loff_t *pinoff = NULL, *poutoff = NULL;
13131 
13132             if (arg2) {
13133                 if (get_user_u64(inoff, arg2)) {
13134                     return -TARGET_EFAULT;
13135                 }
13136                 pinoff = &inoff;
13137             }
13138             if (arg4) {
13139                 if (get_user_u64(outoff, arg4)) {
13140                     return -TARGET_EFAULT;
13141                 }
13142                 poutoff = &outoff;
13143             }
13144             /* Do not sign-extend the count parameter. */
13145             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13146                                                  (abi_ulong)arg5, arg6));
13147             if (!is_error(ret) && ret > 0) {
13148                 if (arg2) {
13149                     if (put_user_u64(inoff, arg2)) {
13150                         return -TARGET_EFAULT;
13151                     }
13152                 }
13153                 if (arg4) {
13154                     if (put_user_u64(outoff, arg4)) {
13155                         return -TARGET_EFAULT;
13156                     }
13157                 }
13158             }
13159         }
13160         return ret;
13161 #endif
13162 
13163 #if defined(TARGET_NR_pivot_root)
13164     case TARGET_NR_pivot_root:
13165         {
13166             void *p2;
13167             p = lock_user_string(arg1); /* new_root */
13168             p2 = lock_user_string(arg2); /* put_old */
13169             if (!p || !p2) {
13170                 ret = -TARGET_EFAULT;
13171             } else {
13172                 ret = get_errno(pivot_root(p, p2));
13173             }
13174             unlock_user(p2, arg2, 0);
13175             unlock_user(p, arg1, 0);
13176         }
13177         return ret;
13178 #endif
13179 
13180     default:
13181         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13182         return -TARGET_ENOSYS;
13183     }
13184     return ret;
13185 }
13186 
13187 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13188                     abi_long arg2, abi_long arg3, abi_long arg4,
13189                     abi_long arg5, abi_long arg6, abi_long arg7,
13190                     abi_long arg8)
13191 {
13192     CPUState *cpu = env_cpu(cpu_env);
13193     abi_long ret;
13194 
13195 #ifdef DEBUG_ERESTARTSYS
13196     /* Debug-only code for exercising the syscall-restart code paths
13197      * in the per-architecture cpu main loops: restart every syscall
13198      * the guest makes once before letting it through.
13199      */
13200     {
13201         static bool flag;
13202         flag = !flag;
13203         if (flag) {
13204             return -QEMU_ERESTARTSYS;
13205         }
13206     }
13207 #endif
13208 
13209     record_syscall_start(cpu, num, arg1,
13210                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13211 
13212     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13213         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13214     }
13215 
13216     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13217                       arg5, arg6, arg7, arg8);
13218 
13219     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13220         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13221                           arg3, arg4, arg5, arg6);
13222     }
13223 
13224     record_syscall_return(cpu, num, ret);
13225     return ret;
13226 }
13227