1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 
99 #ifdef HAVE_SYS_MOUNT_FSCONFIG
100 /*
101  * With glibc >= 2.36, linux/mount.h conflicts with sys/mount.h,
102  * which in turn prevents use of linux/fs.h. So we have to
103  * define the constants ourselves for now.
104  */
105 #define FS_IOC_GETFLAGS                _IOR('f', 1, long)
106 #define FS_IOC_SETFLAGS                _IOW('f', 2, long)
107 #define FS_IOC_GETVERSION              _IOR('v', 1, long)
108 #define FS_IOC_SETVERSION              _IOW('v', 2, long)
109 #define FS_IOC_FIEMAP                  _IOWR('f', 11, struct fiemap)
110 #define FS_IOC32_GETFLAGS              _IOR('f', 1, int)
111 #define FS_IOC32_SETFLAGS              _IOW('f', 2, int)
112 #define FS_IOC32_GETVERSION            _IOR('v', 1, int)
113 #define FS_IOC32_SETVERSION            _IOW('v', 2, int)
114 
115 #define BLKGETSIZE64 _IOR(0x12,114,size_t)
116 #define BLKDISCARD _IO(0x12,119)
117 #define BLKIOMIN _IO(0x12,120)
118 #define BLKIOOPT _IO(0x12,121)
119 #define BLKALIGNOFF _IO(0x12,122)
120 #define BLKPBSZGET _IO(0x12,123)
121 #define BLKDISCARDZEROES _IO(0x12,124)
122 #define BLKSECDISCARD _IO(0x12,125)
123 #define BLKROTATIONAL _IO(0x12,126)
124 #define BLKZEROOUT _IO(0x12,127)
125 
126 #define FIBMAP     _IO(0x00,1)
127 #define FIGETBSZ   _IO(0x00,2)
128 
129 struct file_clone_range {
130         __s64 src_fd;
131         __u64 src_offset;
132         __u64 src_length;
133         __u64 dest_offset;
134 };
135 
136 #define FICLONE         _IOW(0x94, 9, int)
137 #define FICLONERANGE    _IOW(0x94, 13, struct file_clone_range)
138 
139 #else
140 #include <linux/fs.h>
141 #endif
142 #include <linux/fd.h>
143 #if defined(CONFIG_FIEMAP)
144 #include <linux/fiemap.h>
145 #endif
146 #include <linux/fb.h>
147 #if defined(CONFIG_USBFS)
148 #include <linux/usbdevice_fs.h>
149 #include <linux/usb/ch9.h>
150 #endif
151 #include <linux/vt.h>
152 #include <linux/dm-ioctl.h>
153 #include <linux/reboot.h>
154 #include <linux/route.h>
155 #include <linux/filter.h>
156 #include <linux/blkpg.h>
157 #include <netpacket/packet.h>
158 #include <linux/netlink.h>
159 #include <linux/if_alg.h>
160 #include <linux/rtc.h>
161 #include <sound/asound.h>
162 #ifdef HAVE_BTRFS_H
163 #include <linux/btrfs.h>
164 #endif
165 #ifdef HAVE_DRM_H
166 #include <libdrm/drm.h>
167 #include <libdrm/i915_drm.h>
168 #endif
169 #include "linux_loop.h"
170 #include "uname.h"
171 
172 #include "qemu.h"
173 #include "user-internals.h"
174 #include "strace.h"
175 #include "signal-common.h"
176 #include "loader.h"
177 #include "user-mmap.h"
178 #include "user/safe-syscall.h"
179 #include "qemu/guest-random.h"
180 #include "qemu/selfmap.h"
181 #include "user/syscall-trace.h"
182 #include "special-errno.h"
183 #include "qapi/error.h"
184 #include "fd-trans.h"
185 #include "tcg/tcg.h"
186 #include "cpu_loop-common.h"
187 
188 #ifndef CLONE_IO
189 #define CLONE_IO                0x80000000      /* Clone io context */
190 #endif
191 
192 /* We can't directly call the host clone syscall, because this will
193  * badly confuse libc (breaking mutexes, for example). So we must
194  * divide clone flags into:
195  *  * flag combinations that look like pthread_create()
196  *  * flag combinations that look like fork()
197  *  * flags we can implement within QEMU itself
198  *  * flags we can't support and will return an error for
199  */
200 /* For thread creation, all these flags must be present; for
201  * fork, none must be present.
202  */
203 #define CLONE_THREAD_FLAGS                              \
204     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
205      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
206 
207 /* These flags are ignored:
208  * CLONE_DETACHED is now ignored by the kernel;
209  * CLONE_IO is just an optimisation hint to the I/O scheduler
210  */
211 #define CLONE_IGNORED_FLAGS                     \
212     (CLONE_DETACHED | CLONE_IO)
213 
214 /* Flags for fork which we can implement within QEMU itself */
215 #define CLONE_OPTIONAL_FORK_FLAGS               \
216     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
217      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
218 
219 /* Flags for thread creation which we can implement within QEMU itself */
220 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
221     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
222      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
223 
224 #define CLONE_INVALID_FORK_FLAGS                                        \
225     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
226 
227 #define CLONE_INVALID_THREAD_FLAGS                                      \
228     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
229        CLONE_IGNORED_FLAGS))
230 
231 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
232  * have almost all been allocated. We cannot support any of
233  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
234  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
235  * The checks against the invalid thread masks above will catch these.
236  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
237  */
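/*
 * Illustrative sketch only (not compiled): with the masks above, a clone
 * flags word can be classified roughly as follows. The helper name
 * clone_flags_kind() is hypothetical; the real dispatch happens in do_fork().
 */
#if 0
static int clone_flags_kind(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
        !(flags & CLONE_INVALID_THREAD_FLAGS)) {
        return 1;   /* looks like pthread_create(): emulate with a host thread */
    }
    if (!(flags & CLONE_THREAD_FLAGS) &&
        !(flags & CLONE_INVALID_FORK_FLAGS)) {
        return 0;   /* looks like fork(): emulate with a host fork() */
    }
    return -1;      /* unsupported combination: report an error to the guest */
}
#endif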
238 
239 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
240  * once. This exercises the codepaths for restart.
241  */
242 //#define DEBUG_ERESTARTSYS
243 
244 //#include <linux/msdos_fs.h>
245 #define VFAT_IOCTL_READDIR_BOTH \
246     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
247 #define VFAT_IOCTL_READDIR_SHORT \
248     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
249 
250 #undef _syscall0
251 #undef _syscall1
252 #undef _syscall2
253 #undef _syscall3
254 #undef _syscall4
255 #undef _syscall5
256 #undef _syscall6
257 
258 #define _syscall0(type,name)		\
259 static type name (void)			\
260 {					\
261 	return syscall(__NR_##name);	\
262 }
263 
264 #define _syscall1(type,name,type1,arg1)		\
265 static type name (type1 arg1)			\
266 {						\
267 	return syscall(__NR_##name, arg1);	\
268 }
269 
270 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
271 static type name (type1 arg1,type2 arg2)		\
272 {							\
273 	return syscall(__NR_##name, arg1, arg2);	\
274 }
275 
276 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
277 static type name (type1 arg1,type2 arg2,type3 arg3)		\
278 {								\
279 	return syscall(__NR_##name, arg1, arg2, arg3);		\
280 }
281 
282 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
283 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
284 {										\
285 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
286 }
287 
288 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
289 		  type5,arg5)							\
290 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
291 {										\
292 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
293 }
294 
295 
296 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
297 		  type5,arg5,type6,arg6)					\
298 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
299                   type6 arg6)							\
300 {										\
301 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
302 }
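/*
 * For illustration: a later use such as
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
 * expands to
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     { return syscall(__NR_sys_getdents, fd, dirp, count); }
 * which is why each wrapper's __NR_sys_* name is aliased back to the real
 * __NR_* syscall number just below.
 */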
303 
304 
305 #define __NR_sys_uname __NR_uname
306 #define __NR_sys_getcwd1 __NR_getcwd
307 #define __NR_sys_getdents __NR_getdents
308 #define __NR_sys_getdents64 __NR_getdents64
309 #define __NR_sys_getpriority __NR_getpriority
310 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
311 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
312 #define __NR_sys_syslog __NR_syslog
313 #if defined(__NR_futex)
314 # define __NR_sys_futex __NR_futex
315 #endif
316 #if defined(__NR_futex_time64)
317 # define __NR_sys_futex_time64 __NR_futex_time64
318 #endif
319 #define __NR_sys_statx __NR_statx
320 
321 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
322 #define __NR__llseek __NR_lseek
323 #endif
324 
325 /* Newer kernel ports have llseek() instead of _llseek() */
326 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
327 #define TARGET_NR__llseek TARGET_NR_llseek
328 #endif
329 
330 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
331 #ifndef TARGET_O_NONBLOCK_MASK
332 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
333 #endif
334 
335 #define __NR_sys_gettid __NR_gettid
336 _syscall0(int, sys_gettid)
337 
338 /* For the 64-bit guest on 32-bit host case we must emulate
339  * getdents using getdents64, because otherwise the host
340  * might hand us back more dirent records than we can fit
341  * into the guest buffer after structure format conversion.
342  * Otherwise we emulate getdents with getdents if the host has it.
343  */
344 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
345 #define EMULATE_GETDENTS_WITH_GETDENTS
346 #endif
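/*
 * Example: a 64-bit guest (TARGET_ABI_BITS 64) on a 32-bit host
 * (HOST_LONG_BITS 32) fails the check above, so its TARGET_NR_getdents is
 * serviced through the host getdents64 path declared below instead.
 */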
347 
348 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
349 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
350 #endif
351 #if (defined(TARGET_NR_getdents) && \
352       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
353     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
354 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
355 #endif
356 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
357 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
358           loff_t *, res, uint, wh);
359 #endif
360 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
361 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
362           siginfo_t *, uinfo)
363 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
364 #ifdef __NR_exit_group
365 _syscall1(int,exit_group,int,error_code)
366 #endif
367 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
368 #define __NR_sys_close_range __NR_close_range
369 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
370 #ifndef CLOSE_RANGE_CLOEXEC
371 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
372 #endif
373 #endif
374 #if defined(__NR_futex)
375 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
376           const struct timespec *,timeout,int *,uaddr2,int,val3)
377 #endif
378 #if defined(__NR_futex_time64)
379 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
380           const struct timespec *,timeout,int *,uaddr2,int,val3)
381 #endif
382 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
383 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
384 #endif
385 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
386 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
387                              unsigned int, flags);
388 #endif
389 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
390 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
391 #endif
392 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
393 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
394           unsigned long *, user_mask_ptr);
395 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
396 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
397           unsigned long *, user_mask_ptr);
398 /* sched_attr is not defined in glibc */
399 struct sched_attr {
400     uint32_t size;
401     uint32_t sched_policy;
402     uint64_t sched_flags;
403     int32_t sched_nice;
404     uint32_t sched_priority;
405     uint64_t sched_runtime;
406     uint64_t sched_deadline;
407     uint64_t sched_period;
408     uint32_t sched_util_min;
409     uint32_t sched_util_max;
410 };
411 #define __NR_sys_sched_getattr __NR_sched_getattr
412 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
413           unsigned int, size, unsigned int, flags);
414 #define __NR_sys_sched_setattr __NR_sched_setattr
415 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
416           unsigned int, flags);
417 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
418 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
419 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
420 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
421           const struct sched_param *, param);
422 #define __NR_sys_sched_getparam __NR_sched_getparam
423 _syscall2(int, sys_sched_getparam, pid_t, pid,
424           struct sched_param *, param);
425 #define __NR_sys_sched_setparam __NR_sched_setparam
426 _syscall2(int, sys_sched_setparam, pid_t, pid,
427           const struct sched_param *, param);
428 #define __NR_sys_getcpu __NR_getcpu
429 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
430 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
431           void *, arg);
432 _syscall2(int, capget, struct __user_cap_header_struct *, header,
433           struct __user_cap_data_struct *, data);
434 _syscall2(int, capset, struct __user_cap_header_struct *, header,
435           struct __user_cap_data_struct *, data);
436 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
437 _syscall2(int, ioprio_get, int, which, int, who)
438 #endif
439 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
440 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
441 #endif
442 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
443 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
444 #endif
445 
446 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
447 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
448           unsigned long, idx1, unsigned long, idx2)
449 #endif
450 
451 /*
452  * It is assumed that struct statx is architecture independent.
453  */
454 #if defined(TARGET_NR_statx) && defined(__NR_statx)
455 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
456           unsigned int, mask, struct target_statx *, statxbuf)
457 #endif
458 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
459 _syscall2(int, membarrier, int, cmd, int, flags)
460 #endif
461 
462 static const bitmask_transtbl fcntl_flags_tbl[] = {
463   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
464   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
465   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
466   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
467   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
468   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
469   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
470   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
471   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
472   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
473   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
474   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
475   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
476 #if defined(O_DIRECT)
477   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
478 #endif
479 #if defined(O_NOATIME)
480   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
481 #endif
482 #if defined(O_CLOEXEC)
483   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
484 #endif
485 #if defined(O_PATH)
486   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
487 #endif
488 #if defined(O_TMPFILE)
489   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
490 #endif
491   /* Don't terminate the list prematurely on 64-bit host+guest.  */
492 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
493   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
494 #endif
495   { 0, 0, 0, 0 }
496 };
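/*
 * Informal sketch of how the table is consumed: each row is
 *     { target_mask, target_bits, host_mask, host_bits }
 * and the bitmask translation helpers used elsewhere in this file do,
 * for every row,
 *     if ((target_flags & target_mask) == target_bits) {
 *         host_flags |= host_bits;
 *     }
 * (and the mirror image for the host-to-target direction), so for example a
 * guest TARGET_O_WRONLY within TARGET_O_ACCMODE becomes a host O_WRONLY.
 */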
497 
498 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
499 
500 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
501 #if defined(__NR_utimensat)
502 #define __NR_sys_utimensat __NR_utimensat
503 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
504           const struct timespec *,tsp,int,flags)
505 #else
506 static int sys_utimensat(int dirfd, const char *pathname,
507                          const struct timespec times[2], int flags)
508 {
509     errno = ENOSYS;
510     return -1;
511 }
512 #endif
513 #endif /* TARGET_NR_utimensat */
514 
515 #ifdef TARGET_NR_renameat2
516 #if defined(__NR_renameat2)
517 #define __NR_sys_renameat2 __NR_renameat2
518 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
519           const char *, new, unsigned int, flags)
520 #else
521 static int sys_renameat2(int oldfd, const char *old,
522                          int newfd, const char *new, int flags)
523 {
524     if (flags == 0) {
525         return renameat(oldfd, old, newfd, new);
526     }
527     errno = ENOSYS;
528     return -1;
529 }
530 #endif
531 #endif /* TARGET_NR_renameat2 */
532 
533 #ifdef CONFIG_INOTIFY
534 #include <sys/inotify.h>
535 #else
536 /* Userspace can usually survive runtime without inotify */
537 #undef TARGET_NR_inotify_init
538 #undef TARGET_NR_inotify_init1
539 #undef TARGET_NR_inotify_add_watch
540 #undef TARGET_NR_inotify_rm_watch
541 #endif /* CONFIG_INOTIFY  */
542 
543 #if defined(TARGET_NR_prlimit64)
544 #ifndef __NR_prlimit64
545 # define __NR_prlimit64 -1
546 #endif
547 #define __NR_sys_prlimit64 __NR_prlimit64
548 /* The glibc rlimit structure may not be the one used by the underlying syscall */
549 struct host_rlimit64 {
550     uint64_t rlim_cur;
551     uint64_t rlim_max;
552 };
553 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
554           const struct host_rlimit64 *, new_limit,
555           struct host_rlimit64 *, old_limit)
556 #endif
557 
558 
559 #if defined(TARGET_NR_timer_create)
560 /* Maximum of 32 active POSIX timers allowed at any one time. */
561 #define GUEST_TIMER_MAX 32
562 static timer_t g_posix_timers[GUEST_TIMER_MAX];
563 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
564 
565 static inline int next_free_host_timer(void)
566 {
567     int k;
568     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
569         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
570             return k;
571         }
572     }
573     return -1;
574 }
575 
576 static inline void free_host_timer_slot(int id)
577 {
578     qatomic_store_release(g_posix_timer_allocated + id, 0);
579 }
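/*
 * Note (added for clarity): the allocator above is lock-free. qatomic_xchg()
 * writes 1 and returns the previous value, so only the caller that observes
 * 0 owns the slot; free_host_timer_slot() uses a store-release so writes to
 * g_posix_timers[id] made before freeing are visible before the slot can be
 * handed out again.
 */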
580 #endif
581 
582 static inline int host_to_target_errno(int host_errno)
583 {
584     switch (host_errno) {
585 #define E(X)  case X: return TARGET_##X;
586 #include "errnos.c.inc"
587 #undef E
588     default:
589         return host_errno;
590     }
591 }
592 
593 static inline int target_to_host_errno(int target_errno)
594 {
595     switch (target_errno) {
596 #define E(X)  case TARGET_##X: return X;
597 #include "errnos.c.inc"
598 #undef E
599     default:
600         return target_errno;
601     }
602 }
603 
604 abi_long get_errno(abi_long ret)
605 {
606     if (ret == -1)
607         return -host_to_target_errno(errno);
608     else
609         return ret;
610 }
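/*
 * Worked example of the errno convention used throughout this file: in
 * host_to_target_errno(), an E(ENOSYS) line pulled in from errnos.c.inc
 * expands to "case ENOSYS: return TARGET_ENOSYS;", and get_errno() folds a
 * failing host call into that scheme, e.g.
 *     ret = get_errno(write(fd, buf, len));
 * leaves the byte count in ret on success and -TARGET_EFAULT (etc.) on
 * failure, which is the form the is_error() checks elsewhere expect.
 */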
611 
612 const char *target_strerror(int err)
613 {
614     if (err == QEMU_ERESTARTSYS) {
615         return "To be restarted";
616     }
617     if (err == QEMU_ESIGRETURN) {
618         return "Successful exit from sigreturn";
619     }
620 
621     return strerror(target_to_host_errno(err));
622 }
623 
624 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
625 {
626     int i;
627     uint8_t b;
628     if (usize <= ksize) {
629         return 1;
630     }
631     for (i = ksize; i < usize; i++) {
632         if (get_user_u8(b, addr + i)) {
633             return -TARGET_EFAULT;
634         }
635         if (b != 0) {
636             return 0;
637         }
638     }
639     return 1;
640 }
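/*
 * Usage sketch (sizes are hypothetical): when a guest hands in a structure
 * larger than the version QEMU knows about, e.g. a grown sched_attr,
 *     ret = check_zeroed_user(attr_addr, sizeof(struct sched_attr), usize);
 * returns 1 if the extra tail bytes are all zero (safe to ignore), 0 if the
 * guest really used fields we do not understand, and -TARGET_EFAULT if the
 * tail could not be read.
 */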
641 
642 #define safe_syscall0(type, name) \
643 static type safe_##name(void) \
644 { \
645     return safe_syscall(__NR_##name); \
646 }
647 
648 #define safe_syscall1(type, name, type1, arg1) \
649 static type safe_##name(type1 arg1) \
650 { \
651     return safe_syscall(__NR_##name, arg1); \
652 }
653 
654 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
655 static type safe_##name(type1 arg1, type2 arg2) \
656 { \
657     return safe_syscall(__NR_##name, arg1, arg2); \
658 }
659 
660 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
661 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
662 { \
663     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
664 }
665 
666 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
667     type4, arg4) \
668 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
669 { \
670     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
671 }
672 
673 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
674     type4, arg4, type5, arg5) \
675 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
676     type5 arg5) \
677 { \
678     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
679 }
680 
681 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
682     type4, arg4, type5, arg5, type6, arg6) \
683 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
684     type5 arg5, type6 arg6) \
685 { \
686     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
687 }
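/*
 * For illustration, the instantiation below
 *     safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 * expands to
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     { return safe_syscall(__NR_read, fd, buff, count); }
 * i.e. a blocking host syscall issued through safe_syscall(), so that a
 * guest signal arriving just before or during the call makes it fail with
 * QEMU_ERESTARTSYS instead of being lost, allowing the syscall to be
 * restarted.
 */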
688 
689 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
690 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
691 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
692               int, flags, mode_t, mode)
693 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
694 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
695               struct rusage *, rusage)
696 #endif
697 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
698               int, options, struct rusage *, rusage)
699 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
700 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
701     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
702 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
703               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
704 #endif
705 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
706 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
707               struct timespec *, tsp, const sigset_t *, sigmask,
708               size_t, sigsetsize)
709 #endif
710 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
711               int, maxevents, int, timeout, const sigset_t *, sigmask,
712               size_t, sigsetsize)
713 #if defined(__NR_futex)
714 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
715               const struct timespec *,timeout,int *,uaddr2,int,val3)
716 #endif
717 #if defined(__NR_futex_time64)
718 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
719               const struct timespec *,timeout,int *,uaddr2,int,val3)
720 #endif
721 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
722 safe_syscall2(int, kill, pid_t, pid, int, sig)
723 safe_syscall2(int, tkill, int, tid, int, sig)
724 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
725 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
726 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
727 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
728               unsigned long, pos_l, unsigned long, pos_h)
729 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
730               unsigned long, pos_l, unsigned long, pos_h)
731 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
732               socklen_t, addrlen)
733 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
734               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
735 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
736               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
737 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
738 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
739 safe_syscall2(int, flock, int, fd, int, operation)
740 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
741 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
742               const struct timespec *, uts, size_t, sigsetsize)
743 #endif
744 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
745               int, flags)
746 #if defined(TARGET_NR_nanosleep)
747 safe_syscall2(int, nanosleep, const struct timespec *, req,
748               struct timespec *, rem)
749 #endif
750 #if defined(TARGET_NR_clock_nanosleep) || \
751     defined(TARGET_NR_clock_nanosleep_time64)
752 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
753               const struct timespec *, req, struct timespec *, rem)
754 #endif
755 #ifdef __NR_ipc
756 #ifdef __s390x__
757 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
758               void *, ptr)
759 #else
760 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
761               void *, ptr, long, fifth)
762 #endif
763 #endif
764 #ifdef __NR_msgsnd
765 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
766               int, flags)
767 #endif
768 #ifdef __NR_msgrcv
769 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
770               long, msgtype, int, flags)
771 #endif
772 #ifdef __NR_semtimedop
773 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
774               unsigned, nsops, const struct timespec *, timeout)
775 #endif
776 #if defined(TARGET_NR_mq_timedsend) || \
777     defined(TARGET_NR_mq_timedsend_time64)
778 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
779               size_t, len, unsigned, prio, const struct timespec *, timeout)
780 #endif
781 #if defined(TARGET_NR_mq_timedreceive) || \
782     defined(TARGET_NR_mq_timedreceive_time64)
783 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
784               size_t, len, unsigned *, prio, const struct timespec *, timeout)
785 #endif
786 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
787 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
788               int, outfd, loff_t *, poutoff, size_t, length,
789               unsigned int, flags)
790 #endif
791 
792 /* We do ioctl like this rather than via safe_syscall3 to preserve the
793  * "third argument might be integer or pointer or not present" behaviour of
794  * the libc function.
795  */
796 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
797 /* Similarly for fcntl. Note that callers must always:
798  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
799  *  use the flock64 struct rather than unsuffixed flock
800  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
801  */
802 #ifdef __NR_fcntl64
803 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
804 #else
805 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
806 #endif
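/*
 * Usage sketch following the note above (illustrative only):
 *     struct flock64 fl64;
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 * i.e. always the 64-bit command and structure, so the same code works
 * whether safe_fcntl() maps to __NR_fcntl64 (32-bit hosts) or __NR_fcntl.
 */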
807 
808 static inline int host_to_target_sock_type(int host_type)
809 {
810     int target_type;
811 
812     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
813     case SOCK_DGRAM:
814         target_type = TARGET_SOCK_DGRAM;
815         break;
816     case SOCK_STREAM:
817         target_type = TARGET_SOCK_STREAM;
818         break;
819     default:
820         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
821         break;
822     }
823 
824 #if defined(SOCK_CLOEXEC)
825     if (host_type & SOCK_CLOEXEC) {
826         target_type |= TARGET_SOCK_CLOEXEC;
827     }
828 #endif
829 
830 #if defined(SOCK_NONBLOCK)
831     if (host_type & SOCK_NONBLOCK) {
832         target_type |= TARGET_SOCK_NONBLOCK;
833     }
834 #endif
835 
836     return target_type;
837 }
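/*
 * Example: a host value of (SOCK_STREAM | SOCK_NONBLOCK) is returned to the
 * guest as (TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK).
 */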
838 
839 static abi_ulong target_brk;
840 static abi_ulong target_original_brk;
841 static abi_ulong brk_page;
842 
843 void target_set_brk(abi_ulong new_brk)
844 {
845     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
846     brk_page = HOST_PAGE_ALIGN(target_brk);
847 }
848 
849 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
850 #define DEBUGF_BRK(message, args...)
851 
852 /* do_brk() must return target values and target errnos. */
853 abi_long do_brk(abi_ulong new_brk)
854 {
855     abi_long mapped_addr;
856     abi_ulong new_alloc_size;
857 
858     /* brk pointers are always untagged */
859 
860     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
861 
862     if (!new_brk) {
863         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
864         return target_brk;
865     }
866     if (new_brk < target_original_brk) {
867         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
868                    target_brk);
869         return target_brk;
870     }
871 
872     /* If the new brk is less than the highest page reserved to the
873      * target heap allocation, set it and we're almost done...  */
874     if (new_brk <= brk_page) {
875         /* Heap contents are initialized to zero, as for anonymous
876          * mapped pages.  */
877         if (new_brk > target_brk) {
878             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
879         }
880         target_brk = new_brk;
881         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
882         return target_brk;
883     }
884 
885     /* We need to allocate more memory after the brk... Note that
886      * we don't use MAP_FIXED because that will map over the top of
887      * any existing mapping (like the one with the host libc or qemu
888      * itself); instead we treat "mapped but at wrong address" as
889      * a failure and unmap again.
890      */
891     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
892     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
893                                         PROT_READ|PROT_WRITE,
894                                         MAP_ANON|MAP_PRIVATE, 0, 0));
895 
896     if (mapped_addr == brk_page) {
897         /* Heap contents are initialized to zero, as for anonymous
898          * mapped pages.  Technically the new pages are already
899          * initialized to zero since they *are* anonymous mapped
900          * pages, however we have to take care with the contents that
901          * come from the remaining part of the previous page: it may
902          * contains garbage data due to a previous heap usage (grown
903          * then shrunken).  */
904         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
905 
906         target_brk = new_brk;
907         brk_page = HOST_PAGE_ALIGN(target_brk);
908         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
909             target_brk);
910         return target_brk;
911     } else if (mapped_addr != -1) {
912         /* Mapped but at wrong address, meaning there wasn't actually
913          * enough space for this brk.
914          */
915         target_munmap(mapped_addr, new_alloc_size);
916         mapped_addr = -1;
917         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
918     }
919     else {
920         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
921     }
922 
923 #if defined(TARGET_ALPHA)
924     /* We (partially) emulate OSF/1 on Alpha, which requires we
925        return a proper errno, not an unchanged brk value.  */
926     return -TARGET_ENOMEM;
927 #endif
928     /* For everything else, return the previous break. */
929     return target_brk;
930 }
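/*
 * Worked example (hypothetical addresses, 4 KiB host pages): with
 * target_brk = 0x500800 and brk_page = 0x501000, a guest brk(0x500c00) only
 * takes the memset path above; brk(0x503000) instead asks target_mmap() for
 * two more pages at 0x501000, and if the kernel can only place them
 * elsewhere the mapping is undone and the old break (or -TARGET_ENOMEM on
 * Alpha) is returned.
 */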
931 
932 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
933     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
934 static inline abi_long copy_from_user_fdset(fd_set *fds,
935                                             abi_ulong target_fds_addr,
936                                             int n)
937 {
938     int i, nw, j, k;
939     abi_ulong b, *target_fds;
940 
941     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
942     if (!(target_fds = lock_user(VERIFY_READ,
943                                  target_fds_addr,
944                                  sizeof(abi_ulong) * nw,
945                                  1)))
946         return -TARGET_EFAULT;
947 
948     FD_ZERO(fds);
949     k = 0;
950     for (i = 0; i < nw; i++) {
951         /* grab the abi_ulong */
952         __get_user(b, &target_fds[i]);
953         for (j = 0; j < TARGET_ABI_BITS; j++) {
954             /* check the bit inside the abi_ulong */
955             if ((b >> j) & 1)
956                 FD_SET(k, fds);
957             k++;
958         }
959     }
960 
961     unlock_user(target_fds, target_fds_addr, 0);
962 
963     return 0;
964 }
965 
966 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
967                                                  abi_ulong target_fds_addr,
968                                                  int n)
969 {
970     if (target_fds_addr) {
971         if (copy_from_user_fdset(fds, target_fds_addr, n))
972             return -TARGET_EFAULT;
973         *fds_ptr = fds;
974     } else {
975         *fds_ptr = NULL;
976     }
977     return 0;
978 }
979 
980 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
981                                           const fd_set *fds,
982                                           int n)
983 {
984     int i, nw, j, k;
985     abi_long v;
986     abi_ulong *target_fds;
987 
988     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
989     if (!(target_fds = lock_user(VERIFY_WRITE,
990                                  target_fds_addr,
991                                  sizeof(abi_ulong) * nw,
992                                  0)))
993         return -TARGET_EFAULT;
994 
995     k = 0;
996     for (i = 0; i < nw; i++) {
997         v = 0;
998         for (j = 0; j < TARGET_ABI_BITS; j++) {
999             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1000             k++;
1001         }
1002         __put_user(v, &target_fds[i]);
1003     }
1004 
1005     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1006 
1007     return 0;
1008 }
1009 #endif
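/*
 * Layout reminder (illustrative): a guest fd_set is an array of abi_ulong,
 * so descriptor k lives in word k / TARGET_ABI_BITS at bit
 * k % TARGET_ABI_BITS. For a 32-bit guest, fd 33 is bit 1 of target_fds[1];
 * the loops above walk the words with __get_user()/__put_user() so guest
 * byte order is handled as well.
 */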
1010 
1011 #if defined(__alpha__)
1012 #define HOST_HZ 1024
1013 #else
1014 #define HOST_HZ 100
1015 #endif
1016 
1017 static inline abi_long host_to_target_clock_t(long ticks)
1018 {
1019 #if HOST_HZ == TARGET_HZ
1020     return ticks;
1021 #else
1022     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1023 #endif
1024 }
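/*
 * Example: on an Alpha host (HOST_HZ 1024) reporting 2048 ticks, a guest
 * built with TARGET_HZ 100 is given 2048 * 100 / 1024 = 200 ticks, i.e. the
 * same two seconds expressed in its own clock-tick unit.
 */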
1025 
1026 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1027                                              const struct rusage *rusage)
1028 {
1029     struct target_rusage *target_rusage;
1030 
1031     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1032         return -TARGET_EFAULT;
1033     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1034     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1035     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1036     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1037     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1038     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1039     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1040     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1041     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1042     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1043     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1044     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1045     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1046     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1047     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1048     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1049     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1050     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1051     unlock_user_struct(target_rusage, target_addr, 1);
1052 
1053     return 0;
1054 }
1055 
1056 #ifdef TARGET_NR_setrlimit
1057 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1058 {
1059     abi_ulong target_rlim_swap;
1060     rlim_t result;
1061 
1062     target_rlim_swap = tswapal(target_rlim);
1063     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1064         return RLIM_INFINITY;
1065 
1066     result = target_rlim_swap;
1067     if (target_rlim_swap != (rlim_t)result)
1068         return RLIM_INFINITY;
1069 
1070     return result;
1071 }
1072 #endif
1073 
1074 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1075 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1076 {
1077     abi_ulong target_rlim_swap;
1078     abi_ulong result;
1079 
1080     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1081         target_rlim_swap = TARGET_RLIM_INFINITY;
1082     else
1083         target_rlim_swap = rlim;
1084     result = tswapal(target_rlim_swap);
1085 
1086     return result;
1087 }
1088 #endif
1089 
1090 static inline int target_to_host_resource(int code)
1091 {
1092     switch (code) {
1093     case TARGET_RLIMIT_AS:
1094         return RLIMIT_AS;
1095     case TARGET_RLIMIT_CORE:
1096         return RLIMIT_CORE;
1097     case TARGET_RLIMIT_CPU:
1098         return RLIMIT_CPU;
1099     case TARGET_RLIMIT_DATA:
1100         return RLIMIT_DATA;
1101     case TARGET_RLIMIT_FSIZE:
1102         return RLIMIT_FSIZE;
1103     case TARGET_RLIMIT_LOCKS:
1104         return RLIMIT_LOCKS;
1105     case TARGET_RLIMIT_MEMLOCK:
1106         return RLIMIT_MEMLOCK;
1107     case TARGET_RLIMIT_MSGQUEUE:
1108         return RLIMIT_MSGQUEUE;
1109     case TARGET_RLIMIT_NICE:
1110         return RLIMIT_NICE;
1111     case TARGET_RLIMIT_NOFILE:
1112         return RLIMIT_NOFILE;
1113     case TARGET_RLIMIT_NPROC:
1114         return RLIMIT_NPROC;
1115     case TARGET_RLIMIT_RSS:
1116         return RLIMIT_RSS;
1117     case TARGET_RLIMIT_RTPRIO:
1118         return RLIMIT_RTPRIO;
1119 #ifdef RLIMIT_RTTIME
1120     case TARGET_RLIMIT_RTTIME:
1121         return RLIMIT_RTTIME;
1122 #endif
1123     case TARGET_RLIMIT_SIGPENDING:
1124         return RLIMIT_SIGPENDING;
1125     case TARGET_RLIMIT_STACK:
1126         return RLIMIT_STACK;
1127     default:
1128         return code;
1129     }
1130 }
1131 
1132 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1133                                               abi_ulong target_tv_addr)
1134 {
1135     struct target_timeval *target_tv;
1136 
1137     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1138         return -TARGET_EFAULT;
1139     }
1140 
1141     __get_user(tv->tv_sec, &target_tv->tv_sec);
1142     __get_user(tv->tv_usec, &target_tv->tv_usec);
1143 
1144     unlock_user_struct(target_tv, target_tv_addr, 0);
1145 
1146     return 0;
1147 }
1148 
1149 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1150                                             const struct timeval *tv)
1151 {
1152     struct target_timeval *target_tv;
1153 
1154     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1155         return -TARGET_EFAULT;
1156     }
1157 
1158     __put_user(tv->tv_sec, &target_tv->tv_sec);
1159     __put_user(tv->tv_usec, &target_tv->tv_usec);
1160 
1161     unlock_user_struct(target_tv, target_tv_addr, 1);
1162 
1163     return 0;
1164 }
1165 
1166 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1167 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1168                                                 abi_ulong target_tv_addr)
1169 {
1170     struct target__kernel_sock_timeval *target_tv;
1171 
1172     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1173         return -TARGET_EFAULT;
1174     }
1175 
1176     __get_user(tv->tv_sec, &target_tv->tv_sec);
1177     __get_user(tv->tv_usec, &target_tv->tv_usec);
1178 
1179     unlock_user_struct(target_tv, target_tv_addr, 0);
1180 
1181     return 0;
1182 }
1183 #endif
1184 
1185 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1186                                               const struct timeval *tv)
1187 {
1188     struct target__kernel_sock_timeval *target_tv;
1189 
1190     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1191         return -TARGET_EFAULT;
1192     }
1193 
1194     __put_user(tv->tv_sec, &target_tv->tv_sec);
1195     __put_user(tv->tv_usec, &target_tv->tv_usec);
1196 
1197     unlock_user_struct(target_tv, target_tv_addr, 1);
1198 
1199     return 0;
1200 }
1201 
1202 #if defined(TARGET_NR_futex) || \
1203     defined(TARGET_NR_rt_sigtimedwait) || \
1204     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1205     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1206     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1207     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1208     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1209     defined(TARGET_NR_timer_settime) || \
1210     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1211 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1212                                                abi_ulong target_addr)
1213 {
1214     struct target_timespec *target_ts;
1215 
1216     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1217         return -TARGET_EFAULT;
1218     }
1219     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1220     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1221     unlock_user_struct(target_ts, target_addr, 0);
1222     return 0;
1223 }
1224 #endif
1225 
1226 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1227     defined(TARGET_NR_timer_settime64) || \
1228     defined(TARGET_NR_mq_timedsend_time64) || \
1229     defined(TARGET_NR_mq_timedreceive_time64) || \
1230     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1231     defined(TARGET_NR_clock_nanosleep_time64) || \
1232     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1233     defined(TARGET_NR_utimensat) || \
1234     defined(TARGET_NR_utimensat_time64) || \
1235     defined(TARGET_NR_semtimedop_time64) || \
1236     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1237 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1238                                                  abi_ulong target_addr)
1239 {
1240     struct target__kernel_timespec *target_ts;
1241 
1242     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1243         return -TARGET_EFAULT;
1244     }
1245     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1246     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1247     /* in 32bit mode, this drops the padding */
1248     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1249     unlock_user_struct(target_ts, target_addr, 0);
1250     return 0;
1251 }
1252 #endif
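/*
 * Note on the cast above: for a 32-bit guest, the __kernel_timespec tv_nsec
 * field sits in a 64-bit slot whose upper half is only padding, so the value
 * is narrowed through abi_long and widened back to long to discard those
 * padding bits before the host syscall sees it.
 */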
1253 
1254 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1255                                                struct timespec *host_ts)
1256 {
1257     struct target_timespec *target_ts;
1258 
1259     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1260         return -TARGET_EFAULT;
1261     }
1262     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1263     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1264     unlock_user_struct(target_ts, target_addr, 1);
1265     return 0;
1266 }
1267 
1268 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1269                                                  struct timespec *host_ts)
1270 {
1271     struct target__kernel_timespec *target_ts;
1272 
1273     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1274         return -TARGET_EFAULT;
1275     }
1276     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1277     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1278     unlock_user_struct(target_ts, target_addr, 1);
1279     return 0;
1280 }
1281 
1282 #if defined(TARGET_NR_gettimeofday)
1283 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1284                                              struct timezone *tz)
1285 {
1286     struct target_timezone *target_tz;
1287 
1288     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1289         return -TARGET_EFAULT;
1290     }
1291 
1292     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1293     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1294 
1295     unlock_user_struct(target_tz, target_tz_addr, 1);
1296 
1297     return 0;
1298 }
1299 #endif
1300 
1301 #if defined(TARGET_NR_settimeofday)
1302 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1303                                                abi_ulong target_tz_addr)
1304 {
1305     struct target_timezone *target_tz;
1306 
1307     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1308         return -TARGET_EFAULT;
1309     }
1310 
1311     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1312     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1313 
1314     unlock_user_struct(target_tz, target_tz_addr, 0);
1315 
1316     return 0;
1317 }
1318 #endif
1319 
1320 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1321 #include <mqueue.h>
1322 
1323 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1324                                               abi_ulong target_mq_attr_addr)
1325 {
1326     struct target_mq_attr *target_mq_attr;
1327 
1328     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1329                           target_mq_attr_addr, 1))
1330         return -TARGET_EFAULT;
1331 
1332     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1333     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1334     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1335     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1336 
1337     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1338 
1339     return 0;
1340 }
1341 
1342 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1343                                             const struct mq_attr *attr)
1344 {
1345     struct target_mq_attr *target_mq_attr;
1346 
1347     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1348                           target_mq_attr_addr, 0))
1349         return -TARGET_EFAULT;
1350 
1351     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1352     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1353     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1354     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1355 
1356     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1357 
1358     return 0;
1359 }
1360 #endif
1361 
1362 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1363 /* do_select() must return target values and target errnos. */
1364 static abi_long do_select(int n,
1365                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1366                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1367 {
1368     fd_set rfds, wfds, efds;
1369     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1370     struct timeval tv;
1371     struct timespec ts, *ts_ptr;
1372     abi_long ret;
1373 
1374     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1375     if (ret) {
1376         return ret;
1377     }
1378     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1379     if (ret) {
1380         return ret;
1381     }
1382     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1383     if (ret) {
1384         return ret;
1385     }
1386 
1387     if (target_tv_addr) {
1388         if (copy_from_user_timeval(&tv, target_tv_addr))
1389             return -TARGET_EFAULT;
1390         ts.tv_sec = tv.tv_sec;
1391         ts.tv_nsec = tv.tv_usec * 1000;
1392         ts_ptr = &ts;
1393     } else {
1394         ts_ptr = NULL;
1395     }
1396 
1397     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1398                                   ts_ptr, NULL));
1399 
1400     if (!is_error(ret)) {
1401         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1402             return -TARGET_EFAULT;
1403         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1404             return -TARGET_EFAULT;
1405         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1406             return -TARGET_EFAULT;
1407 
1408         if (target_tv_addr) {
1409             tv.tv_sec = ts.tv_sec;
1410             tv.tv_usec = ts.tv_nsec / 1000;
1411             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1412                 return -TARGET_EFAULT;
1413             }
1414         }
1415     }
1416 
1417     return ret;
1418 }
1419 
1420 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1421 static abi_long do_old_select(abi_ulong arg1)
1422 {
1423     struct target_sel_arg_struct *sel;
1424     abi_ulong inp, outp, exp, tvp;
1425     long nsel;
1426 
1427     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1428         return -TARGET_EFAULT;
1429     }
1430 
1431     nsel = tswapal(sel->n);
1432     inp = tswapal(sel->inp);
1433     outp = tswapal(sel->outp);
1434     exp = tswapal(sel->exp);
1435     tvp = tswapal(sel->tvp);
1436 
1437     unlock_user_struct(sel, arg1, 0);
1438 
1439     return do_select(nsel, inp, outp, exp, tvp);
1440 }
1441 #endif
1442 #endif
1443 
1444 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1445 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1446                             abi_long arg4, abi_long arg5, abi_long arg6,
1447                             bool time64)
1448 {
1449     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1450     fd_set rfds, wfds, efds;
1451     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1452     struct timespec ts, *ts_ptr;
1453     abi_long ret;
1454 
1455     /*
1456      * The 6th arg is actually two args smashed together,
1457      * so we cannot use the C library.
1458      */
1459     struct {
1460         sigset_t *set;
1461         size_t size;
1462     } sig, *sig_ptr;
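    /*
     * Concretely (illustrative): guest memory at arg6 holds two abi_ulong
     * words, { pointer to the guest sigset, size of that sigset }; both are
     * read below with tswapal() before the mask is converted and applied.
     */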
1463 
1464     abi_ulong arg_sigset, arg_sigsize, *arg7;
1465 
1466     n = arg1;
1467     rfd_addr = arg2;
1468     wfd_addr = arg3;
1469     efd_addr = arg4;
1470     ts_addr = arg5;
1471 
1472     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1473     if (ret) {
1474         return ret;
1475     }
1476     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1477     if (ret) {
1478         return ret;
1479     }
1480     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1481     if (ret) {
1482         return ret;
1483     }
1484 
1485     /*
1486      * This takes a timespec, and not a timeval, so we cannot
1487      * use the do_select() helper ...
1488      */
1489     if (ts_addr) {
1490         if (time64) {
1491             if (target_to_host_timespec64(&ts, ts_addr)) {
1492                 return -TARGET_EFAULT;
1493             }
1494         } else {
1495             if (target_to_host_timespec(&ts, ts_addr)) {
1496                 return -TARGET_EFAULT;
1497             }
1498         }
1499         ts_ptr = &ts;
1500     } else {
1501         ts_ptr = NULL;
1502     }
1503 
1504     /* Extract the two packed args for the sigset */
1505     sig_ptr = NULL;
1506     if (arg6) {
1507         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1508         if (!arg7) {
1509             return -TARGET_EFAULT;
1510         }
1511         arg_sigset = tswapal(arg7[0]);
1512         arg_sigsize = tswapal(arg7[1]);
1513         unlock_user(arg7, arg6, 0);
1514 
1515         if (arg_sigset) {
1516             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1517             if (ret != 0) {
1518                 return ret;
1519             }
1520             sig_ptr = &sig;
1521             sig.size = SIGSET_T_SIZE;
1522         }
1523     }
1524 
1525     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1526                                   ts_ptr, sig_ptr));
1527 
1528     if (sig_ptr) {
1529         finish_sigsuspend_mask(ret);
1530     }
1531 
1532     if (!is_error(ret)) {
1533         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1534             return -TARGET_EFAULT;
1535         }
1536         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1537             return -TARGET_EFAULT;
1538         }
1539         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1540             return -TARGET_EFAULT;
1541         }
1542         if (time64) {
1543             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1544                 return -TARGET_EFAULT;
1545             }
1546         } else {
1547             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1548                 return -TARGET_EFAULT;
1549             }
1550         }
1551     }
1552     return ret;
1553 }
1554 #endif
1555 
1556 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1557     defined(TARGET_NR_ppoll_time64)
1558 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1559                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1560 {
1561     struct target_pollfd *target_pfd;
1562     unsigned int nfds = arg2;
1563     struct pollfd *pfd;
1564     unsigned int i;
1565     abi_long ret;
1566 
1567     pfd = NULL;
1568     target_pfd = NULL;
1569     if (nfds) {
1570         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1571             return -TARGET_EINVAL;
1572         }
1573         target_pfd = lock_user(VERIFY_WRITE, arg1,
1574                                sizeof(struct target_pollfd) * nfds, 1);
1575         if (!target_pfd) {
1576             return -TARGET_EFAULT;
1577         }
1578 
1579         pfd = alloca(sizeof(struct pollfd) * nfds);
1580         for (i = 0; i < nfds; i++) {
1581             pfd[i].fd = tswap32(target_pfd[i].fd);
1582             pfd[i].events = tswap16(target_pfd[i].events);
1583         }
1584     }
1585     if (ppoll) {
1586         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1587         sigset_t *set = NULL;
1588 
1589         if (arg3) {
1590             if (time64) {
1591                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1592                     unlock_user(target_pfd, arg1, 0);
1593                     return -TARGET_EFAULT;
1594                 }
1595             } else {
1596                 if (target_to_host_timespec(timeout_ts, arg3)) {
1597                     unlock_user(target_pfd, arg1, 0);
1598                     return -TARGET_EFAULT;
1599                 }
1600             }
1601         } else {
1602             timeout_ts = NULL;
1603         }
1604 
1605         if (arg4) {
1606             ret = process_sigsuspend_mask(&set, arg4, arg5);
1607             if (ret != 0) {
1608                 unlock_user(target_pfd, arg1, 0);
1609                 return ret;
1610             }
1611         }
1612 
1613         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1614                                    set, SIGSET_T_SIZE));
1615 
1616         if (set) {
1617             finish_sigsuspend_mask(ret);
1618         }
1619         if (!is_error(ret) && arg3) {
1620             if (time64) {
1621                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1622                     return -TARGET_EFAULT;
1623                 }
1624             } else {
1625                 if (host_to_target_timespec(arg3, timeout_ts)) {
1626                     return -TARGET_EFAULT;
1627                 }
1628             }
1629         }
1630     } else {
1631           struct timespec ts, *pts;
1632 
1633           if (arg3 >= 0) {
1634               /* Convert ms to secs, ns */
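              /* e.g. a 2500 ms timeout becomes tv_sec = 2, tv_nsec = 500000000 */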
1635               ts.tv_sec = arg3 / 1000;
1636               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1637               pts = &ts;
1638           } else {
1639               /* A negative poll() timeout means "infinite" */
1640               pts = NULL;
1641           }
1642           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1643     }
1644 
1645     if (!is_error(ret)) {
1646         for (i = 0; i < nfds; i++) {
1647             target_pfd[i].revents = tswap16(pfd[i].revents);
1648         }
1649     }
1650     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1651     return ret;
1652 }
1653 #endif
1654 
1655 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1656                         int flags, int is_pipe2)
1657 {
1658     int host_pipe[2];
1659     abi_long ret;
1660     ret = pipe2(host_pipe, flags);
1661 
1662     if (is_error(ret))
1663         return get_errno(ret);
1664 
1665     /* Several targets have special calling conventions for the original
1666        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
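    /* On those targets the original pipe() returns the read end as the
       syscall result and the write end in a second result register, as
       the per-target cases below show.  */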
1667     if (!is_pipe2) {
1668 #if defined(TARGET_ALPHA)
1669         cpu_env->ir[IR_A4] = host_pipe[1];
1670         return host_pipe[0];
1671 #elif defined(TARGET_MIPS)
1672         cpu_env->active_tc.gpr[3] = host_pipe[1];
1673         return host_pipe[0];
1674 #elif defined(TARGET_SH4)
1675         cpu_env->gregs[1] = host_pipe[1];
1676         return host_pipe[0];
1677 #elif defined(TARGET_SPARC)
1678         cpu_env->regwptr[1] = host_pipe[1];
1679         return host_pipe[0];
1680 #endif
1681     }
1682 
1683     if (put_user_s32(host_pipe[0], pipedes)
1684         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1685         return -TARGET_EFAULT;
1686     return get_errno(ret);
1687 }
1688 
1689 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1690                                               abi_ulong target_addr,
1691                                               socklen_t len)
1692 {
1693     struct target_ip_mreqn *target_smreqn;
1694 
1695     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1696     if (!target_smreqn)
1697         return -TARGET_EFAULT;
1698     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1699     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1700     if (len == sizeof(struct target_ip_mreqn))
1701         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1702     unlock_user(target_smreqn, target_addr, 0);
1703 
1704     return 0;
1705 }
1706 
1707 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1708                                                abi_ulong target_addr,
1709                                                socklen_t len)
1710 {
1711     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1712     sa_family_t sa_family;
1713     struct target_sockaddr *target_saddr;
1714 
1715     if (fd_trans_target_to_host_addr(fd)) {
1716         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1717     }
1718 
1719     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1720     if (!target_saddr)
1721         return -TARGET_EFAULT;
1722 
1723     sa_family = tswap16(target_saddr->sa_family);
1724 
1725     /* Oops. The caller might send an incomplete sun_path; sun_path
1726      * must be terminated by \0 (see the manual page), but
1727      * unfortunately it is quite common to specify the sockaddr_un
1728      * length as "strlen(x->sun_path)" when it should be
1729      * "strlen(...) + 1". We'll fix that here if needed.
1730      * The Linux kernel applies a similar fixup.
1731      */
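    /* For example, a guest that computes the address length without
     * counting sun_path's trailing NUL gets len bumped by one below,
     * provided the byte just past that length is the missing NUL.
     */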
1732 
1733     if (sa_family == AF_UNIX) {
1734         if (len < unix_maxlen && len > 0) {
1735             char *cp = (char*)target_saddr;
1736 
1737             if ( cp[len-1] && !cp[len] )
1738                 len++;
1739         }
1740         if (len > unix_maxlen)
1741             len = unix_maxlen;
1742     }
1743 
1744     memcpy(addr, target_saddr, len);
1745     addr->sa_family = sa_family;
1746     if (sa_family == AF_NETLINK) {
1747         struct sockaddr_nl *nladdr;
1748 
1749         nladdr = (struct sockaddr_nl *)addr;
1750         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1751         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1752     } else if (sa_family == AF_PACKET) {
1753 	struct target_sockaddr_ll *lladdr;
1754 
1755 	lladdr = (struct target_sockaddr_ll *)addr;
1756 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1757 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1758     }
1759     unlock_user(target_saddr, target_addr, 0);
1760 
1761     return 0;
1762 }
1763 
1764 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1765                                                struct sockaddr *addr,
1766                                                socklen_t len)
1767 {
1768     struct target_sockaddr *target_saddr;
1769 
1770     if (len == 0) {
1771         return 0;
1772     }
1773     assert(addr);
1774 
1775     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1776     if (!target_saddr)
1777         return -TARGET_EFAULT;
1778     memcpy(target_saddr, addr, len);
1779     if (len >= offsetof(struct target_sockaddr, sa_family) +
1780         sizeof(target_saddr->sa_family)) {
1781         target_saddr->sa_family = tswap16(addr->sa_family);
1782     }
1783     if (addr->sa_family == AF_NETLINK &&
1784         len >= sizeof(struct target_sockaddr_nl)) {
1785         struct target_sockaddr_nl *target_nl =
1786                (struct target_sockaddr_nl *)target_saddr;
1787         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1788         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1789     } else if (addr->sa_family == AF_PACKET) {
1790         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1791         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1792         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1793     } else if (addr->sa_family == AF_INET6 &&
1794                len >= sizeof(struct target_sockaddr_in6)) {
1795         struct target_sockaddr_in6 *target_in6 =
1796                (struct target_sockaddr_in6 *)target_saddr;
1797         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1798     }
1799     unlock_user(target_saddr, target_addr, len);
1800 
1801     return 0;
1802 }
1803 
1804 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1805                                            struct target_msghdr *target_msgh)
1806 {
1807     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1808     abi_long msg_controllen;
1809     abi_ulong target_cmsg_addr;
1810     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1811     socklen_t space = 0;
1812 
1813     msg_controllen = tswapal(target_msgh->msg_controllen);
1814     if (msg_controllen < sizeof (struct target_cmsghdr))
1815         goto the_end;
1816     target_cmsg_addr = tswapal(target_msgh->msg_control);
1817     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1818     target_cmsg_start = target_cmsg;
1819     if (!target_cmsg)
1820         return -TARGET_EFAULT;
1821 
1822     while (cmsg && target_cmsg) {
1823         void *data = CMSG_DATA(cmsg);
1824         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1825 
1826         int len = tswapal(target_cmsg->cmsg_len)
1827             - sizeof(struct target_cmsghdr);
1828 
1829         space += CMSG_SPACE(len);
1830         if (space > msgh->msg_controllen) {
1831             space -= CMSG_SPACE(len);
1832             /* This is a QEMU bug, since we allocated the payload
1833              * area ourselves (unlike overflow in host-to-target
1834              * conversion, which is just the guest giving us a buffer
1835              * that's too small). It can't happen for the payload types
1836              * we currently support; if it becomes an issue in future
1837              * we would need to improve our allocation strategy to
1838              * something more intelligent than "twice the size of the
1839              * target buffer we're reading from".
1840              */
1841             qemu_log_mask(LOG_UNIMP,
1842                           ("Unsupported ancillary data %d/%d: "
1843                            "unhandled msg size\n"),
1844                           tswap32(target_cmsg->cmsg_level),
1845                           tswap32(target_cmsg->cmsg_type));
1846             break;
1847         }
1848 
1849         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1850             cmsg->cmsg_level = SOL_SOCKET;
1851         } else {
1852             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1853         }
1854         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1855         cmsg->cmsg_len = CMSG_LEN(len);
1856 
1857         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1858             int *fd = (int *)data;
1859             int *target_fd = (int *)target_data;
1860             int i, numfds = len / sizeof(int);
1861 
1862             for (i = 0; i < numfds; i++) {
1863                 __get_user(fd[i], target_fd + i);
1864             }
1865         } else if (cmsg->cmsg_level == SOL_SOCKET
1866                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1867             struct ucred *cred = (struct ucred *)data;
1868             struct target_ucred *target_cred =
1869                 (struct target_ucred *)target_data;
1870 
1871             __get_user(cred->pid, &target_cred->pid);
1872             __get_user(cred->uid, &target_cred->uid);
1873             __get_user(cred->gid, &target_cred->gid);
1874         } else {
1875             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1876                           cmsg->cmsg_level, cmsg->cmsg_type);
1877             memcpy(data, target_data, len);
1878         }
1879 
1880         cmsg = CMSG_NXTHDR(msgh, cmsg);
1881         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1882                                          target_cmsg_start);
1883     }
1884     unlock_user(target_cmsg, target_cmsg_addr, 0);
1885  the_end:
1886     msgh->msg_controllen = space;
1887     return 0;
1888 }
1889 
1890 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1891                                            struct msghdr *msgh)
1892 {
1893     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1894     abi_long msg_controllen;
1895     abi_ulong target_cmsg_addr;
1896     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1897     socklen_t space = 0;
1898 
1899     msg_controllen = tswapal(target_msgh->msg_controllen);
1900     if (msg_controllen < sizeof (struct target_cmsghdr))
1901         goto the_end;
1902     target_cmsg_addr = tswapal(target_msgh->msg_control);
1903     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1904     target_cmsg_start = target_cmsg;
1905     if (!target_cmsg)
1906         return -TARGET_EFAULT;
1907 
1908     while (cmsg && target_cmsg) {
1909         void *data = CMSG_DATA(cmsg);
1910         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1911 
1912         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1913         int tgt_len, tgt_space;
1914 
1915         /* We never copy a half-header but may copy half-data;
1916          * this is Linux's behaviour in put_cmsg(). Note that
1917          * truncation here is a guest problem (which we report
1918          * to the guest via the CTRUNC bit), unlike truncation
1919          * in target_to_host_cmsg, which is a QEMU bug.
1920          */
1921         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1922             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1923             break;
1924         }
1925 
1926         if (cmsg->cmsg_level == SOL_SOCKET) {
1927             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1928         } else {
1929             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1930         }
1931         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1932 
1933         /* Payload types which need a different size of payload on
1934          * the target must adjust tgt_len here.
1935          */
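        /* For example, SO_TIMESTAMP carries a struct timeval, and the
         * target's timeval may not be the same size as the host's; the
         * switch below substitutes sizeof(struct target_timeval).
         */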
1936         tgt_len = len;
1937         switch (cmsg->cmsg_level) {
1938         case SOL_SOCKET:
1939             switch (cmsg->cmsg_type) {
1940             case SO_TIMESTAMP:
1941                 tgt_len = sizeof(struct target_timeval);
1942                 break;
1943             default:
1944                 break;
1945             }
1946             break;
1947         default:
1948             break;
1949         }
1950 
1951         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1952             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1953             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1954         }
1955 
1956         /* We must now copy-and-convert len bytes of payload
1957          * into tgt_len bytes of destination space. Bear in mind
1958          * that in both source and destination we may be dealing
1959          * with a truncated value!
1960          */
1961         switch (cmsg->cmsg_level) {
1962         case SOL_SOCKET:
1963             switch (cmsg->cmsg_type) {
1964             case SCM_RIGHTS:
1965             {
1966                 int *fd = (int *)data;
1967                 int *target_fd = (int *)target_data;
1968                 int i, numfds = tgt_len / sizeof(int);
1969 
1970                 for (i = 0; i < numfds; i++) {
1971                     __put_user(fd[i], target_fd + i);
1972                 }
1973                 break;
1974             }
1975             case SO_TIMESTAMP:
1976             {
1977                 struct timeval *tv = (struct timeval *)data;
1978                 struct target_timeval *target_tv =
1979                     (struct target_timeval *)target_data;
1980 
1981                 if (len != sizeof(struct timeval) ||
1982                     tgt_len != sizeof(struct target_timeval)) {
1983                     goto unimplemented;
1984                 }
1985 
1986                 /* copy struct timeval to target */
1987                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1988                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1989                 break;
1990             }
1991             case SCM_CREDENTIALS:
1992             {
1993                 struct ucred *cred = (struct ucred *)data;
1994                 struct target_ucred *target_cred =
1995                     (struct target_ucred *)target_data;
1996 
1997                 __put_user(cred->pid, &target_cred->pid);
1998                 __put_user(cred->uid, &target_cred->uid);
1999                 __put_user(cred->gid, &target_cred->gid);
2000                 break;
2001             }
2002             default:
2003                 goto unimplemented;
2004             }
2005             break;
2006 
2007         case SOL_IP:
2008             switch (cmsg->cmsg_type) {
2009             case IP_TTL:
2010             {
2011                 uint32_t *v = (uint32_t *)data;
2012                 uint32_t *t_int = (uint32_t *)target_data;
2013 
2014                 if (len != sizeof(uint32_t) ||
2015                     tgt_len != sizeof(uint32_t)) {
2016                     goto unimplemented;
2017                 }
2018                 __put_user(*v, t_int);
2019                 break;
2020             }
2021             case IP_RECVERR:
2022             {
2023                 struct errhdr_t {
2024                    struct sock_extended_err ee;
2025                    struct sockaddr_in offender;
2026                 };
2027                 struct errhdr_t *errh = (struct errhdr_t *)data;
2028                 struct errhdr_t *target_errh =
2029                     (struct errhdr_t *)target_data;
2030 
2031                 if (len != sizeof(struct errhdr_t) ||
2032                     tgt_len != sizeof(struct errhdr_t)) {
2033                     goto unimplemented;
2034                 }
2035                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2036                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2037                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2038                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2039                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2040                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2041                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2042                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2043                     (void *) &errh->offender, sizeof(errh->offender));
2044                 break;
2045             }
2046             default:
2047                 goto unimplemented;
2048             }
2049             break;
2050 
2051         case SOL_IPV6:
2052             switch (cmsg->cmsg_type) {
2053             case IPV6_HOPLIMIT:
2054             {
2055                 uint32_t *v = (uint32_t *)data;
2056                 uint32_t *t_int = (uint32_t *)target_data;
2057 
2058                 if (len != sizeof(uint32_t) ||
2059                     tgt_len != sizeof(uint32_t)) {
2060                     goto unimplemented;
2061                 }
2062                 __put_user(*v, t_int);
2063                 break;
2064             }
2065             case IPV6_RECVERR:
2066             {
2067                 struct errhdr6_t {
2068                    struct sock_extended_err ee;
2069                    struct sockaddr_in6 offender;
2070                 };
2071                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2072                 struct errhdr6_t *target_errh =
2073                     (struct errhdr6_t *)target_data;
2074 
2075                 if (len != sizeof(struct errhdr6_t) ||
2076                     tgt_len != sizeof(struct errhdr6_t)) {
2077                     goto unimplemented;
2078                 }
2079                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2080                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2081                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2082                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2083                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2084                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2085                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2086                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2087                     (void *) &errh->offender, sizeof(errh->offender));
2088                 break;
2089             }
2090             default:
2091                 goto unimplemented;
2092             }
2093             break;
2094 
2095         default:
2096         unimplemented:
2097             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2098                           cmsg->cmsg_level, cmsg->cmsg_type);
2099             memcpy(target_data, data, MIN(len, tgt_len));
2100             if (tgt_len > len) {
2101                 memset(target_data + len, 0, tgt_len - len);
2102             }
2103         }
2104 
2105         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2106         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2107         if (msg_controllen < tgt_space) {
2108             tgt_space = msg_controllen;
2109         }
2110         msg_controllen -= tgt_space;
2111         space += tgt_space;
2112         cmsg = CMSG_NXTHDR(msgh, cmsg);
2113         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2114                                          target_cmsg_start);
2115     }
2116     unlock_user(target_cmsg, target_cmsg_addr, space);
2117  the_end:
2118     target_msgh->msg_controllen = tswapal(space);
2119     return 0;
2120 }
2121 
2122 /* do_setsockopt() must return target values and target errnos. */
2123 static abi_long do_setsockopt(int sockfd, int level, int optname,
2124                               abi_ulong optval_addr, socklen_t optlen)
2125 {
2126     abi_long ret;
2127     int val;
2128     struct ip_mreqn *ip_mreq;
2129     struct ip_mreq_source *ip_mreq_source;
2130 
2131     switch(level) {
2132     case SOL_TCP:
2133     case SOL_UDP:
2134         /* TCP and UDP options all take an 'int' value.  */
2135         if (optlen < sizeof(uint32_t))
2136             return -TARGET_EINVAL;
2137 
2138         if (get_user_u32(val, optval_addr))
2139             return -TARGET_EFAULT;
2140         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2141         break;
2142     case SOL_IP:
2143         switch(optname) {
2144         case IP_TOS:
2145         case IP_TTL:
2146         case IP_HDRINCL:
2147         case IP_ROUTER_ALERT:
2148         case IP_RECVOPTS:
2149         case IP_RETOPTS:
2150         case IP_PKTINFO:
2151         case IP_MTU_DISCOVER:
2152         case IP_RECVERR:
2153         case IP_RECVTTL:
2154         case IP_RECVTOS:
2155 #ifdef IP_FREEBIND
2156         case IP_FREEBIND:
2157 #endif
2158         case IP_MULTICAST_TTL:
2159         case IP_MULTICAST_LOOP:
2160             val = 0;
2161             if (optlen >= sizeof(uint32_t)) {
2162                 if (get_user_u32(val, optval_addr))
2163                     return -TARGET_EFAULT;
2164             } else if (optlen >= 1) {
2165                 if (get_user_u8(val, optval_addr))
2166                     return -TARGET_EFAULT;
2167             }
2168             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2169             break;
2170         case IP_ADD_MEMBERSHIP:
2171         case IP_DROP_MEMBERSHIP:
2172             if (optlen < sizeof (struct target_ip_mreq) ||
2173                 optlen > sizeof (struct target_ip_mreqn))
2174                 return -TARGET_EINVAL;
2175 
2176             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2177             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2178             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2179             break;
2180 
2181         case IP_BLOCK_SOURCE:
2182         case IP_UNBLOCK_SOURCE:
2183         case IP_ADD_SOURCE_MEMBERSHIP:
2184         case IP_DROP_SOURCE_MEMBERSHIP:
2185             if (optlen != sizeof (struct target_ip_mreq_source))
2186                 return -TARGET_EINVAL;
2187 
2188             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2189             if (!ip_mreq_source) {
2190                 return -TARGET_EFAULT;
2191             }
2192             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2193             unlock_user (ip_mreq_source, optval_addr, 0);
2194             break;
2195 
2196         default:
2197             goto unimplemented;
2198         }
2199         break;
2200     case SOL_IPV6:
2201         switch (optname) {
2202         case IPV6_MTU_DISCOVER:
2203         case IPV6_MTU:
2204         case IPV6_V6ONLY:
2205         case IPV6_RECVPKTINFO:
2206         case IPV6_UNICAST_HOPS:
2207         case IPV6_MULTICAST_HOPS:
2208         case IPV6_MULTICAST_LOOP:
2209         case IPV6_RECVERR:
2210         case IPV6_RECVHOPLIMIT:
2211         case IPV6_2292HOPLIMIT:
2212         case IPV6_CHECKSUM:
2213         case IPV6_ADDRFORM:
2214         case IPV6_2292PKTINFO:
2215         case IPV6_RECVTCLASS:
2216         case IPV6_RECVRTHDR:
2217         case IPV6_2292RTHDR:
2218         case IPV6_RECVHOPOPTS:
2219         case IPV6_2292HOPOPTS:
2220         case IPV6_RECVDSTOPTS:
2221         case IPV6_2292DSTOPTS:
2222         case IPV6_TCLASS:
2223         case IPV6_ADDR_PREFERENCES:
2224 #ifdef IPV6_RECVPATHMTU
2225         case IPV6_RECVPATHMTU:
2226 #endif
2227 #ifdef IPV6_TRANSPARENT
2228         case IPV6_TRANSPARENT:
2229 #endif
2230 #ifdef IPV6_FREEBIND
2231         case IPV6_FREEBIND:
2232 #endif
2233 #ifdef IPV6_RECVORIGDSTADDR
2234         case IPV6_RECVORIGDSTADDR:
2235 #endif
2236             val = 0;
2237             if (optlen < sizeof(uint32_t)) {
2238                 return -TARGET_EINVAL;
2239             }
2240             if (get_user_u32(val, optval_addr)) {
2241                 return -TARGET_EFAULT;
2242             }
2243             ret = get_errno(setsockopt(sockfd, level, optname,
2244                                        &val, sizeof(val)));
2245             break;
2246         case IPV6_PKTINFO:
2247         {
2248             struct in6_pktinfo pki;
2249 
2250             if (optlen < sizeof(pki)) {
2251                 return -TARGET_EINVAL;
2252             }
2253 
2254             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2255                 return -TARGET_EFAULT;
2256             }
2257 
2258             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2259 
2260             ret = get_errno(setsockopt(sockfd, level, optname,
2261                                        &pki, sizeof(pki)));
2262             break;
2263         }
2264         case IPV6_ADD_MEMBERSHIP:
2265         case IPV6_DROP_MEMBERSHIP:
2266         {
2267             struct ipv6_mreq ipv6mreq;
2268 
2269             if (optlen < sizeof(ipv6mreq)) {
2270                 return -TARGET_EINVAL;
2271             }
2272 
2273             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2274                 return -TARGET_EFAULT;
2275             }
2276 
2277             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2278 
2279             ret = get_errno(setsockopt(sockfd, level, optname,
2280                                        &ipv6mreq, sizeof(ipv6mreq)));
2281             break;
2282         }
2283         default:
2284             goto unimplemented;
2285         }
2286         break;
2287     case SOL_ICMPV6:
2288         switch (optname) {
2289         case ICMPV6_FILTER:
2290         {
2291             struct icmp6_filter icmp6f;
2292 
2293             if (optlen > sizeof(icmp6f)) {
2294                 optlen = sizeof(icmp6f);
2295             }
2296 
2297             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2298                 return -TARGET_EFAULT;
2299             }
2300 
2301             for (val = 0; val < 8; val++) {
2302                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2303             }
2304 
2305             ret = get_errno(setsockopt(sockfd, level, optname,
2306                                        &icmp6f, optlen));
2307             break;
2308         }
2309         default:
2310             goto unimplemented;
2311         }
2312         break;
2313     case SOL_RAW:
2314         switch (optname) {
2315         case ICMP_FILTER:
2316         case IPV6_CHECKSUM:
2317             /* those take a u32 value */
2318             if (optlen < sizeof(uint32_t)) {
2319                 return -TARGET_EINVAL;
2320             }
2321 
2322             if (get_user_u32(val, optval_addr)) {
2323                 return -TARGET_EFAULT;
2324             }
2325             ret = get_errno(setsockopt(sockfd, level, optname,
2326                                        &val, sizeof(val)));
2327             break;
2328 
2329         default:
2330             goto unimplemented;
2331         }
2332         break;
2333 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2334     case SOL_ALG:
2335         switch (optname) {
2336         case ALG_SET_KEY:
2337         {
2338             char *alg_key = g_malloc(optlen);
2339 
2340             if (!alg_key) {
2341                 return -TARGET_ENOMEM;
2342             }
2343             if (copy_from_user(alg_key, optval_addr, optlen)) {
2344                 g_free(alg_key);
2345                 return -TARGET_EFAULT;
2346             }
2347             ret = get_errno(setsockopt(sockfd, level, optname,
2348                                        alg_key, optlen));
2349             g_free(alg_key);
2350             break;
2351         }
2352         case ALG_SET_AEAD_AUTHSIZE:
2353         {
2354             ret = get_errno(setsockopt(sockfd, level, optname,
2355                                        NULL, optlen));
2356             break;
2357         }
2358         default:
2359             goto unimplemented;
2360         }
2361         break;
2362 #endif
2363     case TARGET_SOL_SOCKET:
2364         switch (optname) {
2365         case TARGET_SO_RCVTIMEO:
2366         {
2367                 struct timeval tv;
2368 
2369                 optname = SO_RCVTIMEO;
2370 
2371 set_timeout:
2372                 if (optlen != sizeof(struct target_timeval)) {
2373                     return -TARGET_EINVAL;
2374                 }
2375 
2376                 if (copy_from_user_timeval(&tv, optval_addr)) {
2377                     return -TARGET_EFAULT;
2378                 }
2379 
2380                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2381                                 &tv, sizeof(tv)));
2382                 return ret;
2383         }
2384         case TARGET_SO_SNDTIMEO:
2385                 optname = SO_SNDTIMEO;
2386                 goto set_timeout;
2387         case TARGET_SO_ATTACH_FILTER:
2388         {
2389                 struct target_sock_fprog *tfprog;
2390                 struct target_sock_filter *tfilter;
2391                 struct sock_fprog fprog;
2392                 struct sock_filter *filter;
2393                 int i;
2394 
2395                 if (optlen != sizeof(*tfprog)) {
2396                     return -TARGET_EINVAL;
2397                 }
2398                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2399                     return -TARGET_EFAULT;
2400                 }
2401                 if (!lock_user_struct(VERIFY_READ, tfilter,
2402                                       tswapal(tfprog->filter), 0)) {
2403                     unlock_user_struct(tfprog, optval_addr, 1);
2404                     return -TARGET_EFAULT;
2405                 }
2406 
2407                 fprog.len = tswap16(tfprog->len);
2408                 filter = g_try_new(struct sock_filter, fprog.len);
2409                 if (filter == NULL) {
2410                     unlock_user_struct(tfilter, tfprog->filter, 1);
2411                     unlock_user_struct(tfprog, optval_addr, 1);
2412                     return -TARGET_ENOMEM;
2413                 }
2414                 for (i = 0; i < fprog.len; i++) {
2415                     filter[i].code = tswap16(tfilter[i].code);
2416                     filter[i].jt = tfilter[i].jt;
2417                     filter[i].jf = tfilter[i].jf;
2418                     filter[i].k = tswap32(tfilter[i].k);
2419                 }
2420                 fprog.filter = filter;
2421 
2422                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2423                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2424                 g_free(filter);
2425 
2426                 unlock_user_struct(tfilter, tfprog->filter, 1);
2427                 unlock_user_struct(tfprog, optval_addr, 1);
2428                 return ret;
2429         }
2430 	case TARGET_SO_BINDTODEVICE:
2431 	{
2432 		char *dev_ifname, *addr_ifname;
2433 
2434 		if (optlen > IFNAMSIZ - 1) {
2435 		    optlen = IFNAMSIZ - 1;
2436 		}
2437 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2438 		if (!dev_ifname) {
2439 		    return -TARGET_EFAULT;
2440 		}
2441 		optname = SO_BINDTODEVICE;
2442 		addr_ifname = alloca(IFNAMSIZ);
2443 		memcpy(addr_ifname, dev_ifname, optlen);
2444 		addr_ifname[optlen] = 0;
2445 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2446                                            addr_ifname, optlen));
2447 		unlock_user (dev_ifname, optval_addr, 0);
2448 		return ret;
2449 	}
2450         case TARGET_SO_LINGER:
2451         {
2452                 struct linger lg;
2453                 struct target_linger *tlg;
2454 
2455                 if (optlen != sizeof(struct target_linger)) {
2456                     return -TARGET_EINVAL;
2457                 }
2458                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2459                     return -TARGET_EFAULT;
2460                 }
2461                 __get_user(lg.l_onoff, &tlg->l_onoff);
2462                 __get_user(lg.l_linger, &tlg->l_linger);
2463                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2464                                 &lg, sizeof(lg)));
2465                 unlock_user_struct(tlg, optval_addr, 0);
2466                 return ret;
2467         }
2468             /* Options with 'int' argument.  */
2469         case TARGET_SO_DEBUG:
2470 		optname = SO_DEBUG;
2471 		break;
2472         case TARGET_SO_REUSEADDR:
2473 		optname = SO_REUSEADDR;
2474 		break;
2475 #ifdef SO_REUSEPORT
2476         case TARGET_SO_REUSEPORT:
2477                 optname = SO_REUSEPORT;
2478                 break;
2479 #endif
2480         case TARGET_SO_TYPE:
2481 		optname = SO_TYPE;
2482 		break;
2483         case TARGET_SO_ERROR:
2484 		optname = SO_ERROR;
2485 		break;
2486         case TARGET_SO_DONTROUTE:
2487 		optname = SO_DONTROUTE;
2488 		break;
2489         case TARGET_SO_BROADCAST:
2490 		optname = SO_BROADCAST;
2491 		break;
2492         case TARGET_SO_SNDBUF:
2493 		optname = SO_SNDBUF;
2494 		break;
2495         case TARGET_SO_SNDBUFFORCE:
2496                 optname = SO_SNDBUFFORCE;
2497                 break;
2498         case TARGET_SO_RCVBUF:
2499 		optname = SO_RCVBUF;
2500 		break;
2501         case TARGET_SO_RCVBUFFORCE:
2502                 optname = SO_RCVBUFFORCE;
2503                 break;
2504         case TARGET_SO_KEEPALIVE:
2505 		optname = SO_KEEPALIVE;
2506 		break;
2507         case TARGET_SO_OOBINLINE:
2508 		optname = SO_OOBINLINE;
2509 		break;
2510         case TARGET_SO_NO_CHECK:
2511 		optname = SO_NO_CHECK;
2512 		break;
2513         case TARGET_SO_PRIORITY:
2514 		optname = SO_PRIORITY;
2515 		break;
2516 #ifdef SO_BSDCOMPAT
2517         case TARGET_SO_BSDCOMPAT:
2518 		optname = SO_BSDCOMPAT;
2519 		break;
2520 #endif
2521         case TARGET_SO_PASSCRED:
2522 		optname = SO_PASSCRED;
2523 		break;
2524         case TARGET_SO_PASSSEC:
2525                 optname = SO_PASSSEC;
2526                 break;
2527         case TARGET_SO_TIMESTAMP:
2528 		optname = SO_TIMESTAMP;
2529 		break;
2530         case TARGET_SO_RCVLOWAT:
2531 		optname = SO_RCVLOWAT;
2532 		break;
2533         default:
2534             goto unimplemented;
2535         }
2536 	if (optlen < sizeof(uint32_t))
2537             return -TARGET_EINVAL;
2538 
2539 	if (get_user_u32(val, optval_addr))
2540             return -TARGET_EFAULT;
2541 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2542         break;
2543 #ifdef SOL_NETLINK
2544     case SOL_NETLINK:
2545         switch (optname) {
2546         case NETLINK_PKTINFO:
2547         case NETLINK_ADD_MEMBERSHIP:
2548         case NETLINK_DROP_MEMBERSHIP:
2549         case NETLINK_BROADCAST_ERROR:
2550         case NETLINK_NO_ENOBUFS:
2551 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2552         case NETLINK_LISTEN_ALL_NSID:
2553         case NETLINK_CAP_ACK:
2554 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2555 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2556         case NETLINK_EXT_ACK:
2557 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2558 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2559         case NETLINK_GET_STRICT_CHK:
2560 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2561             break;
2562         default:
2563             goto unimplemented;
2564         }
2565         val = 0;
2566         if (optlen < sizeof(uint32_t)) {
2567             return -TARGET_EINVAL;
2568         }
2569         if (get_user_u32(val, optval_addr)) {
2570             return -TARGET_EFAULT;
2571         }
2572         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2573                                    sizeof(val)));
2574         break;
2575 #endif /* SOL_NETLINK */
2576     default:
2577     unimplemented:
2578         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2579                       level, optname);
2580         ret = -TARGET_ENOPROTOOPT;
2581     }
2582     return ret;
2583 }
2584 
2585 /* do_getsockopt() must return target values and target errnos. */
2586 static abi_long do_getsockopt(int sockfd, int level, int optname,
2587                               abi_ulong optval_addr, abi_ulong optlen)
2588 {
2589     abi_long ret;
2590     int len, val;
2591     socklen_t lv;
2592 
2593     switch(level) {
2594     case TARGET_SOL_SOCKET:
2595         level = SOL_SOCKET;
2596         switch (optname) {
2597         /* These don't just return a single integer */
2598         case TARGET_SO_PEERNAME:
2599             goto unimplemented;
2600         case TARGET_SO_RCVTIMEO: {
2601             struct timeval tv;
2602             socklen_t tvlen;
2603 
2604             optname = SO_RCVTIMEO;
2605 
2606 get_timeout:
2607             if (get_user_u32(len, optlen)) {
2608                 return -TARGET_EFAULT;
2609             }
2610             if (len < 0) {
2611                 return -TARGET_EINVAL;
2612             }
2613 
2614             tvlen = sizeof(tv);
2615             ret = get_errno(getsockopt(sockfd, level, optname,
2616                                        &tv, &tvlen));
2617             if (ret < 0) {
2618                 return ret;
2619             }
2620             if (len > sizeof(struct target_timeval)) {
2621                 len = sizeof(struct target_timeval);
2622             }
2623             if (copy_to_user_timeval(optval_addr, &tv)) {
2624                 return -TARGET_EFAULT;
2625             }
2626             if (put_user_u32(len, optlen)) {
2627                 return -TARGET_EFAULT;
2628             }
2629             break;
2630         }
2631         case TARGET_SO_SNDTIMEO:
2632             optname = SO_SNDTIMEO;
2633             goto get_timeout;
2634         case TARGET_SO_PEERCRED: {
2635             struct ucred cr;
2636             socklen_t crlen;
2637             struct target_ucred *tcr;
2638 
2639             if (get_user_u32(len, optlen)) {
2640                 return -TARGET_EFAULT;
2641             }
2642             if (len < 0) {
2643                 return -TARGET_EINVAL;
2644             }
2645 
2646             crlen = sizeof(cr);
2647             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2648                                        &cr, &crlen));
2649             if (ret < 0) {
2650                 return ret;
2651             }
2652             if (len > crlen) {
2653                 len = crlen;
2654             }
2655             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2656                 return -TARGET_EFAULT;
2657             }
2658             __put_user(cr.pid, &tcr->pid);
2659             __put_user(cr.uid, &tcr->uid);
2660             __put_user(cr.gid, &tcr->gid);
2661             unlock_user_struct(tcr, optval_addr, 1);
2662             if (put_user_u32(len, optlen)) {
2663                 return -TARGET_EFAULT;
2664             }
2665             break;
2666         }
2667         case TARGET_SO_PEERSEC: {
2668             char *name;
2669 
2670             if (get_user_u32(len, optlen)) {
2671                 return -TARGET_EFAULT;
2672             }
2673             if (len < 0) {
2674                 return -TARGET_EINVAL;
2675             }
2676             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2677             if (!name) {
2678                 return -TARGET_EFAULT;
2679             }
2680             lv = len;
2681             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2682                                        name, &lv));
2683             if (put_user_u32(lv, optlen)) {
2684                 ret = -TARGET_EFAULT;
2685             }
2686             unlock_user(name, optval_addr, lv);
2687             break;
2688         }
2689         case TARGET_SO_LINGER:
2690         {
2691             struct linger lg;
2692             socklen_t lglen;
2693             struct target_linger *tlg;
2694 
2695             if (get_user_u32(len, optlen)) {
2696                 return -TARGET_EFAULT;
2697             }
2698             if (len < 0) {
2699                 return -TARGET_EINVAL;
2700             }
2701 
2702             lglen = sizeof(lg);
2703             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2704                                        &lg, &lglen));
2705             if (ret < 0) {
2706                 return ret;
2707             }
2708             if (len > lglen) {
2709                 len = lglen;
2710             }
2711             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2712                 return -TARGET_EFAULT;
2713             }
2714             __put_user(lg.l_onoff, &tlg->l_onoff);
2715             __put_user(lg.l_linger, &tlg->l_linger);
2716             unlock_user_struct(tlg, optval_addr, 1);
2717             if (put_user_u32(len, optlen)) {
2718                 return -TARGET_EFAULT;
2719             }
2720             break;
2721         }
2722         /* Options with 'int' argument.  */
2723         case TARGET_SO_DEBUG:
2724             optname = SO_DEBUG;
2725             goto int_case;
2726         case TARGET_SO_REUSEADDR:
2727             optname = SO_REUSEADDR;
2728             goto int_case;
2729 #ifdef SO_REUSEPORT
2730         case TARGET_SO_REUSEPORT:
2731             optname = SO_REUSEPORT;
2732             goto int_case;
2733 #endif
2734         case TARGET_SO_TYPE:
2735             optname = SO_TYPE;
2736             goto int_case;
2737         case TARGET_SO_ERROR:
2738             optname = SO_ERROR;
2739             goto int_case;
2740         case TARGET_SO_DONTROUTE:
2741             optname = SO_DONTROUTE;
2742             goto int_case;
2743         case TARGET_SO_BROADCAST:
2744             optname = SO_BROADCAST;
2745             goto int_case;
2746         case TARGET_SO_SNDBUF:
2747             optname = SO_SNDBUF;
2748             goto int_case;
2749         case TARGET_SO_RCVBUF:
2750             optname = SO_RCVBUF;
2751             goto int_case;
2752         case TARGET_SO_KEEPALIVE:
2753             optname = SO_KEEPALIVE;
2754             goto int_case;
2755         case TARGET_SO_OOBINLINE:
2756             optname = SO_OOBINLINE;
2757             goto int_case;
2758         case TARGET_SO_NO_CHECK:
2759             optname = SO_NO_CHECK;
2760             goto int_case;
2761         case TARGET_SO_PRIORITY:
2762             optname = SO_PRIORITY;
2763             goto int_case;
2764 #ifdef SO_BSDCOMPAT
2765         case TARGET_SO_BSDCOMPAT:
2766             optname = SO_BSDCOMPAT;
2767             goto int_case;
2768 #endif
2769         case TARGET_SO_PASSCRED:
2770             optname = SO_PASSCRED;
2771             goto int_case;
2772         case TARGET_SO_TIMESTAMP:
2773             optname = SO_TIMESTAMP;
2774             goto int_case;
2775         case TARGET_SO_RCVLOWAT:
2776             optname = SO_RCVLOWAT;
2777             goto int_case;
2778         case TARGET_SO_ACCEPTCONN:
2779             optname = SO_ACCEPTCONN;
2780             goto int_case;
2781         case TARGET_SO_PROTOCOL:
2782             optname = SO_PROTOCOL;
2783             goto int_case;
2784         case TARGET_SO_DOMAIN:
2785             optname = SO_DOMAIN;
2786             goto int_case;
2787         default:
2788             goto int_case;
2789         }
2790         break;
2791     case SOL_TCP:
2792     case SOL_UDP:
2793         /* TCP and UDP options all take an 'int' value.  */
2794     int_case:
2795         if (get_user_u32(len, optlen))
2796             return -TARGET_EFAULT;
2797         if (len < 0)
2798             return -TARGET_EINVAL;
2799         lv = sizeof(lv);
2800         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2801         if (ret < 0)
2802             return ret;
2803         if (optname == SO_TYPE) {
2804             val = host_to_target_sock_type(val);
2805         }
2806         if (len > lv)
2807             len = lv;
2808         if (len == 4) {
2809             if (put_user_u32(val, optval_addr))
2810                 return -TARGET_EFAULT;
2811         } else {
2812             if (put_user_u8(val, optval_addr))
2813                 return -TARGET_EFAULT;
2814         }
2815         if (put_user_u32(len, optlen))
2816             return -TARGET_EFAULT;
2817         break;
2818     case SOL_IP:
2819         switch(optname) {
2820         case IP_TOS:
2821         case IP_TTL:
2822         case IP_HDRINCL:
2823         case IP_ROUTER_ALERT:
2824         case IP_RECVOPTS:
2825         case IP_RETOPTS:
2826         case IP_PKTINFO:
2827         case IP_MTU_DISCOVER:
2828         case IP_RECVERR:
2829         case IP_RECVTOS:
2830 #ifdef IP_FREEBIND
2831         case IP_FREEBIND:
2832 #endif
2833         case IP_MULTICAST_TTL:
2834         case IP_MULTICAST_LOOP:
2835             if (get_user_u32(len, optlen))
2836                 return -TARGET_EFAULT;
2837             if (len < 0)
2838                 return -TARGET_EINVAL;
2839             lv = sizeof(lv);
2840             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2841             if (ret < 0)
2842                 return ret;
2843             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2844                 len = 1;
2845                 if (put_user_u32(len, optlen)
2846                     || put_user_u8(val, optval_addr))
2847                     return -TARGET_EFAULT;
2848             } else {
2849                 if (len > sizeof(int))
2850                     len = sizeof(int);
2851                 if (put_user_u32(len, optlen)
2852                     || put_user_u32(val, optval_addr))
2853                     return -TARGET_EFAULT;
2854             }
2855             break;
2856         default:
2857             ret = -TARGET_ENOPROTOOPT;
2858             break;
2859         }
2860         break;
2861     case SOL_IPV6:
2862         switch (optname) {
2863         case IPV6_MTU_DISCOVER:
2864         case IPV6_MTU:
2865         case IPV6_V6ONLY:
2866         case IPV6_RECVPKTINFO:
2867         case IPV6_UNICAST_HOPS:
2868         case IPV6_MULTICAST_HOPS:
2869         case IPV6_MULTICAST_LOOP:
2870         case IPV6_RECVERR:
2871         case IPV6_RECVHOPLIMIT:
2872         case IPV6_2292HOPLIMIT:
2873         case IPV6_CHECKSUM:
2874         case IPV6_ADDRFORM:
2875         case IPV6_2292PKTINFO:
2876         case IPV6_RECVTCLASS:
2877         case IPV6_RECVRTHDR:
2878         case IPV6_2292RTHDR:
2879         case IPV6_RECVHOPOPTS:
2880         case IPV6_2292HOPOPTS:
2881         case IPV6_RECVDSTOPTS:
2882         case IPV6_2292DSTOPTS:
2883         case IPV6_TCLASS:
2884         case IPV6_ADDR_PREFERENCES:
2885 #ifdef IPV6_RECVPATHMTU
2886         case IPV6_RECVPATHMTU:
2887 #endif
2888 #ifdef IPV6_TRANSPARENT
2889         case IPV6_TRANSPARENT:
2890 #endif
2891 #ifdef IPV6_FREEBIND
2892         case IPV6_FREEBIND:
2893 #endif
2894 #ifdef IPV6_RECVORIGDSTADDR
2895         case IPV6_RECVORIGDSTADDR:
2896 #endif
2897             if (get_user_u32(len, optlen))
2898                 return -TARGET_EFAULT;
2899             if (len < 0)
2900                 return -TARGET_EINVAL;
2901             lv = sizeof(lv);
2902             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2903             if (ret < 0)
2904                 return ret;
2905             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2906                 len = 1;
2907                 if (put_user_u32(len, optlen)
2908                     || put_user_u8(val, optval_addr))
2909                     return -TARGET_EFAULT;
2910             } else {
2911                 if (len > sizeof(int))
2912                     len = sizeof(int);
2913                 if (put_user_u32(len, optlen)
2914                     || put_user_u32(val, optval_addr))
2915                     return -TARGET_EFAULT;
2916             }
2917             break;
2918         default:
2919             ret = -TARGET_ENOPROTOOPT;
2920             break;
2921         }
2922         break;
2923 #ifdef SOL_NETLINK
2924     case SOL_NETLINK:
2925         switch (optname) {
2926         case NETLINK_PKTINFO:
2927         case NETLINK_BROADCAST_ERROR:
2928         case NETLINK_NO_ENOBUFS:
2929 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2930         case NETLINK_LISTEN_ALL_NSID:
2931         case NETLINK_CAP_ACK:
2932 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2933 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2934         case NETLINK_EXT_ACK:
2935 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2936 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2937         case NETLINK_GET_STRICT_CHK:
2938 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2939             if (get_user_u32(len, optlen)) {
2940                 return -TARGET_EFAULT;
2941             }
2942             if (len != sizeof(val)) {
2943                 return -TARGET_EINVAL;
2944             }
2945             lv = len;
2946             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2947             if (ret < 0) {
2948                 return ret;
2949             }
2950             if (put_user_u32(lv, optlen)
2951                 || put_user_u32(val, optval_addr)) {
2952                 return -TARGET_EFAULT;
2953             }
2954             break;
2955 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2956         case NETLINK_LIST_MEMBERSHIPS:
2957         {
2958             uint32_t *results;
2959             int i;
2960             if (get_user_u32(len, optlen)) {
2961                 return -TARGET_EFAULT;
2962             }
2963             if (len < 0) {
2964                 return -TARGET_EINVAL;
2965             }
2966             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2967             if (!results && len > 0) {
2968                 return -TARGET_EFAULT;
2969             }
2970             lv = len;
2971             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2972             if (ret < 0) {
2973                 unlock_user(results, optval_addr, 0);
2974                 return ret;
2975             }
2976             /* swap host endianness to target endianness. */
2977             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2978                 results[i] = tswap32(results[i]);
2979             }
2980             if (put_user_u32(lv, optlen)) {
2981                 return -TARGET_EFAULT;
2982             }
2983             unlock_user(results, optval_addr, 0);
2984             break;
2985         }
2986 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2987         default:
2988             goto unimplemented;
2989         }
2990         break;
2991 #endif /* SOL_NETLINK */
2992     default:
2993     unimplemented:
2994         qemu_log_mask(LOG_UNIMP,
2995                       "getsockopt level=%d optname=%d not yet supported\n",
2996                       level, optname);
2997         ret = -TARGET_EOPNOTSUPP;
2998         break;
2999     }
3000     return ret;
3001 }
3002 
3003 /* Convert target low/high pair representing file offset into the host
3004  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3005  * as the kernel doesn't handle them either.
3006  */
3007 static void target_to_host_low_high(abi_ulong tlow,
3008                                     abi_ulong thigh,
3009                                     unsigned long *hlow,
3010                                     unsigned long *hhigh)
3011 {
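    /*
     * The shifts are split into two half-width steps so the shift count
     * never equals the full width of the type; a single full-width shift
     * would be undefined behaviour in C when TARGET_LONG_BITS or
     * HOST_LONG_BITS is 64.
     */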
3012     uint64_t off = tlow |
3013         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3014         TARGET_LONG_BITS / 2;
3015 
3016     *hlow = off;
3017     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3018 }
3019 
3020 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3021                                 abi_ulong count, int copy)
3022 {
3023     struct target_iovec *target_vec;
3024     struct iovec *vec;
3025     abi_ulong total_len, max_len;
3026     int i;
3027     int err = 0;
3028     bool bad_address = false;
3029 
3030     if (count == 0) {
3031         errno = 0;
3032         return NULL;
3033     }
3034     if (count > IOV_MAX) {
3035         errno = EINVAL;
3036         return NULL;
3037     }
3038 
3039     vec = g_try_new0(struct iovec, count);
3040     if (vec == NULL) {
3041         errno = ENOMEM;
3042         return NULL;
3043     }
3044 
3045     target_vec = lock_user(VERIFY_READ, target_addr,
3046                            count * sizeof(struct target_iovec), 1);
3047     if (target_vec == NULL) {
3048         err = EFAULT;
3049         goto fail2;
3050     }
3051 
3052     /* ??? If host page size > target page size, this will result in a
3053        value larger than what we can actually support.  */
3054     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3055     total_len = 0;
3056 
3057     for (i = 0; i < count; i++) {
3058         abi_ulong base = tswapal(target_vec[i].iov_base);
3059         abi_long len = tswapal(target_vec[i].iov_len);
3060 
3061         if (len < 0) {
3062             err = EINVAL;
3063             goto fail;
3064         } else if (len == 0) {
3065             /* Zero length pointer is ignored.  */
3066             vec[i].iov_base = 0;
3067         } else {
3068             vec[i].iov_base = lock_user(type, base, len, copy);
3069             /* If the first buffer pointer is bad, this is a fault.  But
3070              * subsequent bad buffers will result in a partial write; this
3071              * is realized by filling the vector with null pointers and
3072              * zero lengths. */
3073             if (!vec[i].iov_base) {
3074                 if (i == 0) {
3075                     err = EFAULT;
3076                     goto fail;
3077                 } else {
3078                     bad_address = true;
3079                 }
3080             }
3081             if (bad_address) {
3082                 len = 0;
3083             }
3084             if (len > max_len - total_len) {
3085                 len = max_len - total_len;
3086             }
3087         }
3088         vec[i].iov_len = len;
3089         total_len += len;
3090     }
3091 
3092     unlock_user(target_vec, target_addr, 0);
3093     return vec;
3094 
3095  fail:
3096     while (--i >= 0) {
3097         if (tswapal(target_vec[i].iov_len) > 0) {
3098             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3099         }
3100     }
3101     unlock_user(target_vec, target_addr, 0);
3102  fail2:
3103     g_free(vec);
3104     errno = err;
3105     return NULL;
3106 }
3107 
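/* Release an iovec obtained from lock_iovec(): unlock each guest buffer
 * (copying data back to the guest when 'copy' is set) and free the host
 * iovec array. */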
3108 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3109                          abi_ulong count, int copy)
3110 {
3111     struct target_iovec *target_vec;
3112     int i;
3113 
3114     target_vec = lock_user(VERIFY_READ, target_addr,
3115                            count * sizeof(struct target_iovec), 1);
3116     if (target_vec) {
3117         for (i = 0; i < count; i++) {
3118             abi_ulong base = tswapal(target_vec[i].iov_base);
3119             abi_long len = tswapal(target_vec[i].iov_len);
3120             if (len < 0) {
3121                 break;
3122             }
3123             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3124         }
3125         unlock_user(target_vec, target_addr, 0);
3126     }
3127 
3128     g_free(vec);
3129 }
3130 
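/* Convert target SOCK_* type and flag bits to host values; returns 0 on
 * success or -TARGET_EINVAL if a requested flag is not supported. */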
3131 static inline int target_to_host_sock_type(int *type)
3132 {
3133     int host_type = 0;
3134     int target_type = *type;
3135 
3136     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3137     case TARGET_SOCK_DGRAM:
3138         host_type = SOCK_DGRAM;
3139         break;
3140     case TARGET_SOCK_STREAM:
3141         host_type = SOCK_STREAM;
3142         break;
3143     default:
3144         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3145         break;
3146     }
3147     if (target_type & TARGET_SOCK_CLOEXEC) {
3148 #if defined(SOCK_CLOEXEC)
3149         host_type |= SOCK_CLOEXEC;
3150 #else
3151         return -TARGET_EINVAL;
3152 #endif
3153     }
3154     if (target_type & TARGET_SOCK_NONBLOCK) {
3155 #if defined(SOCK_NONBLOCK)
3156         host_type |= SOCK_NONBLOCK;
3157 #elif !defined(O_NONBLOCK)
3158         return -TARGET_EINVAL;
3159 #endif
3160     }
3161     *type = host_type;
3162     return 0;
3163 }
3164 
3165 /* Try to emulate socket type flags after socket creation.  */
3166 static int sock_flags_fixup(int fd, int target_type)
3167 {
3168 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3169     if (target_type & TARGET_SOCK_NONBLOCK) {
3170         int flags = fcntl(fd, F_GETFL);
3171         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3172             close(fd);
3173             return -TARGET_EINVAL;
3174         }
3175     }
3176 #endif
3177     return fd;
3178 }
3179 
3180 /* do_socket() Must return target values and target errnos. */
3181 static abi_long do_socket(int domain, int type, int protocol)
3182 {
3183     int target_type = type;
3184     int ret;
3185 
3186     ret = target_to_host_sock_type(&type);
3187     if (ret) {
3188         return ret;
3189     }
3190 
3191     if (domain == PF_NETLINK && !(
3192 #ifdef CONFIG_RTNETLINK
3193          protocol == NETLINK_ROUTE ||
3194 #endif
3195          protocol == NETLINK_KOBJECT_UEVENT ||
3196          protocol == NETLINK_AUDIT)) {
3197         return -TARGET_EPROTONOSUPPORT;
3198     }
3199 
3200     if (domain == AF_PACKET ||
3201         (domain == AF_INET && type == SOCK_PACKET)) {
3202         protocol = tswap16(protocol);
3203     }
3204 
3205     ret = get_errno(socket(domain, type, protocol));
3206     if (ret >= 0) {
3207         ret = sock_flags_fixup(ret, target_type);
3208         if (type == SOCK_PACKET) {
3209             /* Handle an obsolete case:
3210              * if the socket type is SOCK_PACKET, bind by name
3211              */
3212             fd_trans_register(ret, &target_packet_trans);
3213         } else if (domain == PF_NETLINK) {
3214             switch (protocol) {
3215 #ifdef CONFIG_RTNETLINK
3216             case NETLINK_ROUTE:
3217                 fd_trans_register(ret, &target_netlink_route_trans);
3218                 break;
3219 #endif
3220             case NETLINK_KOBJECT_UEVENT:
3221                 /* nothing to do: messages are strings */
3222                 break;
3223             case NETLINK_AUDIT:
3224                 fd_trans_register(ret, &target_netlink_audit_trans);
3225                 break;
3226             default:
3227                 g_assert_not_reached();
3228             }
3229         }
3230     }
3231     return ret;
3232 }
3233 
3234 /* do_bind() Must return target values and target errnos. */
3235 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3236                         socklen_t addrlen)
3237 {
3238     void *addr;
3239     abi_long ret;
3240 
3241     if ((int)addrlen < 0) {
3242         return -TARGET_EINVAL;
3243     }
3244 
3245     addr = alloca(addrlen+1);
3246 
3247     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3248     if (ret)
3249         return ret;
3250 
3251     return get_errno(bind(sockfd, addr, addrlen));
3252 }
3253 
3254 /* do_connect() Must return target values and target errnos. */
3255 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3256                            socklen_t addrlen)
3257 {
3258     void *addr;
3259     abi_long ret;
3260 
3261     if ((int)addrlen < 0) {
3262         return -TARGET_EINVAL;
3263     }
3264 
3265     addr = alloca(addrlen+1);
3266 
3267     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3268     if (ret)
3269         return ret;
3270 
3271     return get_errno(safe_connect(sockfd, addr, addrlen));
3272 }
3273 
3274 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3275 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3276                                       int flags, int send)
3277 {
3278     abi_long ret, len;
3279     struct msghdr msg;
3280     abi_ulong count;
3281     struct iovec *vec;
3282     abi_ulong target_vec;
3283 
3284     if (msgp->msg_name) {
3285         msg.msg_namelen = tswap32(msgp->msg_namelen);
3286         msg.msg_name = alloca(msg.msg_namelen+1);
3287         ret = target_to_host_sockaddr(fd, msg.msg_name,
3288                                       tswapal(msgp->msg_name),
3289                                       msg.msg_namelen);
3290         if (ret == -TARGET_EFAULT) {
3291             /* For connected sockets msg_name and msg_namelen must
3292              * be ignored, so returning EFAULT immediately is wrong.
3293              * Instead, pass a bad msg_name to the host kernel, and
3294              * let it decide whether to return EFAULT or not.
3295              */
3296             msg.msg_name = (void *)-1;
3297         } else if (ret) {
3298             goto out2;
3299         }
3300     } else {
3301         msg.msg_name = NULL;
3302         msg.msg_namelen = 0;
3303     }
3304     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3305     msg.msg_control = alloca(msg.msg_controllen);
3306     memset(msg.msg_control, 0, msg.msg_controllen);
3307 
3308     msg.msg_flags = tswap32(msgp->msg_flags);
3309 
3310     count = tswapal(msgp->msg_iovlen);
3311     target_vec = tswapal(msgp->msg_iov);
3312 
3313     if (count > IOV_MAX) {
3314         /* sendmsg/recvmsg returns a different errno for this condition than
3315          * readv/writev, so we must catch it here before lock_iovec() does.
3316          */
3317         ret = -TARGET_EMSGSIZE;
3318         goto out2;
3319     }
3320 
3321     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3322                      target_vec, count, send);
3323     if (vec == NULL) {
3324         ret = -host_to_target_errno(errno);
3325         goto out2;
3326     }
3327     msg.msg_iovlen = count;
3328     msg.msg_iov = vec;
3329 
3330     if (send) {
3331         if (fd_trans_target_to_host_data(fd)) {
3332             void *host_msg;
3333 
3334             host_msg = g_malloc(msg.msg_iov->iov_len);
3335             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3336             ret = fd_trans_target_to_host_data(fd)(host_msg,
3337                                                    msg.msg_iov->iov_len);
3338             if (ret >= 0) {
3339                 msg.msg_iov->iov_base = host_msg;
3340                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3341             }
3342             g_free(host_msg);
3343         } else {
3344             ret = target_to_host_cmsg(&msg, msgp);
3345             if (ret == 0) {
3346                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3347             }
3348         }
3349     } else {
3350         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3351         if (!is_error(ret)) {
3352             len = ret;
3353             if (fd_trans_host_to_target_data(fd)) {
3354                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3355                                                MIN(msg.msg_iov->iov_len, len));
3356             } else {
3357                 ret = host_to_target_cmsg(msgp, &msg);
3358             }
3359             if (!is_error(ret)) {
3360                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3361                 msgp->msg_flags = tswap32(msg.msg_flags);
3362                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3363                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3364                                     msg.msg_name, msg.msg_namelen);
3365                     if (ret) {
3366                         goto out;
3367                     }
3368                 }
3369 
3370                 ret = len;
3371             }
3372         }
3373     }
3374 
3375 out:
3376     unlock_iovec(vec, target_vec, count, !send);
3377 out2:
3378     return ret;
3379 }
3380 
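/* do_sendrecvmsg() Must return target values and target errnos. */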
3381 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3382                                int flags, int send)
3383 {
3384     abi_long ret;
3385     struct target_msghdr *msgp;
3386 
3387     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3388                           msgp,
3389                           target_msg,
3390                           send ? 1 : 0)) {
3391         return -TARGET_EFAULT;
3392     }
3393     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3394     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3395     return ret;
3396 }
3397 
3398 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3399  * so it might not have this *mmsg-specific flag either.
3400  */
3401 #ifndef MSG_WAITFORONE
3402 #define MSG_WAITFORONE 0x10000
3403 #endif
3404 
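/* do_sendrecvmmsg() Must return target values and target errnos. */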
3405 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3406                                 unsigned int vlen, unsigned int flags,
3407                                 int send)
3408 {
3409     struct target_mmsghdr *mmsgp;
3410     abi_long ret = 0;
3411     int i;
3412 
3413     if (vlen > UIO_MAXIOV) {
3414         vlen = UIO_MAXIOV;
3415     }
3416 
3417     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3418     if (!mmsgp) {
3419         return -TARGET_EFAULT;
3420     }
3421 
3422     for (i = 0; i < vlen; i++) {
3423         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3424         if (is_error(ret)) {
3425             break;
3426         }
3427         mmsgp[i].msg_len = tswap32(ret);
3428         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3429         if (flags & MSG_WAITFORONE) {
3430             flags |= MSG_DONTWAIT;
3431         }
3432     }
3433 
3434     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3435 
3436     /* Return number of datagrams sent if we sent any at all;
3437      * otherwise return the error.
3438      */
3439     if (i) {
3440         return i;
3441     }
3442     return ret;
3443 }
3444 
3445 /* do_accept4() Must return target values and target errnos. */
3446 static abi_long do_accept4(int fd, abi_ulong target_addr,
3447                            abi_ulong target_addrlen_addr, int flags)
3448 {
3449     socklen_t addrlen, ret_addrlen;
3450     void *addr;
3451     abi_long ret;
3452     int host_flags;
3453 
3454     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3455 
3456     if (target_addr == 0) {
3457         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3458     }
3459 
3460     /* Linux returns EFAULT if the addrlen pointer is invalid */
3461     if (get_user_u32(addrlen, target_addrlen_addr))
3462         return -TARGET_EFAULT;
3463 
3464     if ((int)addrlen < 0) {
3465         return -TARGET_EINVAL;
3466     }
3467 
3468     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3469         return -TARGET_EFAULT;
3470     }
3471 
3472     addr = alloca(addrlen);
3473 
3474     ret_addrlen = addrlen;
3475     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3476     if (!is_error(ret)) {
3477         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3478         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3479             ret = -TARGET_EFAULT;
3480         }
3481     }
3482     return ret;
3483 }
3484 
3485 /* do_getpeername() Must return target values and target errnos. */
3486 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3487                                abi_ulong target_addrlen_addr)
3488 {
3489     socklen_t addrlen, ret_addrlen;
3490     void *addr;
3491     abi_long ret;
3492 
3493     if (get_user_u32(addrlen, target_addrlen_addr))
3494         return -TARGET_EFAULT;
3495 
3496     if ((int)addrlen < 0) {
3497         return -TARGET_EINVAL;
3498     }
3499 
3500     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3501         return -TARGET_EFAULT;
3502     }
3503 
3504     addr = alloca(addrlen);
3505 
3506     ret_addrlen = addrlen;
3507     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3508     if (!is_error(ret)) {
3509         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3510         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3511             ret = -TARGET_EFAULT;
3512         }
3513     }
3514     return ret;
3515 }
3516 
3517 /* do_getsockname() Must return target values and target errnos. */
3518 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3519                                abi_ulong target_addrlen_addr)
3520 {
3521     socklen_t addrlen, ret_addrlen;
3522     void *addr;
3523     abi_long ret;
3524 
3525     if (get_user_u32(addrlen, target_addrlen_addr))
3526         return -TARGET_EFAULT;
3527 
3528     if ((int)addrlen < 0) {
3529         return -TARGET_EINVAL;
3530     }
3531 
3532     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3533         return -TARGET_EFAULT;
3534     }
3535 
3536     addr = alloca(addrlen);
3537 
3538     ret_addrlen = addrlen;
3539     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3540     if (!is_error(ret)) {
3541         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3542         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3543             ret = -TARGET_EFAULT;
3544         }
3545     }
3546     return ret;
3547 }
3548 
3549 /* do_socketpair() Must return target values and target errnos. */
3550 static abi_long do_socketpair(int domain, int type, int protocol,
3551                               abi_ulong target_tab_addr)
3552 {
3553     int tab[2];
3554     abi_long ret;
3555 
3556     target_to_host_sock_type(&type);
3557 
3558     ret = get_errno(socketpair(domain, type, protocol, tab));
3559     if (!is_error(ret)) {
3560         if (put_user_s32(tab[0], target_tab_addr)
3561             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3562             ret = -TARGET_EFAULT;
3563     }
3564     return ret;
3565 }
3566 
3567 /* do_sendto() Must return target values and target errnos. */
3568 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3569                           abi_ulong target_addr, socklen_t addrlen)
3570 {
3571     void *addr;
3572     void *host_msg;
3573     void *copy_msg = NULL;
3574     abi_long ret;
3575 
3576     if ((int)addrlen < 0) {
3577         return -TARGET_EINVAL;
3578     }
3579 
3580     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3581     if (!host_msg)
3582         return -TARGET_EFAULT;
3583     if (fd_trans_target_to_host_data(fd)) {
3584         copy_msg = host_msg;
3585         host_msg = g_malloc(len);
3586         memcpy(host_msg, copy_msg, len);
3587         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3588         if (ret < 0) {
3589             goto fail;
3590         }
3591     }
3592     if (target_addr) {
3593         addr = alloca(addrlen+1);
3594         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3595         if (ret) {
3596             goto fail;
3597         }
3598         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3599     } else {
3600         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3601     }
3602 fail:
3603     if (copy_msg) {
3604         g_free(host_msg);
3605         host_msg = copy_msg;
3606     }
3607     unlock_user(host_msg, msg, 0);
3608     return ret;
3609 }
3610 
3611 /* do_recvfrom() Must return target values and target errnos. */
3612 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3613                             abi_ulong target_addr,
3614                             abi_ulong target_addrlen)
3615 {
3616     socklen_t addrlen, ret_addrlen;
3617     void *addr;
3618     void *host_msg;
3619     abi_long ret;
3620 
3621     if (!msg) {
3622         host_msg = NULL;
3623     } else {
3624         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3625         if (!host_msg) {
3626             return -TARGET_EFAULT;
3627         }
3628     }
3629     if (target_addr) {
3630         if (get_user_u32(addrlen, target_addrlen)) {
3631             ret = -TARGET_EFAULT;
3632             goto fail;
3633         }
3634         if ((int)addrlen < 0) {
3635             ret = -TARGET_EINVAL;
3636             goto fail;
3637         }
3638         addr = alloca(addrlen);
3639         ret_addrlen = addrlen;
3640         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3641                                       addr, &ret_addrlen));
3642     } else {
3643         addr = NULL; /* To keep compiler quiet.  */
3644         addrlen = 0; /* To keep compiler quiet.  */
3645         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3646     }
3647     if (!is_error(ret)) {
3648         if (fd_trans_host_to_target_data(fd)) {
3649             abi_long trans;
3650             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3651             if (is_error(trans)) {
3652                 ret = trans;
3653                 goto fail;
3654             }
3655         }
3656         if (target_addr) {
3657             host_to_target_sockaddr(target_addr, addr,
3658                                     MIN(addrlen, ret_addrlen));
3659             if (put_user_u32(ret_addrlen, target_addrlen)) {
3660                 ret = -TARGET_EFAULT;
3661                 goto fail;
3662             }
3663         }
3664         unlock_user(host_msg, msg, len);
3665     } else {
3666 fail:
3667         unlock_user(host_msg, msg, 0);
3668     }
3669     return ret;
3670 }
3671 
3672 #ifdef TARGET_NR_socketcall
3673 /* do_socketcall() must return target values and target errnos. */
3674 static abi_long do_socketcall(int num, abi_ulong vptr)
3675 {
3676     static const unsigned nargs[] = { /* number of arguments per operation */
3677         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3678         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3679         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3680         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3681         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3682         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3683         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3684         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3685         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3686         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3687         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3688         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3689         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3690         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3691         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3692         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3693         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3694         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3695         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3696         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3697     };
3698     abi_long a[6]; /* max 6 args */
3699     unsigned i;
3700 
3701     /* check the range of the first argument num */
3702     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3703     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3704         return -TARGET_EINVAL;
3705     }
3706     /* ensure we have space for args */
3707     if (nargs[num] > ARRAY_SIZE(a)) {
3708         return -TARGET_EINVAL;
3709     }
3710     /* collect the arguments in a[] according to nargs[] */
3711     for (i = 0; i < nargs[num]; ++i) {
3712         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3713             return -TARGET_EFAULT;
3714         }
3715     }
3716     /* now when we have the args, invoke the appropriate underlying function */
3717     switch (num) {
3718     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3719         return do_socket(a[0], a[1], a[2]);
3720     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3721         return do_bind(a[0], a[1], a[2]);
3722     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3723         return do_connect(a[0], a[1], a[2]);
3724     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3725         return get_errno(listen(a[0], a[1]));
3726     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3727         return do_accept4(a[0], a[1], a[2], 0);
3728     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3729         return do_getsockname(a[0], a[1], a[2]);
3730     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3731         return do_getpeername(a[0], a[1], a[2]);
3732     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3733         return do_socketpair(a[0], a[1], a[2], a[3]);
3734     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3735         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3736     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3737         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3738     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3739         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3740     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3741         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3742     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3743         return get_errno(shutdown(a[0], a[1]));
3744     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3745         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3746     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3747         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3748     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3749         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3750     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3751         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3752     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3753         return do_accept4(a[0], a[1], a[2], a[3]);
3754     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3755         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3756     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3757         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3758     default:
3759         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3760         return -TARGET_EINVAL;
3761     }
3762 }
3763 #endif
3764 
3765 #define N_SHM_REGIONS	32
3766 
3767 static struct shm_region {
3768     abi_ulong start;
3769     abi_ulong size;
3770     bool in_use;
3771 } shm_regions[N_SHM_REGIONS];
3772 
3773 #ifndef TARGET_SEMID64_DS
3774 /* asm-generic version of this struct */
3775 struct target_semid64_ds
3776 {
3777   struct target_ipc_perm sem_perm;
3778   abi_ulong sem_otime;
3779 #if TARGET_ABI_BITS == 32
3780   abi_ulong __unused1;
3781 #endif
3782   abi_ulong sem_ctime;
3783 #if TARGET_ABI_BITS == 32
3784   abi_ulong __unused2;
3785 #endif
3786   abi_ulong sem_nsems;
3787   abi_ulong __unused3;
3788   abi_ulong __unused4;
3789 };
3790 #endif
3791 
3792 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3793                                                abi_ulong target_addr)
3794 {
3795     struct target_ipc_perm *target_ip;
3796     struct target_semid64_ds *target_sd;
3797 
3798     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3799         return -TARGET_EFAULT;
3800     target_ip = &(target_sd->sem_perm);
3801     host_ip->__key = tswap32(target_ip->__key);
3802     host_ip->uid = tswap32(target_ip->uid);
3803     host_ip->gid = tswap32(target_ip->gid);
3804     host_ip->cuid = tswap32(target_ip->cuid);
3805     host_ip->cgid = tswap32(target_ip->cgid);
3806 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3807     host_ip->mode = tswap32(target_ip->mode);
3808 #else
3809     host_ip->mode = tswap16(target_ip->mode);
3810 #endif
3811 #if defined(TARGET_PPC)
3812     host_ip->__seq = tswap32(target_ip->__seq);
3813 #else
3814     host_ip->__seq = tswap16(target_ip->__seq);
3815 #endif
3816     unlock_user_struct(target_sd, target_addr, 0);
3817     return 0;
3818 }
3819 
3820 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3821                                                struct ipc_perm *host_ip)
3822 {
3823     struct target_ipc_perm *target_ip;
3824     struct target_semid64_ds *target_sd;
3825 
3826     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3827         return -TARGET_EFAULT;
3828     target_ip = &(target_sd->sem_perm);
3829     target_ip->__key = tswap32(host_ip->__key);
3830     target_ip->uid = tswap32(host_ip->uid);
3831     target_ip->gid = tswap32(host_ip->gid);
3832     target_ip->cuid = tswap32(host_ip->cuid);
3833     target_ip->cgid = tswap32(host_ip->cgid);
3834 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3835     target_ip->mode = tswap32(host_ip->mode);
3836 #else
3837     target_ip->mode = tswap16(host_ip->mode);
3838 #endif
3839 #if defined(TARGET_PPC)
3840     target_ip->__seq = tswap32(host_ip->__seq);
3841 #else
3842     target_ip->__seq = tswap16(host_ip->__seq);
3843 #endif
3844     unlock_user_struct(target_sd, target_addr, 1);
3845     return 0;
3846 }
3847 
3848 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3849                                                abi_ulong target_addr)
3850 {
3851     struct target_semid64_ds *target_sd;
3852 
3853     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3854         return -TARGET_EFAULT;
3855     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3856         return -TARGET_EFAULT;
3857     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3858     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3859     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3860     unlock_user_struct(target_sd, target_addr, 0);
3861     return 0;
3862 }
3863 
3864 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3865                                                struct semid_ds *host_sd)
3866 {
3867     struct target_semid64_ds *target_sd;
3868 
3869     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3870         return -TARGET_EFAULT;
3871     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3872         return -TARGET_EFAULT;
3873     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3874     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3875     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3876     unlock_user_struct(target_sd, target_addr, 1);
3877     return 0;
3878 }
3879 
3880 struct target_seminfo {
3881     int semmap;
3882     int semmni;
3883     int semmns;
3884     int semmnu;
3885     int semmsl;
3886     int semopm;
3887     int semume;
3888     int semusz;
3889     int semvmx;
3890     int semaem;
3891 };
3892 
3893 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3894                                               struct seminfo *host_seminfo)
3895 {
3896     struct target_seminfo *target_seminfo;
3897     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3898         return -TARGET_EFAULT;
3899     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3900     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3901     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3902     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3903     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3904     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3905     __put_user(host_seminfo->semume, &target_seminfo->semume);
3906     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3907     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3908     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3909     unlock_user_struct(target_seminfo, target_addr, 1);
3910     return 0;
3911 }
3912 
3913 union semun {
3914 	int val;
3915 	struct semid_ds *buf;
3916 	unsigned short *array;
3917 	struct seminfo *__buf;
3918 };
3919 
3920 union target_semun {
3921 	int val;
3922 	abi_ulong buf;
3923 	abi_ulong array;
3924 	abi_ulong __buf;
3925 };
3926 
3927 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3928                                                abi_ulong target_addr)
3929 {
3930     int nsems;
3931     unsigned short *array;
3932     union semun semun;
3933     struct semid_ds semid_ds;
3934     int i, ret;
3935 
3936     semun.buf = &semid_ds;
3937 
3938     ret = semctl(semid, 0, IPC_STAT, semun);
3939     if (ret == -1)
3940         return get_errno(ret);
3941 
3942     nsems = semid_ds.sem_nsems;
3943 
3944     *host_array = g_try_new(unsigned short, nsems);
3945     if (!*host_array) {
3946         return -TARGET_ENOMEM;
3947     }
3948     array = lock_user(VERIFY_READ, target_addr,
3949                       nsems*sizeof(unsigned short), 1);
3950     if (!array) {
3951         g_free(*host_array);
3952         return -TARGET_EFAULT;
3953     }
3954 
3955     for(i=0; i<nsems; i++) {
3956         __get_user((*host_array)[i], &array[i]);
3957     }
3958     unlock_user(array, target_addr, 0);
3959 
3960     return 0;
3961 }
3962 
3963 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3964                                                unsigned short **host_array)
3965 {
3966     int nsems;
3967     unsigned short *array;
3968     union semun semun;
3969     struct semid_ds semid_ds;
3970     int i, ret;
3971 
3972     semun.buf = &semid_ds;
3973 
3974     ret = semctl(semid, 0, IPC_STAT, semun);
3975     if (ret == -1)
3976         return get_errno(ret);
3977 
3978     nsems = semid_ds.sem_nsems;
3979 
3980     array = lock_user(VERIFY_WRITE, target_addr,
3981                       nsems*sizeof(unsigned short), 0);
3982     if (!array)
3983         return -TARGET_EFAULT;
3984 
3985     for(i=0; i<nsems; i++) {
3986         __put_user((*host_array)[i], &array[i]);
3987     }
3988     g_free(*host_array);
3989     unlock_user(array, target_addr, 1);
3990 
3991     return 0;
3992 }
3993 
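/* do_semctl() Must return target values and target errnos. */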
3994 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3995                                  abi_ulong target_arg)
3996 {
3997     union target_semun target_su = { .buf = target_arg };
3998     union semun arg;
3999     struct semid_ds dsarg;
4000     unsigned short *array = NULL;
4001     struct seminfo seminfo;
4002     abi_long ret = -TARGET_EINVAL;
4003     abi_long err;
4004     cmd &= 0xff;
4005 
4006     switch( cmd ) {
4007 	case GETVAL:
4008 	case SETVAL:
4009             /* In 64-bit cross-endian situations, we will erroneously pick up
4010              * the wrong half of the union for the "val" element.  To rectify
4011              * this, the entire 8-byte structure is byteswapped, followed by
4012              * a swap of the 4-byte val field. In other cases, the data is
4013              * already in proper host byte order. */
4014 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4015 		target_su.buf = tswapal(target_su.buf);
4016 		arg.val = tswap32(target_su.val);
4017 	    } else {
4018 		arg.val = target_su.val;
4019 	    }
4020             ret = get_errno(semctl(semid, semnum, cmd, arg));
4021             break;
4022 	case GETALL:
4023 	case SETALL:
4024             err = target_to_host_semarray(semid, &array, target_su.array);
4025             if (err)
4026                 return err;
4027             arg.array = array;
4028             ret = get_errno(semctl(semid, semnum, cmd, arg));
4029             err = host_to_target_semarray(semid, target_su.array, &array);
4030             if (err)
4031                 return err;
4032             break;
4033 	case IPC_STAT:
4034 	case IPC_SET:
4035 	case SEM_STAT:
4036             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4037             if (err)
4038                 return err;
4039             arg.buf = &dsarg;
4040             ret = get_errno(semctl(semid, semnum, cmd, arg));
4041             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4042             if (err)
4043                 return err;
4044             break;
4045 	case IPC_INFO:
4046 	case SEM_INFO:
4047             arg.__buf = &seminfo;
4048             ret = get_errno(semctl(semid, semnum, cmd, arg));
4049             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4050             if (err)
4051                 return err;
4052             break;
4053 	case IPC_RMID:
4054 	case GETPID:
4055 	case GETNCNT:
4056 	case GETZCNT:
4057             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4058             break;
4059     }
4060 
4061     return ret;
4062 }
4063 
4064 struct target_sembuf {
4065     unsigned short sem_num;
4066     short sem_op;
4067     short sem_flg;
4068 };
4069 
4070 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4071                                              abi_ulong target_addr,
4072                                              unsigned nsops)
4073 {
4074     struct target_sembuf *target_sembuf;
4075     int i;
4076 
4077     target_sembuf = lock_user(VERIFY_READ, target_addr,
4078                               nsops*sizeof(struct target_sembuf), 1);
4079     if (!target_sembuf)
4080         return -TARGET_EFAULT;
4081 
4082     for(i=0; i<nsops; i++) {
4083         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4084         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4085         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4086     }
4087 
4088     unlock_user(target_sembuf, target_addr, 0);
4089 
4090     return 0;
4091 }
4092 
4093 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4094     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4095 
4096 /*
4097  * This macro is required to handle the s390 variant, which passes the
4098  * arguments in a different order than the default.
4099  */
4100 #ifdef __s390x__
4101 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4102   (__nsops), (__timeout), (__sops)
4103 #else
4104 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4105   (__nsops), 0, (__sops), (__timeout)
4106 #endif
4107 
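/* do_semtimedop() Must return target values and target errnos. */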
4108 static inline abi_long do_semtimedop(int semid,
4109                                      abi_long ptr,
4110                                      unsigned nsops,
4111                                      abi_long timeout, bool time64)
4112 {
4113     struct sembuf *sops;
4114     struct timespec ts, *pts = NULL;
4115     abi_long ret;
4116 
4117     if (timeout) {
4118         pts = &ts;
4119         if (time64) {
4120             if (target_to_host_timespec64(pts, timeout)) {
4121                 return -TARGET_EFAULT;
4122             }
4123         } else {
4124             if (target_to_host_timespec(pts, timeout)) {
4125                 return -TARGET_EFAULT;
4126             }
4127         }
4128     }
4129 
4130     if (nsops > TARGET_SEMOPM) {
4131         return -TARGET_E2BIG;
4132     }
4133 
4134     sops = g_new(struct sembuf, nsops);
4135 
4136     if (target_to_host_sembuf(sops, ptr, nsops)) {
4137         g_free(sops);
4138         return -TARGET_EFAULT;
4139     }
4140 
4141     ret = -TARGET_ENOSYS;
4142 #ifdef __NR_semtimedop
4143     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4144 #endif
4145 #ifdef __NR_ipc
4146     if (ret == -TARGET_ENOSYS) {
4147         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4148                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4149     }
4150 #endif
4151     g_free(sops);
4152     return ret;
4153 }
4154 #endif
4155 
4156 struct target_msqid_ds
4157 {
4158     struct target_ipc_perm msg_perm;
4159     abi_ulong msg_stime;
4160 #if TARGET_ABI_BITS == 32
4161     abi_ulong __unused1;
4162 #endif
4163     abi_ulong msg_rtime;
4164 #if TARGET_ABI_BITS == 32
4165     abi_ulong __unused2;
4166 #endif
4167     abi_ulong msg_ctime;
4168 #if TARGET_ABI_BITS == 32
4169     abi_ulong __unused3;
4170 #endif
4171     abi_ulong __msg_cbytes;
4172     abi_ulong msg_qnum;
4173     abi_ulong msg_qbytes;
4174     abi_ulong msg_lspid;
4175     abi_ulong msg_lrpid;
4176     abi_ulong __unused4;
4177     abi_ulong __unused5;
4178 };
4179 
4180 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4181                                                abi_ulong target_addr)
4182 {
4183     struct target_msqid_ds *target_md;
4184 
4185     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4186         return -TARGET_EFAULT;
4187     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4188         return -TARGET_EFAULT;
4189     host_md->msg_stime = tswapal(target_md->msg_stime);
4190     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4191     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4192     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4193     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4194     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4195     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4196     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4197     unlock_user_struct(target_md, target_addr, 0);
4198     return 0;
4199 }
4200 
4201 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4202                                                struct msqid_ds *host_md)
4203 {
4204     struct target_msqid_ds *target_md;
4205 
4206     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4207         return -TARGET_EFAULT;
4208     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4209         return -TARGET_EFAULT;
4210     target_md->msg_stime = tswapal(host_md->msg_stime);
4211     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4212     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4213     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4214     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4215     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4216     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4217     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4218     unlock_user_struct(target_md, target_addr, 1);
4219     return 0;
4220 }
4221 
4222 struct target_msginfo {
4223     int msgpool;
4224     int msgmap;
4225     int msgmax;
4226     int msgmnb;
4227     int msgmni;
4228     int msgssz;
4229     int msgtql;
4230     unsigned short int msgseg;
4231 };
4232 
4233 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4234                                               struct msginfo *host_msginfo)
4235 {
4236     struct target_msginfo *target_msginfo;
4237     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4238         return -TARGET_EFAULT;
4239     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4240     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4241     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4242     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4243     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4244     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4245     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4246     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4247     unlock_user_struct(target_msginfo, target_addr, 1);
4248     return 0;
4249 }
4250 
4251 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
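/* do_msgctl() Must return target values and target errnos. */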
4252 {
4253     struct msqid_ds dsarg;
4254     struct msginfo msginfo;
4255     abi_long ret = -TARGET_EINVAL;
4256 
4257     cmd &= 0xff;
4258 
4259     switch (cmd) {
4260     case IPC_STAT:
4261     case IPC_SET:
4262     case MSG_STAT:
4263         if (target_to_host_msqid_ds(&dsarg,ptr))
4264             return -TARGET_EFAULT;
4265         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4266         if (host_to_target_msqid_ds(ptr,&dsarg))
4267             return -TARGET_EFAULT;
4268         break;
4269     case IPC_RMID:
4270         ret = get_errno(msgctl(msgid, cmd, NULL));
4271         break;
4272     case IPC_INFO:
4273     case MSG_INFO:
4274         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4275         if (host_to_target_msginfo(ptr, &msginfo))
4276             return -TARGET_EFAULT;
4277         break;
4278     }
4279 
4280     return ret;
4281 }
4282 
4283 struct target_msgbuf {
4284     abi_long mtype;
4285     char	mtext[1];
4286 };
4287 
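/* do_msgsnd() Must return target values and target errnos. */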
4288 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4289                                  ssize_t msgsz, int msgflg)
4290 {
4291     struct target_msgbuf *target_mb;
4292     struct msgbuf *host_mb;
4293     abi_long ret = 0;
4294 
4295     if (msgsz < 0) {
4296         return -TARGET_EINVAL;
4297     }
4298 
4299     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4300         return -TARGET_EFAULT;
4301     host_mb = g_try_malloc(msgsz + sizeof(long));
4302     if (!host_mb) {
4303         unlock_user_struct(target_mb, msgp, 0);
4304         return -TARGET_ENOMEM;
4305     }
4306     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4307     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4308     ret = -TARGET_ENOSYS;
4309 #ifdef __NR_msgsnd
4310     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4311 #endif
4312 #ifdef __NR_ipc
4313     if (ret == -TARGET_ENOSYS) {
4314 #ifdef __s390x__
4315         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4316                                  host_mb));
4317 #else
4318         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4319                                  host_mb, 0));
4320 #endif
4321     }
4322 #endif
4323     g_free(host_mb);
4324     unlock_user_struct(target_mb, msgp, 0);
4325 
4326     return ret;
4327 }
4328 
4329 #ifdef __NR_ipc
4330 #if defined(__sparc__)
4331 /* SPARC's msgrcv does not use the kludge on the final 2 arguments.  */
4332 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4333 #elif defined(__s390x__)
4334 /* The s390 sys_ipc variant has only five parameters.  */
4335 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4336     ((long int[]){(long int)__msgp, __msgtyp})
4337 #else
4338 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4339     ((long int[]){(long int)__msgp, __msgtyp}), 0
4340 #endif
4341 #endif
4342 
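/* do_msgrcv() Must return target values and target errnos. */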
4343 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4344                                  ssize_t msgsz, abi_long msgtyp,
4345                                  int msgflg)
4346 {
4347     struct target_msgbuf *target_mb;
4348     char *target_mtext;
4349     struct msgbuf *host_mb;
4350     abi_long ret = 0;
4351 
4352     if (msgsz < 0) {
4353         return -TARGET_EINVAL;
4354     }
4355 
4356     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4357         return -TARGET_EFAULT;
4358 
4359     host_mb = g_try_malloc(msgsz + sizeof(long));
4360     if (!host_mb) {
4361         ret = -TARGET_ENOMEM;
4362         goto end;
4363     }
4364     ret = -TARGET_ENOSYS;
4365 #ifdef __NR_msgrcv
4366     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4367 #endif
4368 #ifdef __NR_ipc
4369     if (ret == -TARGET_ENOSYS) {
4370         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4371                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4372     }
4373 #endif
4374 
4375     if (ret > 0) {
4376         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4377         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4378         if (!target_mtext) {
4379             ret = -TARGET_EFAULT;
4380             goto end;
4381         }
4382         memcpy(target_mb->mtext, host_mb->mtext, ret);
4383         unlock_user(target_mtext, target_mtext_addr, ret);
4384     }
4385 
4386     target_mb->mtype = tswapal(host_mb->mtype);
4387 
4388 end:
4389     if (target_mb)
4390         unlock_user_struct(target_mb, msgp, 1);
4391     g_free(host_mb);
4392     return ret;
4393 }
4394 
4395 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4396                                                abi_ulong target_addr)
4397 {
4398     struct target_shmid_ds *target_sd;
4399 
4400     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4401         return -TARGET_EFAULT;
4402     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4403         return -TARGET_EFAULT;
4404     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4405     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4406     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4407     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4408     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4409     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4410     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4411     unlock_user_struct(target_sd, target_addr, 0);
4412     return 0;
4413 }
4414 
4415 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4416                                                struct shmid_ds *host_sd)
4417 {
4418     struct target_shmid_ds *target_sd;
4419 
4420     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4421         return -TARGET_EFAULT;
4422     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4423         return -TARGET_EFAULT;
4424     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4425     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4426     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4427     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4428     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4429     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4430     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4431     unlock_user_struct(target_sd, target_addr, 1);
4432     return 0;
4433 }
4434 
4435 struct  target_shminfo {
4436     abi_ulong shmmax;
4437     abi_ulong shmmin;
4438     abi_ulong shmmni;
4439     abi_ulong shmseg;
4440     abi_ulong shmall;
4441 };
4442 
4443 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4444                                               struct shminfo *host_shminfo)
4445 {
4446     struct target_shminfo *target_shminfo;
4447     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4448         return -TARGET_EFAULT;
4449     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4450     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4451     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4452     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4453     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4454     unlock_user_struct(target_shminfo, target_addr, 1);
4455     return 0;
4456 }
4457 
4458 struct target_shm_info {
4459     int used_ids;
4460     abi_ulong shm_tot;
4461     abi_ulong shm_rss;
4462     abi_ulong shm_swp;
4463     abi_ulong swap_attempts;
4464     abi_ulong swap_successes;
4465 };
4466 
4467 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4468                                                struct shm_info *host_shm_info)
4469 {
4470     struct target_shm_info *target_shm_info;
4471     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4472         return -TARGET_EFAULT;
4473     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4474     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4475     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4476     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4477     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4478     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4479     unlock_user_struct(target_shm_info, target_addr, 1);
4480     return 0;
4481 }
4482 
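/* do_shmctl() Must return target values and target errnos. */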
4483 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4484 {
4485     struct shmid_ds dsarg;
4486     struct shminfo shminfo;
4487     struct shm_info shm_info;
4488     abi_long ret = -TARGET_EINVAL;
4489 
4490     cmd &= 0xff;
4491 
4492     switch(cmd) {
4493     case IPC_STAT:
4494     case IPC_SET:
4495     case SHM_STAT:
4496         if (target_to_host_shmid_ds(&dsarg, buf))
4497             return -TARGET_EFAULT;
4498         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4499         if (host_to_target_shmid_ds(buf, &dsarg))
4500             return -TARGET_EFAULT;
4501         break;
4502     case IPC_INFO:
4503         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4504         if (host_to_target_shminfo(buf, &shminfo))
4505             return -TARGET_EFAULT;
4506         break;
4507     case SHM_INFO:
4508         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4509         if (host_to_target_shm_info(buf, &shm_info))
4510             return -TARGET_EFAULT;
4511         break;
4512     case IPC_RMID:
4513     case SHM_LOCK:
4514     case SHM_UNLOCK:
4515         ret = get_errno(shmctl(shmid, cmd, NULL));
4516         break;
4517     }
4518 
4519     return ret;
4520 }
4521 
4522 #ifndef TARGET_FORCE_SHMLBA
4523 /* For most architectures, SHMLBA is the same as the page size;
4524  * some architectures have larger values, in which case they should
4525  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4526  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4527  * and defining its own value for SHMLBA.
4528  *
4529  * The kernel also permits SHMLBA to be set by the architecture to a
4530  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4531  * this means that addresses are rounded to the large size if
4532  * SHM_RND is set but addresses not aligned to that size are not rejected
4533  * as long as they are at least page-aligned. Since the only architecture
4534  * which uses this is ia64 this code doesn't provide for that oddity.
4535  */
4536 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4537 {
4538     return TARGET_PAGE_SIZE;
4539 }
4540 #endif
4541 
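/* Attach a SysV shared memory segment into the guest address space.
 * Returns the guest address of the mapping on success, or a negative
 * errno value on failure. */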
4542 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4543                                  int shmid, abi_ulong shmaddr, int shmflg)
4544 {
4545     CPUState *cpu = env_cpu(cpu_env);
4546     abi_long raddr;
4547     void *host_raddr;
4548     struct shmid_ds shm_info;
4549     int i,ret;
4550     abi_ulong shmlba;
4551 
4552     /* shmat pointers are always untagged */
4553 
4554     /* find out the length of the shared memory segment */
4555     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4556     if (is_error(ret)) {
4557         /* can't get length, bail out */
4558         return ret;
4559     }
4560 
4561     shmlba = target_shmlba(cpu_env);
4562 
4563     if (shmaddr & (shmlba - 1)) {
4564         if (shmflg & SHM_RND) {
4565             shmaddr &= ~(shmlba - 1);
4566         } else {
4567             return -TARGET_EINVAL;
4568         }
4569     }
4570     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4571         return -TARGET_EINVAL;
4572     }
4573 
4574     mmap_lock();
4575 
4576     /*
4577      * We're mapping shared memory, so ensure we generate code for parallel
4578      * execution and flush old translations.  This will work up to the level
4579      * supported by the host -- anything that requires EXCP_ATOMIC will not
4580      * be atomic with respect to an external process.
4581      */
4582     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4583         cpu->tcg_cflags |= CF_PARALLEL;
4584         tb_flush(cpu);
4585     }
4586 
4587     if (shmaddr)
4588         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4589     else {
4590         abi_ulong mmap_start;
4591 
4592         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4593         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4594 
4595         if (mmap_start == -1) {
4596             errno = ENOMEM;
4597             host_raddr = (void *)-1;
4598         } else
4599             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4600                                shmflg | SHM_REMAP);
4601     }
4602 
4603     if (host_raddr == (void *)-1) {
4604         mmap_unlock();
4605         return get_errno((long)host_raddr);
4606     }
4607     raddr=h2g((unsigned long)host_raddr);
4608 
4609     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4610                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4611                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4612 
4613     for (i = 0; i < N_SHM_REGIONS; i++) {
4614         if (!shm_regions[i].in_use) {
4615             shm_regions[i].in_use = true;
4616             shm_regions[i].start = raddr;
4617             shm_regions[i].size = shm_info.shm_segsz;
4618             break;
4619         }
4620     }
4621 
4622     mmap_unlock();
4623     return raddr;
4624 
4625 }
4626 
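/* Detach the shared memory segment mapped at shmaddr and clear the
 * corresponding shm_regions[] slot and page flags. */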
4627 static inline abi_long do_shmdt(abi_ulong shmaddr)
4628 {
4629     int i;
4630     abi_long rv;
4631 
4632     /* shmdt pointers are always untagged */
4633 
4634     mmap_lock();
4635 
4636     for (i = 0; i < N_SHM_REGIONS; ++i) {
4637         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4638             shm_regions[i].in_use = false;
4639             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4640             break;
4641         }
4642     }
4643     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4644 
4645     mmap_unlock();
4646 
4647     return rv;
4648 }
4649 
4650 #ifdef TARGET_NR_ipc
4651 /* ??? This only works with linear mappings.  */
4652 /* do_ipc() must return target values and target errnos. */
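/*
 * The single ipc() entry point multiplexes the SysV IPC calls: the low
 * 16 bits of 'call' select the operation (IPCOP_*) and the high 16 bits
 * carry the version, which matters for the legacy msgrcv/shmat layouts.
 */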
4653 static abi_long do_ipc(CPUArchState *cpu_env,
4654                        unsigned int call, abi_long first,
4655                        abi_long second, abi_long third,
4656                        abi_long ptr, abi_long fifth)
4657 {
4658     int version;
4659     abi_long ret = 0;
4660 
4661     version = call >> 16;
4662     call &= 0xffff;
4663 
4664     switch (call) {
4665     case IPCOP_semop:
4666         ret = do_semtimedop(first, ptr, second, 0, false);
4667         break;
4668     case IPCOP_semtimedop:
4669     /*
4670      * The s390 sys_ipc variant has only five parameters instead of six
4671      * (as for the default variant), and the only difference is the handling
4672      * of SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4673      * to a struct timespec while the generic variant uses the fifth parameter.
4674      */
4675 #if defined(TARGET_S390X)
4676         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4677 #else
4678         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4679 #endif
4680         break;
4681 
4682     case IPCOP_semget:
4683         ret = get_errno(semget(first, second, third));
4684         break;
4685 
4686     case IPCOP_semctl: {
4687         /* The semun argument to semctl is passed by value, so dereference the
4688          * ptr argument. */
4689         abi_ulong atptr;
4690         get_user_ual(atptr, ptr);
4691         ret = do_semctl(first, second, third, atptr);
4692         break;
4693     }
4694 
4695     case IPCOP_msgget:
4696         ret = get_errno(msgget(first, second));
4697         break;
4698 
4699     case IPCOP_msgsnd:
4700         ret = do_msgsnd(first, ptr, second, third);
4701         break;
4702 
4703     case IPCOP_msgctl:
4704         ret = do_msgctl(first, second, ptr);
4705         break;
4706 
4707     case IPCOP_msgrcv:
4708         switch (version) {
4709         case 0:
4710             {
4711                 struct target_ipc_kludge {
4712                     abi_long msgp;
4713                     abi_long msgtyp;
4714                 } *tmp;
4715 
4716                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4717                     ret = -TARGET_EFAULT;
4718                     break;
4719                 }
4720 
4721                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4722 
4723                 unlock_user_struct(tmp, ptr, 0);
4724                 break;
4725             }
4726         default:
4727             ret = do_msgrcv(first, ptr, second, fifth, third);
4728         }
4729         break;
4730 
4731     case IPCOP_shmat:
4732         switch (version) {
4733         default:
4734         {
4735             abi_ulong raddr;
4736             raddr = do_shmat(cpu_env, first, ptr, second);
4737             if (is_error(raddr))
4738                 return get_errno(raddr);
4739             if (put_user_ual(raddr, third))
4740                 return -TARGET_EFAULT;
4741             break;
4742         }
4743         case 1:
4744             ret = -TARGET_EINVAL;
4745             break;
4746         }
4747         break;
4748     case IPCOP_shmdt:
4749         ret = do_shmdt(ptr);
4750         break;
4751 
4752     case IPCOP_shmget:
4753         /* IPC_* flag values are the same on all Linux platforms */
4754         ret = get_errno(shmget(first, second, third));
4755         break;
4756 
4757     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4758     case IPCOP_shmctl:
4759         ret = do_shmctl(first, second, ptr);
4760         break;
4761     default:
4762         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4763                       call, version);
4764         ret = -TARGET_ENOSYS;
4765         break;
4766     }
4767     return ret;
4768 }
4769 #endif
4770 
4771 /* kernel structure types definitions */
4772 
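/*
 * syscall_types.h is pulled in twice below: the first pass turns each
 * STRUCT() line into a STRUCT_* enum value, the second into an argtype
 * descriptor array.  As an illustration (hypothetical entry, not
 * necessarily present in syscall_types.h), STRUCT(foo, TYPE_INT, TYPE_LONG)
 * would expand first to the enum member STRUCT_foo and then to
 *   static const argtype struct_foo_def[] = { TYPE_INT, TYPE_LONG, TYPE_NULL };
 */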
4773 #define STRUCT(name, ...) STRUCT_ ## name,
4774 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4775 enum {
4776 #include "syscall_types.h"
4777 STRUCT_MAX
4778 };
4779 #undef STRUCT
4780 #undef STRUCT_SPECIAL
4781 
4782 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4783 #define STRUCT_SPECIAL(name)
4784 #include "syscall_types.h"
4785 #undef STRUCT
4786 #undef STRUCT_SPECIAL
4787 
4788 #define MAX_STRUCT_SIZE 4096
4789 
4790 #ifdef CONFIG_FIEMAP
4791 /* So fiemap access checks don't overflow on 32 bit systems.
4792  * This is very slightly smaller than the limit imposed by
4793  * the underlying kernel.
4794  */
4795 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4796                             / sizeof(struct fiemap_extent))
4797 
4798 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4799                                        int fd, int cmd, abi_long arg)
4800 {
4801     /* The parameter for this ioctl is a struct fiemap followed
4802      * by an array of struct fiemap_extent whose size is set
4803      * in fiemap->fm_extent_count. The array is filled in by the
4804      * ioctl.
4805      */
4806     int target_size_in, target_size_out;
4807     struct fiemap *fm;
4808     const argtype *arg_type = ie->arg_type;
4809     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4810     void *argptr, *p;
4811     abi_long ret;
4812     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4813     uint32_t outbufsz;
4814     int free_fm = 0;
4815 
4816     assert(arg_type[0] == TYPE_PTR);
4817     assert(ie->access == IOC_RW);
4818     arg_type++;
4819     target_size_in = thunk_type_size(arg_type, 0);
4820     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4821     if (!argptr) {
4822         return -TARGET_EFAULT;
4823     }
4824     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4825     unlock_user(argptr, arg, 0);
4826     fm = (struct fiemap *)buf_temp;
4827     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4828         return -TARGET_EINVAL;
4829     }
4830 
4831     outbufsz = sizeof (*fm) +
4832         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4833 
4834     if (outbufsz > MAX_STRUCT_SIZE) {
4835         /* We can't fit all the extents into the fixed size buffer.
4836          * Allocate one that is large enough and use it instead.
4837          */
4838         fm = g_try_malloc(outbufsz);
4839         if (!fm) {
4840             return -TARGET_ENOMEM;
4841         }
4842         memcpy(fm, buf_temp, sizeof(struct fiemap));
4843         free_fm = 1;
4844     }
4845     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4846     if (!is_error(ret)) {
4847         target_size_out = target_size_in;
4848         /* An extent_count of 0 means we were only counting the extents
4849          * so there are no structs to copy
4850          */
4851         if (fm->fm_extent_count != 0) {
4852             target_size_out += fm->fm_mapped_extents * extent_size;
4853         }
4854         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4855         if (!argptr) {
4856             ret = -TARGET_EFAULT;
4857         } else {
4858             /* Convert the struct fiemap */
4859             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4860             if (fm->fm_extent_count != 0) {
4861                 p = argptr + target_size_in;
4862                 /* ...and then all the struct fiemap_extents */
4863                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4864                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4865                                   THUNK_TARGET);
4866                     p += extent_size;
4867                 }
4868             }
4869             unlock_user(argptr, arg, target_size_out);
4870         }
4871     }
4872     if (free_fm) {
4873         g_free(fm);
4874     }
4875     return ret;
4876 }
4877 #endif
4878 
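/*
 * SIOCGIFCONF: struct ifconf carries a length and a pointer to an array
 * of struct ifreq.  Target and host ifreq sizes can differ, so we size a
 * host buffer for the same number of entries, let the host fill it in,
 * and then convert the entries one by one back into the guest buffer.
 */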
4879 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4880                                 int fd, int cmd, abi_long arg)
4881 {
4882     const argtype *arg_type = ie->arg_type;
4883     int target_size;
4884     void *argptr;
4885     int ret;
4886     struct ifconf *host_ifconf;
4887     uint32_t outbufsz;
4888     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4889     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4890     int target_ifreq_size;
4891     int nb_ifreq;
4892     int free_buf = 0;
4893     int i;
4894     int target_ifc_len;
4895     abi_long target_ifc_buf;
4896     int host_ifc_len;
4897     char *host_ifc_buf;
4898 
4899     assert(arg_type[0] == TYPE_PTR);
4900     assert(ie->access == IOC_RW);
4901 
4902     arg_type++;
4903     target_size = thunk_type_size(arg_type, 0);
4904 
4905     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4906     if (!argptr)
4907         return -TARGET_EFAULT;
4908     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4909     unlock_user(argptr, arg, 0);
4910 
4911     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4912     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4913     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4914 
4915     if (target_ifc_buf != 0) {
4916         target_ifc_len = host_ifconf->ifc_len;
4917         nb_ifreq = target_ifc_len / target_ifreq_size;
4918         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4919 
4920         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4921         if (outbufsz > MAX_STRUCT_SIZE) {
4922             /*
4923              * We can't fit all the ifreq entries into the fixed size buffer.
4924              * Allocate one that is large enough and use it instead.
4925              */
4926             host_ifconf = g_try_malloc(outbufsz);
4927             if (!host_ifconf) {
4928                 return -TARGET_ENOMEM;
4929             }
4930             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4931             free_buf = 1;
4932         }
4933         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4934 
4935         host_ifconf->ifc_len = host_ifc_len;
4936     } else {
4937         host_ifc_buf = NULL;
4938     }
4939     host_ifconf->ifc_buf = host_ifc_buf;
4940 
4941     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4942     if (!is_error(ret)) {
4943         /* convert host ifc_len to target ifc_len */
4944 
4945         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4946         target_ifc_len = nb_ifreq * target_ifreq_size;
4947         host_ifconf->ifc_len = target_ifc_len;
4948 
4949         /* restore target ifc_buf */
4950 
4951         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4952 
4953         /* copy struct ifconf to target user */
4954 
4955         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4956         if (!argptr)
4957             return -TARGET_EFAULT;
4958         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4959         unlock_user(argptr, arg, target_size);
4960 
4961         if (target_ifc_buf != 0) {
4962             /* copy ifreq[] to target user */
4963             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4964             for (i = 0; i < nb_ifreq ; i++) {
4965                 thunk_convert(argptr + i * target_ifreq_size,
4966                               host_ifc_buf + i * sizeof(struct ifreq),
4967                               ifreq_arg_type, THUNK_TARGET);
4968             }
4969             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4970         }
4971     }
4972 
4973     if (free_buf) {
4974         g_free(host_ifconf);
4975     }
4976 
4977     return ret;
4978 }
4979 
4980 #if defined(CONFIG_USBFS)
4981 #if HOST_LONG_BITS > 64
4982 #error USBDEVFS thunks do not support >64 bit hosts yet.
4983 #endif
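/*
 * Book-keeping wrapper for an asynchronous USB request: it pairs the
 * host usbdevfs_urb with the guest URB address and guest buffer it was
 * built from.  REAPURB only hands back a pointer to the host urb, so we
 * recover the containing live_urb with offsetof() and use the hashtable
 * below to look URBs up again by guest address (e.g. for DISCARDURB).
 */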
4984 struct live_urb {
4985     uint64_t target_urb_adr;
4986     uint64_t target_buf_adr;
4987     char *target_buf_ptr;
4988     struct usbdevfs_urb host_urb;
4989 };
4990 
4991 static GHashTable *usbdevfs_urb_hashtable(void)
4992 {
4993     static GHashTable *urb_hashtable;
4994 
4995     if (!urb_hashtable) {
4996         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4997     }
4998     return urb_hashtable;
4999 }
5000 
5001 static void urb_hashtable_insert(struct live_urb *urb)
5002 {
5003     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5004     g_hash_table_insert(urb_hashtable, urb, urb);
5005 }
5006 
5007 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5008 {
5009     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5010     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5011 }
5012 
5013 static void urb_hashtable_remove(struct live_urb *urb)
5014 {
5015     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5016     g_hash_table_remove(urb_hashtable, urb);
5017 }
5018 
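/*
 * REAPURB/REAPURBNDELAY: the host returns a pointer to one of our
 * host_urb fields; convert the completed URB back into the guest's
 * usbdevfs_urb, unlock the data buffer, and write the guest URB address
 * into the guest's result pointer.
 */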
5019 static abi_long
5020 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5021                           int fd, int cmd, abi_long arg)
5022 {
5023     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5024     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5025     struct live_urb *lurb;
5026     void *argptr;
5027     uint64_t hurb;
5028     int target_size;
5029     uintptr_t target_urb_adr;
5030     abi_long ret;
5031 
5032     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5033 
5034     memset(buf_temp, 0, sizeof(uint64_t));
5035     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5036     if (is_error(ret)) {
5037         return ret;
5038     }
5039 
5040     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5041     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5042     if (!lurb->target_urb_adr) {
5043         return -TARGET_EFAULT;
5044     }
5045     urb_hashtable_remove(lurb);
5046     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5047         lurb->host_urb.buffer_length);
5048     lurb->target_buf_ptr = NULL;
5049 
5050     /* restore the guest buffer pointer */
5051     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5052 
5053     /* update the guest urb struct */
5054     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5055     if (!argptr) {
5056         g_free(lurb);
5057         return -TARGET_EFAULT;
5058     }
5059     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5060     unlock_user(argptr, lurb->target_urb_adr, target_size);
5061 
5062     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5063     /* write back the urb handle */
5064     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5065     if (!argptr) {
5066         g_free(lurb);
5067         return -TARGET_EFAULT;
5068     }
5069 
5070     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5071     target_urb_adr = lurb->target_urb_adr;
5072     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5073     unlock_user(argptr, arg, target_size);
5074 
5075     g_free(lurb);
5076     return ret;
5077 }
5078 
5079 static abi_long
5080 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5081                              uint8_t *buf_temp __attribute__((unused)),
5082                              int fd, int cmd, abi_long arg)
5083 {
5084     struct live_urb *lurb;
5085 
5086     /* map target address back to host URB with metadata. */
5087     lurb = urb_hashtable_lookup(arg);
5088     if (!lurb) {
5089         return -TARGET_EFAULT;
5090     }
5091     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5092 }
5093 
5094 static abi_long
5095 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5096                             int fd, int cmd, abi_long arg)
5097 {
5098     const argtype *arg_type = ie->arg_type;
5099     int target_size;
5100     abi_long ret;
5101     void *argptr;
5102     int rw_dir;
5103     struct live_urb *lurb;
5104 
5105     /*
5106      * Each submitted URB needs to map to a unique ID for the
5107      * kernel, and that unique ID needs to be a pointer to
5108      * host memory.  Hence, we need to malloc for each URB.
5109      * Isochronous transfers have a variable-length struct.
5110      */
5111     arg_type++;
5112     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5113 
5114     /* construct host copy of urb and metadata */
5115     lurb = g_try_new0(struct live_urb, 1);
5116     if (!lurb) {
5117         return -TARGET_ENOMEM;
5118     }
5119 
5120     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5121     if (!argptr) {
5122         g_free(lurb);
5123         return -TARGET_EFAULT;
5124     }
5125     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5126     unlock_user(argptr, arg, 0);
5127 
5128     lurb->target_urb_adr = arg;
5129     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5130 
5131     /* buffer space used depends on endpoint type so lock the entire buffer */
5132     /* control type urbs should check the buffer contents for true direction */
5133     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5134     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5135         lurb->host_urb.buffer_length, 1);
5136     if (lurb->target_buf_ptr == NULL) {
5137         g_free(lurb);
5138         return -TARGET_EFAULT;
5139     }
5140 
5141     /* update buffer pointer in host copy */
5142     lurb->host_urb.buffer = lurb->target_buf_ptr;
5143 
5144     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5145     if (is_error(ret)) {
5146         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5147         g_free(lurb);
5148     } else {
5149         urb_hashtable_insert(lurb);
5150     }
5151 
5152     return ret;
5153 }
5154 #endif /* CONFIG_USBFS */
5155 
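/*
 * Device-mapper ioctls use a single struct dm_ioctl header followed by a
 * variable-size payload at data_start within a data_size buffer, so the
 * generic thunk machinery cannot handle them.  We convert the header with
 * the thunk, then convert the payload by hand per command, both on the
 * way in and on the way out.
 */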
5156 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5157                             int cmd, abi_long arg)
5158 {
5159     void *argptr;
5160     struct dm_ioctl *host_dm;
5161     abi_long guest_data;
5162     uint32_t guest_data_size;
5163     int target_size;
5164     const argtype *arg_type = ie->arg_type;
5165     abi_long ret;
5166     void *big_buf = NULL;
5167     char *host_data;
5168 
5169     arg_type++;
5170     target_size = thunk_type_size(arg_type, 0);
5171     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5172     if (!argptr) {
5173         ret = -TARGET_EFAULT;
5174         goto out;
5175     }
5176     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5177     unlock_user(argptr, arg, 0);
5178 
5179     /* buf_temp is too small, so fetch things into a bigger buffer */
5180     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5181     memcpy(big_buf, buf_temp, target_size);
5182     buf_temp = big_buf;
5183     host_dm = big_buf;
5184 
5185     guest_data = arg + host_dm->data_start;
5186     if ((guest_data - arg) < 0) {
5187         ret = -TARGET_EINVAL;
5188         goto out;
5189     }
5190     guest_data_size = host_dm->data_size - host_dm->data_start;
5191     host_data = (char*)host_dm + host_dm->data_start;
5192 
5193     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5194     if (!argptr) {
5195         ret = -TARGET_EFAULT;
5196         goto out;
5197     }
5198 
5199     switch (ie->host_cmd) {
5200     case DM_REMOVE_ALL:
5201     case DM_LIST_DEVICES:
5202     case DM_DEV_CREATE:
5203     case DM_DEV_REMOVE:
5204     case DM_DEV_SUSPEND:
5205     case DM_DEV_STATUS:
5206     case DM_DEV_WAIT:
5207     case DM_TABLE_STATUS:
5208     case DM_TABLE_CLEAR:
5209     case DM_TABLE_DEPS:
5210     case DM_LIST_VERSIONS:
5211         /* no input data */
5212         break;
5213     case DM_DEV_RENAME:
5214     case DM_DEV_SET_GEOMETRY:
5215         /* data contains only strings */
5216         memcpy(host_data, argptr, guest_data_size);
5217         break;
5218     case DM_TARGET_MSG:
5219         memcpy(host_data, argptr, guest_data_size);
5220         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5221         break;
5222     case DM_TABLE_LOAD:
5223     {
5224         void *gspec = argptr;
5225         void *cur_data = host_data;
5226         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5227         int spec_size = thunk_type_size(arg_type, 0);
5228         int i;
5229 
5230         for (i = 0; i < host_dm->target_count; i++) {
5231             struct dm_target_spec *spec = cur_data;
5232             uint32_t next;
5233             int slen;
5234 
5235             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5236             slen = strlen((char*)gspec + spec_size) + 1;
5237             next = spec->next;
5238             spec->next = sizeof(*spec) + slen;
5239             strcpy((char*)&spec[1], gspec + spec_size);
5240             gspec += next;
5241             cur_data += spec->next;
5242         }
5243         break;
5244     }
5245     default:
5246         ret = -TARGET_EINVAL;
5247         unlock_user(argptr, guest_data, 0);
5248         goto out;
5249     }
5250     unlock_user(argptr, guest_data, 0);
5251 
5252     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5253     if (!is_error(ret)) {
5254         guest_data = arg + host_dm->data_start;
5255         guest_data_size = host_dm->data_size - host_dm->data_start;
5256         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5257         switch (ie->host_cmd) {
5258         case DM_REMOVE_ALL:
5259         case DM_DEV_CREATE:
5260         case DM_DEV_REMOVE:
5261         case DM_DEV_RENAME:
5262         case DM_DEV_SUSPEND:
5263         case DM_DEV_STATUS:
5264         case DM_TABLE_LOAD:
5265         case DM_TABLE_CLEAR:
5266         case DM_TARGET_MSG:
5267         case DM_DEV_SET_GEOMETRY:
5268             /* no return data */
5269             break;
5270         case DM_LIST_DEVICES:
5271         {
5272             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5273             uint32_t remaining_data = guest_data_size;
5274             void *cur_data = argptr;
5275             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5276             int nl_size = 12; /* can't use thunk_size due to alignment */
5277 
5278             while (1) {
5279                 uint32_t next = nl->next;
5280                 if (next) {
5281                     nl->next = nl_size + (strlen(nl->name) + 1);
5282                 }
5283                 if (remaining_data < nl->next) {
5284                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5285                     break;
5286                 }
5287                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5288                 strcpy(cur_data + nl_size, nl->name);
5289                 cur_data += nl->next;
5290                 remaining_data -= nl->next;
5291                 if (!next) {
5292                     break;
5293                 }
5294                 nl = (void*)nl + next;
5295             }
5296             break;
5297         }
5298         case DM_DEV_WAIT:
5299         case DM_TABLE_STATUS:
5300         {
5301             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5302             void *cur_data = argptr;
5303             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5304             int spec_size = thunk_type_size(arg_type, 0);
5305             int i;
5306 
5307             for (i = 0; i < host_dm->target_count; i++) {
5308                 uint32_t next = spec->next;
5309                 int slen = strlen((char*)&spec[1]) + 1;
5310                 spec->next = (cur_data - argptr) + spec_size + slen;
5311                 if (guest_data_size < spec->next) {
5312                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5313                     break;
5314                 }
5315                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5316                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5317                 cur_data = argptr + spec->next;
5318                 spec = (void*)host_dm + host_dm->data_start + next;
5319             }
5320             break;
5321         }
5322         case DM_TABLE_DEPS:
5323         {
5324             void *hdata = (void*)host_dm + host_dm->data_start;
5325             int count = *(uint32_t*)hdata;
5326             uint64_t *hdev = hdata + 8;
5327             uint64_t *gdev = argptr + 8;
5328             int i;
5329 
5330             *(uint32_t*)argptr = tswap32(count);
5331             for (i = 0; i < count; i++) {
5332                 *gdev = tswap64(*hdev);
5333                 gdev++;
5334                 hdev++;
5335             }
5336             break;
5337         }
5338         case DM_LIST_VERSIONS:
5339         {
5340             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5341             uint32_t remaining_data = guest_data_size;
5342             void *cur_data = argptr;
5343             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5344             int vers_size = thunk_type_size(arg_type, 0);
5345 
5346             while (1) {
5347                 uint32_t next = vers->next;
5348                 if (next) {
5349                     vers->next = vers_size + (strlen(vers->name) + 1);
5350                 }
5351                 if (remaining_data < vers->next) {
5352                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5353                     break;
5354                 }
5355                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5356                 strcpy(cur_data + vers_size, vers->name);
5357                 cur_data += vers->next;
5358                 remaining_data -= vers->next;
5359                 if (!next) {
5360                     break;
5361                 }
5362                 vers = (void*)vers + next;
5363             }
5364             break;
5365         }
5366         default:
5367             unlock_user(argptr, guest_data, 0);
5368             ret = -TARGET_EINVAL;
5369             goto out;
5370         }
5371         unlock_user(argptr, guest_data, guest_data_size);
5372 
5373         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5374         if (!argptr) {
5375             ret = -TARGET_EFAULT;
5376             goto out;
5377         }
5378         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5379         unlock_user(argptr, arg, target_size);
5380     }
5381 out:
5382     g_free(big_buf);
5383     return ret;
5384 }
5385 
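/*
 * BLKPG: struct blkpg_ioctl_arg contains a pointer to a struct
 * blkpg_partition payload.  Convert both levels, point the host copy at
 * a local struct and only then issue the ioctl; only the ADD/DEL
 * partition opcodes are supported here.
 */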
5386 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5387                                int cmd, abi_long arg)
5388 {
5389     void *argptr;
5390     int target_size;
5391     const argtype *arg_type = ie->arg_type;
5392     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5393     abi_long ret;
5394 
5395     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5396     struct blkpg_partition host_part;
5397 
5398     /* Read and convert blkpg */
5399     arg_type++;
5400     target_size = thunk_type_size(arg_type, 0);
5401     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5402     if (!argptr) {
5403         ret = -TARGET_EFAULT;
5404         goto out;
5405     }
5406     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5407     unlock_user(argptr, arg, 0);
5408 
5409     switch (host_blkpg->op) {
5410     case BLKPG_ADD_PARTITION:
5411     case BLKPG_DEL_PARTITION:
5412         /* payload is struct blkpg_partition */
5413         break;
5414     default:
5415         /* Unknown opcode */
5416         ret = -TARGET_EINVAL;
5417         goto out;
5418     }
5419 
5420     /* Read and convert blkpg->data */
5421     arg = (abi_long)(uintptr_t)host_blkpg->data;
5422     target_size = thunk_type_size(part_arg_type, 0);
5423     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5424     if (!argptr) {
5425         ret = -TARGET_EFAULT;
5426         goto out;
5427     }
5428     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5429     unlock_user(argptr, arg, 0);
5430 
5431     /* Swizzle the data pointer to our local copy and call! */
5432     host_blkpg->data = &host_part;
5433     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5434 
5435 out:
5436     return ret;
5437 }
5438 
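/*
 * struct rtentry carries an rt_dev pointer to a device-name string, so
 * convert the structure field by field; the guest string is locked into
 * host memory for the duration of the ioctl and unlocked afterwards.
 */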
5439 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5440                                 int fd, int cmd, abi_long arg)
5441 {
5442     const argtype *arg_type = ie->arg_type;
5443     const StructEntry *se;
5444     const argtype *field_types;
5445     const int *dst_offsets, *src_offsets;
5446     int target_size;
5447     void *argptr;
5448     abi_ulong *target_rt_dev_ptr = NULL;
5449     unsigned long *host_rt_dev_ptr = NULL;
5450     abi_long ret;
5451     int i;
5452 
5453     assert(ie->access == IOC_W);
5454     assert(*arg_type == TYPE_PTR);
5455     arg_type++;
5456     assert(*arg_type == TYPE_STRUCT);
5457     target_size = thunk_type_size(arg_type, 0);
5458     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5459     if (!argptr) {
5460         return -TARGET_EFAULT;
5461     }
5462     arg_type++;
5463     assert(*arg_type == (int)STRUCT_rtentry);
5464     se = struct_entries + *arg_type++;
5465     assert(se->convert[0] == NULL);
5466     /* convert struct here to be able to catch rt_dev string */
5467     field_types = se->field_types;
5468     dst_offsets = se->field_offsets[THUNK_HOST];
5469     src_offsets = se->field_offsets[THUNK_TARGET];
5470     for (i = 0; i < se->nb_fields; i++) {
5471         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5472             assert(*field_types == TYPE_PTRVOID);
5473             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5474             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5475             if (*target_rt_dev_ptr != 0) {
5476                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5477                                                   tswapal(*target_rt_dev_ptr));
5478                 if (!*host_rt_dev_ptr) {
5479                     unlock_user(argptr, arg, 0);
5480                     return -TARGET_EFAULT;
5481                 }
5482             } else {
5483                 *host_rt_dev_ptr = 0;
5484             }
5485             field_types++;
5486             continue;
5487         }
5488         field_types = thunk_convert(buf_temp + dst_offsets[i],
5489                                     argptr + src_offsets[i],
5490                                     field_types, THUNK_HOST);
5491     }
5492     unlock_user(argptr, arg, 0);
5493 
5494     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5495 
5496     assert(host_rt_dev_ptr != NULL);
5497     assert(target_rt_dev_ptr != NULL);
5498     if (*host_rt_dev_ptr != 0) {
5499         unlock_user((void *)*host_rt_dev_ptr,
5500                     *target_rt_dev_ptr, 0);
5501     }
5502     return ret;
5503 }
5504 
5505 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5506                                      int fd, int cmd, abi_long arg)
5507 {
5508     int sig = target_to_host_signal(arg);
5509     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5510 }
5511 
5512 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5513                                     int fd, int cmd, abi_long arg)
5514 {
5515     struct timeval tv;
5516     abi_long ret;
5517 
5518     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5519     if (is_error(ret)) {
5520         return ret;
5521     }
5522 
5523     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5524         if (copy_to_user_timeval(arg, &tv)) {
5525             return -TARGET_EFAULT;
5526         }
5527     } else {
5528         if (copy_to_user_timeval64(arg, &tv)) {
5529             return -TARGET_EFAULT;
5530         }
5531     }
5532 
5533     return ret;
5534 }
5535 
5536 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5537                                       int fd, int cmd, abi_long arg)
5538 {
5539     struct timespec ts;
5540     abi_long ret;
5541 
5542     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5543     if (is_error(ret)) {
5544         return ret;
5545     }
5546 
5547     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5548         if (host_to_target_timespec(arg, &ts)) {
5549             return -TARGET_EFAULT;
5550         }
5551     } else {
5552         if (host_to_target_timespec64(arg, &ts)) {
5553             return -TARGET_EFAULT;
5554         }
5555     }
5556 
5557     return ret;
5558 }
5559 
5560 #ifdef TIOCGPTPEER
5561 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5562                                      int fd, int cmd, abi_long arg)
5563 {
5564     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5565     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5566 }
5567 #endif
5568 
5569 #ifdef HAVE_DRM_H
5570 
5571 static void unlock_drm_version(struct drm_version *host_ver,
5572                                struct target_drm_version *target_ver,
5573                                bool copy)
5574 {
5575     unlock_user(host_ver->name, target_ver->name,
5576                                 copy ? host_ver->name_len : 0);
5577     unlock_user(host_ver->date, target_ver->date,
5578                                 copy ? host_ver->date_len : 0);
5579     unlock_user(host_ver->desc, target_ver->desc,
5580                                 copy ? host_ver->desc_len : 0);
5581 }
5582 
5583 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5584                                           struct target_drm_version *target_ver)
5585 {
5586     memset(host_ver, 0, sizeof(*host_ver));
5587 
5588     __get_user(host_ver->name_len, &target_ver->name_len);
5589     if (host_ver->name_len) {
5590         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5591                                    target_ver->name_len, 0);
5592         if (!host_ver->name) {
5593             return -EFAULT;
5594         }
5595     }
5596 
5597     __get_user(host_ver->date_len, &target_ver->date_len);
5598     if (host_ver->date_len) {
5599         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5600                                    target_ver->date_len, 0);
5601         if (!host_ver->date) {
5602             goto err;
5603         }
5604     }
5605 
5606     __get_user(host_ver->desc_len, &target_ver->desc_len);
5607     if (host_ver->desc_len) {
5608         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5609                                    target_ver->desc_len, 0);
5610         if (!host_ver->desc) {
5611             goto err;
5612         }
5613     }
5614 
5615     return 0;
5616 err:
5617     unlock_drm_version(host_ver, target_ver, false);
5618     return -EFAULT;
5619 }
5620 
5621 static inline void host_to_target_drmversion(
5622                                           struct target_drm_version *target_ver,
5623                                           struct drm_version *host_ver)
5624 {
5625     __put_user(host_ver->version_major, &target_ver->version_major);
5626     __put_user(host_ver->version_minor, &target_ver->version_minor);
5627     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5628     __put_user(host_ver->name_len, &target_ver->name_len);
5629     __put_user(host_ver->date_len, &target_ver->date_len);
5630     __put_user(host_ver->desc_len, &target_ver->desc_len);
5631     unlock_drm_version(host_ver, target_ver, true);
5632 }
5633 
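/*
 * DRM_IOCTL_VERSION: the name/date/desc fields are guest-supplied buffers
 * the kernel fills in, so lock them for writing, run the ioctl on a host
 * copy of struct drm_version, and copy the lengths and strings back on
 * success.
 */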
5634 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5635                              int fd, int cmd, abi_long arg)
5636 {
5637     struct drm_version *ver;
5638     struct target_drm_version *target_ver;
5639     abi_long ret;
5640 
5641     switch (ie->host_cmd) {
5642     case DRM_IOCTL_VERSION:
5643         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5644             return -TARGET_EFAULT;
5645         }
5646         ver = (struct drm_version *)buf_temp;
5647         ret = target_to_host_drmversion(ver, target_ver);
5648         if (!is_error(ret)) {
5649             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5650             if (is_error(ret)) {
5651                 unlock_drm_version(ver, target_ver, false);
5652             } else {
5653                 host_to_target_drmversion(target_ver, ver);
5654             }
5655         }
5656         unlock_user_struct(target_ver, arg, 0);
5657         return ret;
5658     }
5659     return -TARGET_ENOSYS;
5660 }
5661 
5662 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5663                                            struct drm_i915_getparam *gparam,
5664                                            int fd, abi_long arg)
5665 {
5666     abi_long ret;
5667     int value;
5668     struct target_drm_i915_getparam *target_gparam;
5669 
5670     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5671         return -TARGET_EFAULT;
5672     }
5673 
5674     __get_user(gparam->param, &target_gparam->param);
5675     gparam->value = &value;
5676     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5677     put_user_s32(value, target_gparam->value);
5678 
5679     unlock_user_struct(target_gparam, arg, 0);
5680     return ret;
5681 }
5682 
5683 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5684                                   int fd, int cmd, abi_long arg)
5685 {
5686     switch (ie->host_cmd) {
5687     case DRM_IOCTL_I915_GETPARAM:
5688         return do_ioctl_drm_i915_getparam(ie,
5689                                           (struct drm_i915_getparam *)buf_temp,
5690                                           fd, arg);
5691     default:
5692         return -TARGET_ENOSYS;
5693     }
5694 }
5695 
5696 #endif
5697 
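/*
 * TUNSETTXFILTER: struct tun_filter is variable length, with 'count' MAC
 * addresses of ETH_ALEN bytes each following the fixed header, so copy
 * the header and the address array from the guest separately.
 */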
5698 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5699                                         int fd, int cmd, abi_long arg)
5700 {
5701     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5702     struct tun_filter *target_filter;
5703     char *target_addr;
5704 
5705     assert(ie->access == IOC_W);
5706 
5707     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5708     if (!target_filter) {
5709         return -TARGET_EFAULT;
5710     }
5711     filter->flags = tswap16(target_filter->flags);
5712     filter->count = tswap16(target_filter->count);
5713     unlock_user(target_filter, arg, 0);
5714 
5715     if (filter->count) {
5716         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5717             MAX_STRUCT_SIZE) {
5718             return -TARGET_EFAULT;
5719         }
5720 
5721         target_addr = lock_user(VERIFY_READ,
5722                                 arg + offsetof(struct tun_filter, addr),
5723                                 filter->count * ETH_ALEN, 1);
5724         if (!target_addr) {
5725             return -TARGET_EFAULT;
5726         }
5727         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5728         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5729     }
5730 
5731     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5732 }
5733 
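/*
 * Each line of ioctls.h becomes one table entry via the macros below.
 * For illustration only (hypothetical command, not necessarily present in
 * ioctls.h), IOCTL(FOOIOC, IOC_R, MK_PTR(TYPE_INT)) would expand to
 *   { TARGET_FOOIOC, FOOIOC, "FOOIOC", IOC_R, 0, { MK_PTR(TYPE_INT) } },
 * IOCTL_SPECIAL() additionally names a custom do_ioctl callback, and
 * IOCTL_IGNORE() leaves host_cmd zero so do_ioctl() returns
 * -TARGET_ENOSYS for it.
 */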
5734 IOCTLEntry ioctl_entries[] = {
5735 #define IOCTL(cmd, access, ...) \
5736     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5737 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5738     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5739 #define IOCTL_IGNORE(cmd) \
5740     { TARGET_ ## cmd, 0, #cmd },
5741 #include "ioctls.h"
5742     { 0, 0, },
5743 };
5744 
5745 /* ??? Implement proper locking for ioctls.  */
5746 /* do_ioctl() must return target values and target errnos. */
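/*
 * Generic dispatcher: look the command up in ioctl_entries[], hand it to
 * a custom do_ioctl handler if one is registered, and otherwise convert
 * the argument with the thunk machinery according to the access mode
 * (IOC_W copies the struct in before the call, IOC_R copies it out
 * afterwards, IOC_RW does both).
 */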
5747 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5748 {
5749     const IOCTLEntry *ie;
5750     const argtype *arg_type;
5751     abi_long ret;
5752     uint8_t buf_temp[MAX_STRUCT_SIZE];
5753     int target_size;
5754     void *argptr;
5755 
5756     ie = ioctl_entries;
5757     for (;;) {
5758         if (ie->target_cmd == 0) {
5759             qemu_log_mask(
5760                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5761             return -TARGET_ENOSYS;
5762         }
5763         if (ie->target_cmd == cmd)
5764             break;
5765         ie++;
5766     }
5767     arg_type = ie->arg_type;
5768     if (ie->do_ioctl) {
5769         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5770     } else if (!ie->host_cmd) {
5771         /* Some architectures define BSD ioctls in their headers
5772            that are not implemented in Linux.  */
5773         return -TARGET_ENOSYS;
5774     }
5775 
5776     switch (arg_type[0]) {
5777     case TYPE_NULL:
5778         /* no argument */
5779         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5780         break;
5781     case TYPE_PTRVOID:
5782     case TYPE_INT:
5783     case TYPE_LONG:
5784     case TYPE_ULONG:
5785         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5786         break;
5787     case TYPE_PTR:
5788         arg_type++;
5789         target_size = thunk_type_size(arg_type, 0);
5790         switch (ie->access) {
5791         case IOC_R:
5792             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5793             if (!is_error(ret)) {
5794                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5795                 if (!argptr)
5796                     return -TARGET_EFAULT;
5797                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5798                 unlock_user(argptr, arg, target_size);
5799             }
5800             break;
5801         case IOC_W:
5802             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5803             if (!argptr)
5804                 return -TARGET_EFAULT;
5805             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5806             unlock_user(argptr, arg, 0);
5807             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5808             break;
5809         default:
5810         case IOC_RW:
5811             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5812             if (!argptr)
5813                 return -TARGET_EFAULT;
5814             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5815             unlock_user(argptr, arg, 0);
5816             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5817             if (!is_error(ret)) {
5818                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5819                 if (!argptr)
5820                     return -TARGET_EFAULT;
5821                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5822                 unlock_user(argptr, arg, target_size);
5823             }
5824             break;
5825         }
5826         break;
5827     default:
5828         qemu_log_mask(LOG_UNIMP,
5829                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5830                       (long)cmd, arg_type[0]);
5831         ret = -TARGET_ENOSYS;
5832         break;
5833     }
5834     return ret;
5835 }
5836 
5837 static const bitmask_transtbl iflag_tbl[] = {
5838         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5839         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5840         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5841         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5842         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5843         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5844         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5845         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5846         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5847         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5848         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5849         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5850         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5851         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5852         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5853         { 0, 0, 0, 0 }
5854 };
5855 
5856 static const bitmask_transtbl oflag_tbl[] = {
5857 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5858 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5859 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5860 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5861 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5862 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5863 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5864 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5865 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5866 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5867 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5868 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5869 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5870 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5871 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5872 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5873 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5874 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5875 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5876 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5877 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5878 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5879 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5880 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5881 	{ 0, 0, 0, 0 }
5882 };
5883 
5884 static const bitmask_transtbl cflag_tbl[] = {
5885 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5886 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5887 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5888 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5889 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5890 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5891 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5892 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5893 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5894 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5895 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5896 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5897 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5898 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5899 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5900 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5901 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5902 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5903 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5904 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5905 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5906 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5907 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5908 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5909 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5910 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5911 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5912 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5913 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5914 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5915 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5916 	{ 0, 0, 0, 0 }
5917 };
5918 
5919 static const bitmask_transtbl lflag_tbl[] = {
5920   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5921   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5922   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5923   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5924   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5925   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5926   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5927   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5928   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5929   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5930   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5931   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5932   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5933   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5934   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5935   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5936   { 0, 0, 0, 0 }
5937 };
5938 
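/*
 * Target and host termios layouts differ, so the four flag words are
 * translated through the bitmask tables above and the c_cc control
 * characters are copied index by index between the TARGET_V* and host
 * V* slots.
 */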
5939 static void target_to_host_termios (void *dst, const void *src)
5940 {
5941     struct host_termios *host = dst;
5942     const struct target_termios *target = src;
5943 
5944     host->c_iflag =
5945         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5946     host->c_oflag =
5947         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5948     host->c_cflag =
5949         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5950     host->c_lflag =
5951         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5952     host->c_line = target->c_line;
5953 
5954     memset(host->c_cc, 0, sizeof(host->c_cc));
5955     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5956     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5957     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5958     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5959     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5960     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5961     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5962     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5963     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5964     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5965     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5966     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5967     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5968     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5969     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5970     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5971     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5972 }
5973 
5974 static void host_to_target_termios (void *dst, const void *src)
5975 {
5976     struct target_termios *target = dst;
5977     const struct host_termios *host = src;
5978 
5979     target->c_iflag =
5980         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5981     target->c_oflag =
5982         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5983     target->c_cflag =
5984         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5985     target->c_lflag =
5986         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5987     target->c_line = host->c_line;
5988 
5989     memset(target->c_cc, 0, sizeof(target->c_cc));
5990     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5991     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5992     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5993     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5994     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5995     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5996     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5997     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5998     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5999     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6000     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6001     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6002     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6003     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6004     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6005     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6006     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6007 }
6008 
6009 static const StructEntry struct_termios_def = {
6010     .convert = { host_to_target_termios, target_to_host_termios },
6011     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6012     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6013     .print = print_termios,
6014 };
6015 
6016 static const bitmask_transtbl mmap_flags_tbl[] = {
6017     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6018     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6019     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6020     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6021       MAP_ANONYMOUS, MAP_ANONYMOUS },
6022     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6023       MAP_GROWSDOWN, MAP_GROWSDOWN },
6024     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6025       MAP_DENYWRITE, MAP_DENYWRITE },
6026     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6027       MAP_EXECUTABLE, MAP_EXECUTABLE },
6028     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6029     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6030       MAP_NORESERVE, MAP_NORESERVE },
6031     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6032     /* MAP_STACK has been ignored by the kernel for quite some time.
6033        Recognize it for the target insofar as we do not want to pass
6034        it through to the host.  */
6035     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6036     { 0, 0, 0, 0 }
6037 };
6038 
6039 /*
6040  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6041  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6042  */
6043 #if defined(TARGET_I386)
6044 
6045 /* NOTE: there is really one LDT for all the threads */
6046 static uint8_t *ldt_table;
6047 
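/*
 * modify_ldt(2) emulation: the guest's LDT lives in guest memory at
 * env->ldt.base (allocated lazily by write_ldt() below) and ldt_table
 * points at it, so read_ldt() just copies out up to bytecount bytes.
 */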
6048 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6049 {
6050     int size;
6051     void *p;
6052 
6053     if (!ldt_table)
6054         return 0;
6055     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6056     if (size > bytecount)
6057         size = bytecount;
6058     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6059     if (!p)
6060         return -TARGET_EFAULT;
6061     /* ??? Should this be byteswapped?  */
6062     memcpy(p, ldt_table, size);
6063     unlock_user(p, ptr, size);
6064     return size;
6065 }
6066 
6067 /* XXX: add locking support */
6068 static abi_long write_ldt(CPUX86State *env,
6069                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6070 {
6071     struct target_modify_ldt_ldt_s ldt_info;
6072     struct target_modify_ldt_ldt_s *target_ldt_info;
6073     int seg_32bit, contents, read_exec_only, limit_in_pages;
6074     int seg_not_present, useable, lm;
6075     uint32_t *lp, entry_1, entry_2;
6076 
6077     if (bytecount != sizeof(ldt_info))
6078         return -TARGET_EINVAL;
6079     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6080         return -TARGET_EFAULT;
6081     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6082     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6083     ldt_info.limit = tswap32(target_ldt_info->limit);
6084     ldt_info.flags = tswap32(target_ldt_info->flags);
6085     unlock_user_struct(target_ldt_info, ptr, 0);
6086 
6087     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6088         return -TARGET_EINVAL;
6089     seg_32bit = ldt_info.flags & 1;
6090     contents = (ldt_info.flags >> 1) & 3;
6091     read_exec_only = (ldt_info.flags >> 3) & 1;
6092     limit_in_pages = (ldt_info.flags >> 4) & 1;
6093     seg_not_present = (ldt_info.flags >> 5) & 1;
6094     useable = (ldt_info.flags >> 6) & 1;
6095 #ifdef TARGET_ABI32
6096     lm = 0;
6097 #else
6098     lm = (ldt_info.flags >> 7) & 1;
6099 #endif
6100     if (contents == 3) {
6101         if (oldmode)
6102             return -TARGET_EINVAL;
6103         if (seg_not_present == 0)
6104             return -TARGET_EINVAL;
6105     }
6106     /* allocate the LDT */
6107     if (!ldt_table) {
6108         env->ldt.base = target_mmap(0,
6109                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6110                                     PROT_READ|PROT_WRITE,
6111                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6112         if (env->ldt.base == -1)
6113             return -TARGET_ENOMEM;
6114         memset(g2h_untagged(env->ldt.base), 0,
6115                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6116         env->ldt.limit = 0xffff;
6117         ldt_table = g2h_untagged(env->ldt.base);
6118     }
6119 
6120     /* NOTE: same code as Linux kernel */
6121     /* Allow LDTs to be cleared by the user. */
6122     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6123         if (oldmode ||
6124             (contents == 0		&&
6125              read_exec_only == 1	&&
6126              seg_32bit == 0		&&
6127              limit_in_pages == 0	&&
6128              seg_not_present == 1	&&
6129              useable == 0 )) {
6130             entry_1 = 0;
6131             entry_2 = 0;
6132             goto install;
6133         }
6134     }
6135 
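    /*
     * Assemble the two 32-bit halves of the x86 segment descriptor:
     * entry_1 holds base[15:0] in its upper half and limit[15:0] in its
     * lower half; entry_2 holds base[31:24], base[23:16], limit[19:16]
     * and the access/flag bits.  The constant 0x7000 sets S=1 (code/data
     * descriptor) and DPL=3 (user privilege).
     */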
6136     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6137         (ldt_info.limit & 0x0ffff);
6138     entry_2 = (ldt_info.base_addr & 0xff000000) |
6139         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6140         (ldt_info.limit & 0xf0000) |
6141         ((read_exec_only ^ 1) << 9) |
6142         (contents << 10) |
6143         ((seg_not_present ^ 1) << 15) |
6144         (seg_32bit << 22) |
6145         (limit_in_pages << 23) |
6146         (lm << 21) |
6147         0x7000;
6148     if (!oldmode)
6149         entry_2 |= (useable << 20);
6150 
6151     /* Install the new entry ...  */
6152 install:
6153     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6154     lp[0] = tswap32(entry_1);
6155     lp[1] = tswap32(entry_2);
6156     return 0;
6157 }
6158 
6159 /* specific and weird i386 syscalls */
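/*
 * modify_ldt() func values match the Linux kernel's: 0 reads the LDT,
 * 1 writes an entry using the legacy (oldmode) format, and 0x11 writes
 * using the current format.
 */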
6160 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6161                               unsigned long bytecount)
6162 {
6163     abi_long ret;
6164 
6165     switch (func) {
6166     case 0:
6167         ret = read_ldt(ptr, bytecount);
6168         break;
6169     case 1:
6170         ret = write_ldt(env, ptr, bytecount, 1);
6171         break;
6172     case 0x11:
6173         ret = write_ldt(env, ptr, bytecount, 0);
6174         break;
6175     default:
6176         ret = -TARGET_ENOSYS;
6177         break;
6178     }
6179     return ret;
6180 }
6181 
6182 #if defined(TARGET_ABI32)
6183 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6184 {
6185     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6186     struct target_modify_ldt_ldt_s ldt_info;
6187     struct target_modify_ldt_ldt_s *target_ldt_info;
6188     int seg_32bit, contents, read_exec_only, limit_in_pages;
6189     int seg_not_present, useable, lm;
6190     uint32_t *lp, entry_1, entry_2;
6191     int i;
6192 
6193     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6194     if (!target_ldt_info)
6195         return -TARGET_EFAULT;
6196     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6197     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6198     ldt_info.limit = tswap32(target_ldt_info->limit);
6199     ldt_info.flags = tswap32(target_ldt_info->flags);
6200     if (ldt_info.entry_number == -1) {
6201         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6202             if (gdt_table[i] == 0) {
6203                 ldt_info.entry_number = i;
6204                 target_ldt_info->entry_number = tswap32(i);
6205                 break;
6206             }
6207         }
6208     }
6209     unlock_user_struct(target_ldt_info, ptr, 1);
6210 
6211     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6212         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6213            return -TARGET_EINVAL;
6214     seg_32bit = ldt_info.flags & 1;
6215     contents = (ldt_info.flags >> 1) & 3;
6216     read_exec_only = (ldt_info.flags >> 3) & 1;
6217     limit_in_pages = (ldt_info.flags >> 4) & 1;
6218     seg_not_present = (ldt_info.flags >> 5) & 1;
6219     useable = (ldt_info.flags >> 6) & 1;
6220 #ifdef TARGET_ABI32
6221     lm = 0;
6222 #else
6223     lm = (ldt_info.flags >> 7) & 1;
6224 #endif
6225 
6226     if (contents == 3) {
6227         if (seg_not_present == 0)
6228             return -TARGET_EINVAL;
6229     }
6230 
6231     /* NOTE: same code as Linux kernel */
6232     /* Allow LDTs to be cleared by the user. */
6233     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6234         if ((contents == 0             &&
6235              read_exec_only == 1       &&
6236              seg_32bit == 0            &&
6237              limit_in_pages == 0       &&
6238              seg_not_present == 1      &&
6239              useable == 0 )) {
6240             entry_1 = 0;
6241             entry_2 = 0;
6242             goto install;
6243         }
6244     }
6245 
6246     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6247         (ldt_info.limit & 0x0ffff);
6248     entry_2 = (ldt_info.base_addr & 0xff000000) |
6249         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6250         (ldt_info.limit & 0xf0000) |
6251         ((read_exec_only ^ 1) << 9) |
6252         (contents << 10) |
6253         ((seg_not_present ^ 1) << 15) |
6254         (seg_32bit << 22) |
6255         (limit_in_pages << 23) |
6256         (useable << 20) |
6257         (lm << 21) |
6258         0x7000;
6259 
6260     /* Install the new entry ...  */
6261 install:
6262     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6263     lp[0] = tswap32(entry_1);
6264     lp[1] = tswap32(entry_2);
6265     return 0;
6266 }
6267 
6268 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6269 {
6270     struct target_modify_ldt_ldt_s *target_ldt_info;
6271     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6272     uint32_t base_addr, limit, flags;
6273     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6274     int seg_not_present, useable, lm;
6275     uint32_t *lp, entry_1, entry_2;
6276 
6277     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6278     if (!target_ldt_info)
6279         return -TARGET_EFAULT;
6280     idx = tswap32(target_ldt_info->entry_number);
6281     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6282         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6283         unlock_user_struct(target_ldt_info, ptr, 1);
6284         return -TARGET_EINVAL;
6285     }
6286     lp = (uint32_t *)(gdt_table + idx);
6287     entry_1 = tswap32(lp[0]);
6288     entry_2 = tswap32(lp[1]);
6289 
6290     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6291     contents = (entry_2 >> 10) & 3;
6292     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6293     seg_32bit = (entry_2 >> 22) & 1;
6294     limit_in_pages = (entry_2 >> 23) & 1;
6295     useable = (entry_2 >> 20) & 1;
6296 #ifdef TARGET_ABI32
6297     lm = 0;
6298 #else
6299     lm = (entry_2 >> 21) & 1;
6300 #endif
6301     flags = (seg_32bit << 0) | (contents << 1) |
6302         (read_exec_only << 3) | (limit_in_pages << 4) |
6303         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6304     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6305     base_addr = (entry_1 >> 16) |
6306         (entry_2 & 0xff000000) |
6307         ((entry_2 & 0xff) << 16);
6308     target_ldt_info->base_addr = tswapal(base_addr);
6309     target_ldt_info->limit = tswap32(limit);
6310     target_ldt_info->flags = tswap32(flags);
6311     unlock_user_struct(target_ldt_info, ptr, 1);
6312     return 0;
6313 }
6314 
6315 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6316 {
6317     return -TARGET_ENOSYS;
6318 }
6319 #else
6320 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6321 {
6322     abi_long ret = 0;
6323     abi_ulong val;
6324     int idx;
6325 
6326     switch(code) {
6327     case TARGET_ARCH_SET_GS:
6328     case TARGET_ARCH_SET_FS:
6329         if (code == TARGET_ARCH_SET_GS)
6330             idx = R_GS;
6331         else
6332             idx = R_FS;
6333         cpu_x86_load_seg(env, idx, 0);
6334         env->segs[idx].base = addr;
6335         break;
6336     case TARGET_ARCH_GET_GS:
6337     case TARGET_ARCH_GET_FS:
6338         if (code == TARGET_ARCH_GET_GS)
6339             idx = R_GS;
6340         else
6341             idx = R_FS;
6342         val = env->segs[idx].base;
6343         if (put_user(val, addr, abi_ulong))
6344             ret = -TARGET_EFAULT;
6345         break;
6346     default:
6347         ret = -TARGET_EINVAL;
6348         break;
6349     }
6350     return ret;
6351 }
6352 #endif /* defined(TARGET_ABI32) */
6353 #endif /* defined(TARGET_I386) */
6354 
6355 /*
6356  * These constants are generic.  Supply any that are missing from the host.
6357  */
6358 #ifndef PR_SET_NAME
6359 # define PR_SET_NAME    15
6360 # define PR_GET_NAME    16
6361 #endif
6362 #ifndef PR_SET_FP_MODE
6363 # define PR_SET_FP_MODE 45
6364 # define PR_GET_FP_MODE 46
6365 # define PR_FP_MODE_FR   (1 << 0)
6366 # define PR_FP_MODE_FRE  (1 << 1)
6367 #endif
6368 #ifndef PR_SVE_SET_VL
6369 # define PR_SVE_SET_VL  50
6370 # define PR_SVE_GET_VL  51
6371 # define PR_SVE_VL_LEN_MASK  0xffff
6372 # define PR_SVE_VL_INHERIT   (1 << 17)
6373 #endif
6374 #ifndef PR_PAC_RESET_KEYS
6375 # define PR_PAC_RESET_KEYS  54
6376 # define PR_PAC_APIAKEY   (1 << 0)
6377 # define PR_PAC_APIBKEY   (1 << 1)
6378 # define PR_PAC_APDAKEY   (1 << 2)
6379 # define PR_PAC_APDBKEY   (1 << 3)
6380 # define PR_PAC_APGAKEY   (1 << 4)
6381 #endif
6382 #ifndef PR_SET_TAGGED_ADDR_CTRL
6383 # define PR_SET_TAGGED_ADDR_CTRL 55
6384 # define PR_GET_TAGGED_ADDR_CTRL 56
6385 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6386 #endif
6387 #ifndef PR_MTE_TCF_SHIFT
6388 # define PR_MTE_TCF_SHIFT       1
6389 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6390 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6391 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6392 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6393 # define PR_MTE_TAG_SHIFT       3
6394 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6395 #endif
6396 #ifndef PR_SET_IO_FLUSHER
6397 # define PR_SET_IO_FLUSHER 57
6398 # define PR_GET_IO_FLUSHER 58
6399 #endif
6400 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6401 # define PR_SET_SYSCALL_USER_DISPATCH 59
6402 #endif
6403 #ifndef PR_SME_SET_VL
6404 # define PR_SME_SET_VL  63
6405 # define PR_SME_GET_VL  64
6406 # define PR_SME_VL_LEN_MASK  0xffff
6407 # define PR_SME_VL_INHERIT   (1 << 17)
6408 #endif
6409 
6410 #include "target_prctl.h"
6411 
6412 static abi_long do_prctl_inval0(CPUArchState *env)
6413 {
6414     return -TARGET_EINVAL;
6415 }
6416 
6417 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6418 {
6419     return -TARGET_EINVAL;
6420 }
6421 
6422 #ifndef do_prctl_get_fp_mode
6423 #define do_prctl_get_fp_mode do_prctl_inval0
6424 #endif
6425 #ifndef do_prctl_set_fp_mode
6426 #define do_prctl_set_fp_mode do_prctl_inval1
6427 #endif
6428 #ifndef do_prctl_sve_get_vl
6429 #define do_prctl_sve_get_vl do_prctl_inval0
6430 #endif
6431 #ifndef do_prctl_sve_set_vl
6432 #define do_prctl_sve_set_vl do_prctl_inval1
6433 #endif
6434 #ifndef do_prctl_reset_keys
6435 #define do_prctl_reset_keys do_prctl_inval1
6436 #endif
6437 #ifndef do_prctl_set_tagged_addr_ctrl
6438 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6439 #endif
6440 #ifndef do_prctl_get_tagged_addr_ctrl
6441 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6442 #endif
6443 #ifndef do_prctl_get_unalign
6444 #define do_prctl_get_unalign do_prctl_inval1
6445 #endif
6446 #ifndef do_prctl_set_unalign
6447 #define do_prctl_set_unalign do_prctl_inval1
6448 #endif
6449 #ifndef do_prctl_sme_get_vl
6450 #define do_prctl_sme_get_vl do_prctl_inval0
6451 #endif
6452 #ifndef do_prctl_sme_set_vl
6453 #define do_prctl_sme_set_vl do_prctl_inval1
6454 #endif
6455 
6456 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6457                          abi_long arg3, abi_long arg4, abi_long arg5)
6458 {
6459     abi_long ret;
6460 
6461     switch (option) {
6462     case PR_GET_PDEATHSIG:
6463         {
6464             int deathsig;
6465             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6466                                   arg3, arg4, arg5));
6467             if (!is_error(ret) &&
6468                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6469                 return -TARGET_EFAULT;
6470             }
6471             return ret;
6472         }
6473     case PR_SET_PDEATHSIG:
6474         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6475                                arg3, arg4, arg5));
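    /*
     * PR_GET_NAME / PR_SET_NAME operate on a 16-byte buffer (the kernel's
     * TASK_COMM_LEN), so lock exactly 16 bytes of guest memory.
     */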
6476     case PR_GET_NAME:
6477         {
6478             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6479             if (!name) {
6480                 return -TARGET_EFAULT;
6481             }
6482             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6483                                   arg3, arg4, arg5));
6484             unlock_user(name, arg2, 16);
6485             return ret;
6486         }
6487     case PR_SET_NAME:
6488         {
6489             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6490             if (!name) {
6491                 return -TARGET_EFAULT;
6492             }
6493             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6494                                   arg3, arg4, arg5));
6495             unlock_user(name, arg2, 0);
6496             return ret;
6497         }
6498     case PR_GET_FP_MODE:
6499         return do_prctl_get_fp_mode(env);
6500     case PR_SET_FP_MODE:
6501         return do_prctl_set_fp_mode(env, arg2);
6502     case PR_SVE_GET_VL:
6503         return do_prctl_sve_get_vl(env);
6504     case PR_SVE_SET_VL:
6505         return do_prctl_sve_set_vl(env, arg2);
6506     case PR_SME_GET_VL:
6507         return do_prctl_sme_get_vl(env);
6508     case PR_SME_SET_VL:
6509         return do_prctl_sme_set_vl(env, arg2);
6510     case PR_PAC_RESET_KEYS:
6511         if (arg3 || arg4 || arg5) {
6512             return -TARGET_EINVAL;
6513         }
6514         return do_prctl_reset_keys(env, arg2);
6515     case PR_SET_TAGGED_ADDR_CTRL:
6516         if (arg3 || arg4 || arg5) {
6517             return -TARGET_EINVAL;
6518         }
6519         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6520     case PR_GET_TAGGED_ADDR_CTRL:
6521         if (arg2 || arg3 || arg4 || arg5) {
6522             return -TARGET_EINVAL;
6523         }
6524         return do_prctl_get_tagged_addr_ctrl(env);
6525 
6526     case PR_GET_UNALIGN:
6527         return do_prctl_get_unalign(env, arg2);
6528     case PR_SET_UNALIGN:
6529         return do_prctl_set_unalign(env, arg2);
6530 
6531     case PR_CAP_AMBIENT:
6532     case PR_CAPBSET_READ:
6533     case PR_CAPBSET_DROP:
6534     case PR_GET_DUMPABLE:
6535     case PR_SET_DUMPABLE:
6536     case PR_GET_KEEPCAPS:
6537     case PR_SET_KEEPCAPS:
6538     case PR_GET_SECUREBITS:
6539     case PR_SET_SECUREBITS:
6540     case PR_GET_TIMING:
6541     case PR_SET_TIMING:
6542     case PR_GET_TIMERSLACK:
6543     case PR_SET_TIMERSLACK:
6544     case PR_MCE_KILL:
6545     case PR_MCE_KILL_GET:
6546     case PR_GET_NO_NEW_PRIVS:
6547     case PR_SET_NO_NEW_PRIVS:
6548     case PR_GET_IO_FLUSHER:
6549     case PR_SET_IO_FLUSHER:
6550         /* Some prctl options have no pointer arguments, so we can pass them on. */
6551         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6552 
6553     case PR_GET_CHILD_SUBREAPER:
6554     case PR_SET_CHILD_SUBREAPER:
6555     case PR_GET_SPECULATION_CTRL:
6556     case PR_SET_SPECULATION_CTRL:
6557     case PR_GET_TID_ADDRESS:
6558         /* TODO */
6559         return -TARGET_EINVAL;
6560 
6561     case PR_GET_FPEXC:
6562     case PR_SET_FPEXC:
6563         /* Was used for SPE on PowerPC. */
6564         return -TARGET_EINVAL;
6565 
6566     case PR_GET_ENDIAN:
6567     case PR_SET_ENDIAN:
6568     case PR_GET_FPEMU:
6569     case PR_SET_FPEMU:
6570     case PR_SET_MM:
6571     case PR_GET_SECCOMP:
6572     case PR_SET_SECCOMP:
6573     case PR_SET_SYSCALL_USER_DISPATCH:
6574     case PR_GET_THP_DISABLE:
6575     case PR_SET_THP_DISABLE:
6576     case PR_GET_TSC:
6577     case PR_SET_TSC:
6578         /* Refuse these to prevent the target from disabling functionality we need. */
6579         return -TARGET_EINVAL;
6580 
6581     default:
6582         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6583                       option);
6584         return -TARGET_EINVAL;
6585     }
6586 }
6587 
6588 #define NEW_STACK_SIZE 0x40000
6589 
6590 
6591 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
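/*
 * Bookkeeping shared between do_fork() and clone_func() while a new guest
 * thread starts: the parent fills in env and the tid pointers, the child
 * reports its tid back and signals the condition variable once it is ready.
 */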
6592 typedef struct {
6593     CPUArchState *env;
6594     pthread_mutex_t mutex;
6595     pthread_cond_t cond;
6596     pthread_t thread;
6597     uint32_t tid;
6598     abi_ulong child_tidptr;
6599     abi_ulong parent_tidptr;
6600     sigset_t sigmask;
6601 } new_thread_info;
6602 
6603 static void *clone_func(void *arg)
6604 {
6605     new_thread_info *info = arg;
6606     CPUArchState *env;
6607     CPUState *cpu;
6608     TaskState *ts;
6609 
6610     rcu_register_thread();
6611     tcg_register_thread();
6612     env = info->env;
6613     cpu = env_cpu(env);
6614     thread_cpu = cpu;
6615     ts = (TaskState *)cpu->opaque;
6616     info->tid = sys_gettid();
6617     task_settid(ts);
6618     if (info->child_tidptr)
6619         put_user_u32(info->tid, info->child_tidptr);
6620     if (info->parent_tidptr)
6621         put_user_u32(info->tid, info->parent_tidptr);
6622     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6623     /* Enable signals.  */
6624     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6625     /* Signal to the parent that we're ready.  */
6626     pthread_mutex_lock(&info->mutex);
6627     pthread_cond_broadcast(&info->cond);
6628     pthread_mutex_unlock(&info->mutex);
6629     /* Wait until the parent has finished initializing the tls state.  */
6630     pthread_mutex_lock(&clone_lock);
6631     pthread_mutex_unlock(&clone_lock);
6632     cpu_loop(env);
6633     /* never exits */
6634     return NULL;
6635 }
6636 
6637 /* do_fork() must return host values and target errnos (unlike most
6638    do_*() functions). */
6639 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6640                    abi_ulong parent_tidptr, target_ulong newtls,
6641                    abi_ulong child_tidptr)
6642 {
6643     CPUState *cpu = env_cpu(env);
6644     int ret;
6645     TaskState *ts;
6646     CPUState *new_cpu;
6647     CPUArchState *new_env;
6648     sigset_t sigmask;
6649 
6650     flags &= ~CLONE_IGNORED_FLAGS;
6651 
6652     /* Emulate vfork() with fork() */
6653     if (flags & CLONE_VFORK)
6654         flags &= ~(CLONE_VFORK | CLONE_VM);
6655 
6656     if (flags & CLONE_VM) {
6657         TaskState *parent_ts = (TaskState *)cpu->opaque;
6658         new_thread_info info;
6659         pthread_attr_t attr;
6660 
6661         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6662             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6663             return -TARGET_EINVAL;
6664         }
6665 
6666         ts = g_new0(TaskState, 1);
6667         init_task_state(ts);
6668 
6669         /* Grab a mutex so that thread setup appears atomic.  */
6670         pthread_mutex_lock(&clone_lock);
6671 
6672         /*
6673          * If this is our first additional thread, we need to ensure we
6674          * generate code for parallel execution and flush old translations.
6675          * Do this now so that the copy gets CF_PARALLEL too.
6676          */
6677         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6678             cpu->tcg_cflags |= CF_PARALLEL;
6679             tb_flush(cpu);
6680         }
6681 
6682         /* we create a new CPU instance. */
6683         new_env = cpu_copy(env);
6684         /* Init regs that differ from the parent.  */
6685         cpu_clone_regs_child(new_env, newsp, flags);
6686         cpu_clone_regs_parent(env, flags);
6687         new_cpu = env_cpu(new_env);
6688         new_cpu->opaque = ts;
6689         ts->bprm = parent_ts->bprm;
6690         ts->info = parent_ts->info;
6691         ts->signal_mask = parent_ts->signal_mask;
6692 
6693         if (flags & CLONE_CHILD_CLEARTID) {
6694             ts->child_tidptr = child_tidptr;
6695         }
6696 
6697         if (flags & CLONE_SETTLS) {
6698             cpu_set_tls (new_env, newtls);
6699         }
6700 
6701         memset(&info, 0, sizeof(info));
6702         pthread_mutex_init(&info.mutex, NULL);
6703         pthread_mutex_lock(&info.mutex);
6704         pthread_cond_init(&info.cond, NULL);
6705         info.env = new_env;
6706         if (flags & CLONE_CHILD_SETTID) {
6707             info.child_tidptr = child_tidptr;
6708         }
6709         if (flags & CLONE_PARENT_SETTID) {
6710             info.parent_tidptr = parent_tidptr;
6711         }
6712 
6713         ret = pthread_attr_init(&attr);
6714         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6715         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6716         /* It is not safe to deliver signals until the child has finished
6717            initializing, so temporarily block all signals.  */
6718         sigfillset(&sigmask);
6719         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6720         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6721 
6722         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6723         /* TODO: Free new CPU state if thread creation failed.  */
6724 
6725         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6726         pthread_attr_destroy(&attr);
6727         if (ret == 0) {
6728             /* Wait for the child to initialize.  */
6729             pthread_cond_wait(&info.cond, &info.mutex);
6730             ret = info.tid;
6731         } else {
6732             ret = -1;
6733         }
6734         pthread_mutex_unlock(&info.mutex);
6735         pthread_cond_destroy(&info.cond);
6736         pthread_mutex_destroy(&info.mutex);
6737         pthread_mutex_unlock(&clone_lock);
6738     } else {
6739         /* without CLONE_VM, we consider it a fork */
6740         if (flags & CLONE_INVALID_FORK_FLAGS) {
6741             return -TARGET_EINVAL;
6742         }
6743 
6744         /* We can't support custom termination signals */
6745         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6746             return -TARGET_EINVAL;
6747         }
6748 
6749         if (block_signals()) {
6750             return -QEMU_ERESTARTSYS;
6751         }
6752 
6753         fork_start();
6754         ret = fork();
6755         if (ret == 0) {
6756             /* Child Process.  */
6757             cpu_clone_regs_child(env, newsp, flags);
6758             fork_end(1);
6759             /* There is a race condition here.  The parent process could
6760                theoretically read the TID in the child process before the child
6761                tid is set.  Avoiding it would require either ptrace
6762                (not implemented) or having *_tidptr point at a shared memory
6763                mapping.  We can't repeat the spinlock hack used above because
6764                the child process gets its own copy of the lock.  */
6765             if (flags & CLONE_CHILD_SETTID)
6766                 put_user_u32(sys_gettid(), child_tidptr);
6767             if (flags & CLONE_PARENT_SETTID)
6768                 put_user_u32(sys_gettid(), parent_tidptr);
6769             ts = (TaskState *)cpu->opaque;
6770             if (flags & CLONE_SETTLS)
6771                 cpu_set_tls (env, newtls);
6772             if (flags & CLONE_CHILD_CLEARTID)
6773                 ts->child_tidptr = child_tidptr;
6774         } else {
6775             cpu_clone_regs_parent(env, flags);
6776             fork_end(0);
6777         }
6778     }
6779     return ret;
6780 }
6781 
6782 /* warning: does not handle Linux-specific flags... */
6783 static int target_to_host_fcntl_cmd(int cmd)
6784 {
6785     int ret;
6786 
6787     switch(cmd) {
6788     case TARGET_F_DUPFD:
6789     case TARGET_F_GETFD:
6790     case TARGET_F_SETFD:
6791     case TARGET_F_GETFL:
6792     case TARGET_F_SETFL:
6793     case TARGET_F_OFD_GETLK:
6794     case TARGET_F_OFD_SETLK:
6795     case TARGET_F_OFD_SETLKW:
6796         ret = cmd;
6797         break;
6798     case TARGET_F_GETLK:
6799         ret = F_GETLK64;
6800         break;
6801     case TARGET_F_SETLK:
6802         ret = F_SETLK64;
6803         break;
6804     case TARGET_F_SETLKW:
6805         ret = F_SETLKW64;
6806         break;
6807     case TARGET_F_GETOWN:
6808         ret = F_GETOWN;
6809         break;
6810     case TARGET_F_SETOWN:
6811         ret = F_SETOWN;
6812         break;
6813     case TARGET_F_GETSIG:
6814         ret = F_GETSIG;
6815         break;
6816     case TARGET_F_SETSIG:
6817         ret = F_SETSIG;
6818         break;
6819 #if TARGET_ABI_BITS == 32
6820     case TARGET_F_GETLK64:
6821         ret = F_GETLK64;
6822         break;
6823     case TARGET_F_SETLK64:
6824         ret = F_SETLK64;
6825         break;
6826     case TARGET_F_SETLKW64:
6827         ret = F_SETLKW64;
6828         break;
6829 #endif
6830     case TARGET_F_SETLEASE:
6831         ret = F_SETLEASE;
6832         break;
6833     case TARGET_F_GETLEASE:
6834         ret = F_GETLEASE;
6835         break;
6836 #ifdef F_DUPFD_CLOEXEC
6837     case TARGET_F_DUPFD_CLOEXEC:
6838         ret = F_DUPFD_CLOEXEC;
6839         break;
6840 #endif
6841     case TARGET_F_NOTIFY:
6842         ret = F_NOTIFY;
6843         break;
6844 #ifdef F_GETOWN_EX
6845     case TARGET_F_GETOWN_EX:
6846         ret = F_GETOWN_EX;
6847         break;
6848 #endif
6849 #ifdef F_SETOWN_EX
6850     case TARGET_F_SETOWN_EX:
6851         ret = F_SETOWN_EX;
6852         break;
6853 #endif
6854 #ifdef F_SETPIPE_SZ
6855     case TARGET_F_SETPIPE_SZ:
6856         ret = F_SETPIPE_SZ;
6857         break;
6858     case TARGET_F_GETPIPE_SZ:
6859         ret = F_GETPIPE_SZ;
6860         break;
6861 #endif
6862 #ifdef F_ADD_SEALS
6863     case TARGET_F_ADD_SEALS:
6864         ret = F_ADD_SEALS;
6865         break;
6866     case TARGET_F_GET_SEALS:
6867         ret = F_GET_SEALS;
6868         break;
6869 #endif
6870     default:
6871         ret = -TARGET_EINVAL;
6872         break;
6873     }
6874 
6875 #if defined(__powerpc64__)
6876     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6877      * are not supported by the kernel. The glibc fcntl wrapper adjusts
6878      * them to 5, 6 and 7 before making the syscall(). Since we make the
6879      * syscall directly, adjust to what the kernel supports.
6880      */
6881     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6882         ret -= F_GETLK64 - 5;
6883     }
6884 #endif
6885 
6886     return ret;
6887 }
6888 
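/*
 * FLOCK_TRANSTBL is expanded twice below with different definitions of
 * TRANSTBL_CONVERT, generating both directions of the lock-type
 * conversion from a single list of F_* constants.
 */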
6889 #define FLOCK_TRANSTBL \
6890     switch (type) { \
6891     TRANSTBL_CONVERT(F_RDLCK); \
6892     TRANSTBL_CONVERT(F_WRLCK); \
6893     TRANSTBL_CONVERT(F_UNLCK); \
6894     }
6895 
6896 static int target_to_host_flock(int type)
6897 {
6898 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6899     FLOCK_TRANSTBL
6900 #undef  TRANSTBL_CONVERT
6901     return -TARGET_EINVAL;
6902 }
6903 
6904 static int host_to_target_flock(int type)
6905 {
6906 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6907     FLOCK_TRANSTBL
6908 #undef  TRANSTBL_CONVERT
6909     /* if we don't know how to convert the value coming
6910      * from the host, we copy it to the target field as-is
6911      */
6912     return type;
6913 }
6914 
6915 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6916                                             abi_ulong target_flock_addr)
6917 {
6918     struct target_flock *target_fl;
6919     int l_type;
6920 
6921     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6922         return -TARGET_EFAULT;
6923     }
6924 
6925     __get_user(l_type, &target_fl->l_type);
6926     l_type = target_to_host_flock(l_type);
6927     if (l_type < 0) {
6928         return l_type;
6929     }
6930     fl->l_type = l_type;
6931     __get_user(fl->l_whence, &target_fl->l_whence);
6932     __get_user(fl->l_start, &target_fl->l_start);
6933     __get_user(fl->l_len, &target_fl->l_len);
6934     __get_user(fl->l_pid, &target_fl->l_pid);
6935     unlock_user_struct(target_fl, target_flock_addr, 0);
6936     return 0;
6937 }
6938 
6939 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6940                                           const struct flock64 *fl)
6941 {
6942     struct target_flock *target_fl;
6943     short l_type;
6944 
6945     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6946         return -TARGET_EFAULT;
6947     }
6948 
6949     l_type = host_to_target_flock(fl->l_type);
6950     __put_user(l_type, &target_fl->l_type);
6951     __put_user(fl->l_whence, &target_fl->l_whence);
6952     __put_user(fl->l_start, &target_fl->l_start);
6953     __put_user(fl->l_len, &target_fl->l_len);
6954     __put_user(fl->l_pid, &target_fl->l_pid);
6955     unlock_user_struct(target_fl, target_flock_addr, 1);
6956     return 0;
6957 }
6958 
6959 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6960 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6961 
6962 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
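/*
 * The old ARM OABI lays out struct flock64 without the padding that EABI
 * alignment rules insert after the two 16-bit fields; the packed
 * definition below reproduces that layout (inference from the QEMU_PACKED
 * attribute and the separate target_flock64 handlers further down).
 */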
6963 struct target_oabi_flock64 {
6964     abi_short l_type;
6965     abi_short l_whence;
6966     abi_llong l_start;
6967     abi_llong l_len;
6968     abi_int   l_pid;
6969 } QEMU_PACKED;
6970 
6971 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6972                                                    abi_ulong target_flock_addr)
6973 {
6974     struct target_oabi_flock64 *target_fl;
6975     int l_type;
6976 
6977     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6978         return -TARGET_EFAULT;
6979     }
6980 
6981     __get_user(l_type, &target_fl->l_type);
6982     l_type = target_to_host_flock(l_type);
6983     if (l_type < 0) {
6984         return l_type;
6985     }
6986     fl->l_type = l_type;
6987     __get_user(fl->l_whence, &target_fl->l_whence);
6988     __get_user(fl->l_start, &target_fl->l_start);
6989     __get_user(fl->l_len, &target_fl->l_len);
6990     __get_user(fl->l_pid, &target_fl->l_pid);
6991     unlock_user_struct(target_fl, target_flock_addr, 0);
6992     return 0;
6993 }
6994 
6995 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6996                                                  const struct flock64 *fl)
6997 {
6998     struct target_oabi_flock64 *target_fl;
6999     short l_type;
7000 
7001     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7002         return -TARGET_EFAULT;
7003     }
7004 
7005     l_type = host_to_target_flock(fl->l_type);
7006     __put_user(l_type, &target_fl->l_type);
7007     __put_user(fl->l_whence, &target_fl->l_whence);
7008     __put_user(fl->l_start, &target_fl->l_start);
7009     __put_user(fl->l_len, &target_fl->l_len);
7010     __put_user(fl->l_pid, &target_fl->l_pid);
7011     unlock_user_struct(target_fl, target_flock_addr, 1);
7012     return 0;
7013 }
7014 #endif
7015 
7016 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7017                                               abi_ulong target_flock_addr)
7018 {
7019     struct target_flock64 *target_fl;
7020     int l_type;
7021 
7022     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7023         return -TARGET_EFAULT;
7024     }
7025 
7026     __get_user(l_type, &target_fl->l_type);
7027     l_type = target_to_host_flock(l_type);
7028     if (l_type < 0) {
7029         return l_type;
7030     }
7031     fl->l_type = l_type;
7032     __get_user(fl->l_whence, &target_fl->l_whence);
7033     __get_user(fl->l_start, &target_fl->l_start);
7034     __get_user(fl->l_len, &target_fl->l_len);
7035     __get_user(fl->l_pid, &target_fl->l_pid);
7036     unlock_user_struct(target_fl, target_flock_addr, 0);
7037     return 0;
7038 }
7039 
7040 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7041                                             const struct flock64 *fl)
7042 {
7043     struct target_flock64 *target_fl;
7044     short l_type;
7045 
7046     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7047         return -TARGET_EFAULT;
7048     }
7049 
7050     l_type = host_to_target_flock(fl->l_type);
7051     __put_user(l_type, &target_fl->l_type);
7052     __put_user(fl->l_whence, &target_fl->l_whence);
7053     __put_user(fl->l_start, &target_fl->l_start);
7054     __put_user(fl->l_len, &target_fl->l_len);
7055     __put_user(fl->l_pid, &target_fl->l_pid);
7056     unlock_user_struct(target_fl, target_flock_addr, 1);
7057     return 0;
7058 }
7059 
7060 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7061 {
7062     struct flock64 fl64;
7063 #ifdef F_GETOWN_EX
7064     struct f_owner_ex fox;
7065     struct target_f_owner_ex *target_fox;
7066 #endif
7067     abi_long ret;
7068     int host_cmd = target_to_host_fcntl_cmd(cmd);
7069 
7070     if (host_cmd == -TARGET_EINVAL)
7071         return host_cmd;
7072 
7073     switch(cmd) {
7074     case TARGET_F_GETLK:
7075         ret = copy_from_user_flock(&fl64, arg);
7076         if (ret) {
7077             return ret;
7078         }
7079         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7080         if (ret == 0) {
7081             ret = copy_to_user_flock(arg, &fl64);
7082         }
7083         break;
7084 
7085     case TARGET_F_SETLK:
7086     case TARGET_F_SETLKW:
7087         ret = copy_from_user_flock(&fl64, arg);
7088         if (ret) {
7089             return ret;
7090         }
7091         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7092         break;
7093 
7094     case TARGET_F_GETLK64:
7095     case TARGET_F_OFD_GETLK:
7096         ret = copy_from_user_flock64(&fl64, arg);
7097         if (ret) {
7098             return ret;
7099         }
7100         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7101         if (ret == 0) {
7102             ret = copy_to_user_flock64(arg, &fl64);
7103         }
7104         break;
7105     case TARGET_F_SETLK64:
7106     case TARGET_F_SETLKW64:
7107     case TARGET_F_OFD_SETLK:
7108     case TARGET_F_OFD_SETLKW:
7109         ret = copy_from_user_flock64(&fl64, arg);
7110         if (ret) {
7111             return ret;
7112         }
7113         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7114         break;
7115 
7116     case TARGET_F_GETFL:
7117         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7118         if (ret >= 0) {
7119             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7120         }
7121         break;
7122 
7123     case TARGET_F_SETFL:
7124         ret = get_errno(safe_fcntl(fd, host_cmd,
7125                                    target_to_host_bitmask(arg,
7126                                                           fcntl_flags_tbl)));
7127         break;
7128 
7129 #ifdef F_GETOWN_EX
7130     case TARGET_F_GETOWN_EX:
7131         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7132         if (ret >= 0) {
7133             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7134                 return -TARGET_EFAULT;
7135             target_fox->type = tswap32(fox.type);
7136             target_fox->pid = tswap32(fox.pid);
7137             unlock_user_struct(target_fox, arg, 1);
7138         }
7139         break;
7140 #endif
7141 
7142 #ifdef F_SETOWN_EX
7143     case TARGET_F_SETOWN_EX:
7144         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7145             return -TARGET_EFAULT;
7146         fox.type = tswap32(target_fox->type);
7147         fox.pid = tswap32(target_fox->pid);
7148         unlock_user_struct(target_fox, arg, 0);
7149         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7150         break;
7151 #endif
7152 
7153     case TARGET_F_SETSIG:
7154         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7155         break;
7156 
7157     case TARGET_F_GETSIG:
7158         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7159         break;
7160 
7161     case TARGET_F_SETOWN:
7162     case TARGET_F_GETOWN:
7163     case TARGET_F_SETLEASE:
7164     case TARGET_F_GETLEASE:
7165     case TARGET_F_SETPIPE_SZ:
7166     case TARGET_F_GETPIPE_SZ:
7167     case TARGET_F_ADD_SEALS:
7168     case TARGET_F_GET_SEALS:
7169         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7170         break;
7171 
7172     default:
7173         ret = get_errno(safe_fcntl(fd, cmd, arg));
7174         break;
7175     }
7176     return ret;
7177 }
7178 
7179 #ifdef USE_UID16
7180 
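/*
 * With 16-bit UID/GID syscalls, IDs above 65535 cannot be represented;
 * high2low*() clamps them to 65534 (the traditional overflow ID), while
 * low2high*() widens a 16-bit -1 back to a 32-bit -1 so that "unchanged"
 * markers keep their meaning.
 */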
7181 static inline int high2lowuid(int uid)
7182 {
7183     if (uid > 65535)
7184         return 65534;
7185     else
7186         return uid;
7187 }
7188 
7189 static inline int high2lowgid(int gid)
7190 {
7191     if (gid > 65535)
7192         return 65534;
7193     else
7194         return gid;
7195 }
7196 
7197 static inline int low2highuid(int uid)
7198 {
7199     if ((int16_t)uid == -1)
7200         return -1;
7201     else
7202         return uid;
7203 }
7204 
7205 static inline int low2highgid(int gid)
7206 {
7207     if ((int16_t)gid == -1)
7208         return -1;
7209     else
7210         return gid;
7211 }
7212 static inline int tswapid(int id)
7213 {
7214     return tswap16(id);
7215 }
7216 
7217 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7218 
7219 #else /* !USE_UID16 */
7220 static inline int high2lowuid(int uid)
7221 {
7222     return uid;
7223 }
7224 static inline int high2lowgid(int gid)
7225 {
7226     return gid;
7227 }
7228 static inline int low2highuid(int uid)
7229 {
7230     return uid;
7231 }
7232 static inline int low2highgid(int gid)
7233 {
7234     return gid;
7235 }
7236 static inline int tswapid(int id)
7237 {
7238     return tswap32(id);
7239 }
7240 
7241 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7242 
7243 #endif /* USE_UID16 */
7244 
7245 /* We must do direct syscalls for setting UID/GID, because we want to
7246  * implement the Linux system call semantics of "change only for this thread",
7247  * not the libc/POSIX semantics of "change for all threads in process".
7248  * (See http://ewontfix.com/17/ for more details.)
7249  * We use the 32-bit version of the syscalls if present; if it is not
7250  * then either the host architecture supports 32-bit UIDs natively with
7251  * the standard syscall, or the 16-bit UID is the best we can do.
7252  */
7253 #ifdef __NR_setuid32
7254 #define __NR_sys_setuid __NR_setuid32
7255 #else
7256 #define __NR_sys_setuid __NR_setuid
7257 #endif
7258 #ifdef __NR_setgid32
7259 #define __NR_sys_setgid __NR_setgid32
7260 #else
7261 #define __NR_sys_setgid __NR_setgid
7262 #endif
7263 #ifdef __NR_setresuid32
7264 #define __NR_sys_setresuid __NR_setresuid32
7265 #else
7266 #define __NR_sys_setresuid __NR_setresuid
7267 #endif
7268 #ifdef __NR_setresgid32
7269 #define __NR_sys_setresgid __NR_setresgid32
7270 #else
7271 #define __NR_sys_setresgid __NR_setresgid
7272 #endif
7273 
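/*
 * The _syscall1/_syscall3 macros expand to small wrappers that invoke the
 * raw syscall numbers selected above, bypassing the libc wrappers and
 * their process-wide set*id semantics.
 */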
7274 _syscall1(int, sys_setuid, uid_t, uid)
7275 _syscall1(int, sys_setgid, gid_t, gid)
7276 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7277 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7278 
7279 void syscall_init(void)
7280 {
7281     IOCTLEntry *ie;
7282     const argtype *arg_type;
7283     int size;
7284 
7285     thunk_init(STRUCT_MAX);
7286 
7287 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7288 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7289 #include "syscall_types.h"
7290 #undef STRUCT
7291 #undef STRUCT_SPECIAL
7292 
7293     /* we patch the ioctl size if necessary. We rely on the fact that
7294        no ioctl has all the bits at '1' in the size field */
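    /* An entry whose size field is TARGET_IOC_SIZEMASK acts as a
       placeholder: the real size is computed here from the thunk type
       description of the structure the ioctl argument points to. */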
7295     ie = ioctl_entries;
7296     while (ie->target_cmd != 0) {
7297         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7298             TARGET_IOC_SIZEMASK) {
7299             arg_type = ie->arg_type;
7300             if (arg_type[0] != TYPE_PTR) {
7301                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7302                         ie->target_cmd);
7303                 exit(1);
7304             }
7305             arg_type++;
7306             size = thunk_type_size(arg_type, 0);
7307             ie->target_cmd = (ie->target_cmd &
7308                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7309                 (size << TARGET_IOC_SIZESHIFT);
7310         }
7311 
7312         /* automatic consistency check if same arch */
7313 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7314     (defined(__x86_64__) && defined(TARGET_X86_64))
7315         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7316             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7317                     ie->name, ie->target_cmd, ie->host_cmd);
7318         }
7319 #endif
7320         ie++;
7321     }
7322 }
7323 
7324 #ifdef TARGET_NR_truncate64
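/*
 * On targets where 64-bit syscall arguments must be passed in an aligned
 * register pair, the offset halves arrive shifted by one argument slot;
 * regpairs_aligned() detects this, and the halves are then taken from
 * arg3/arg4 instead of arg2/arg3.
 */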
7325 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7326                                          abi_long arg2,
7327                                          abi_long arg3,
7328                                          abi_long arg4)
7329 {
7330     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7331         arg2 = arg3;
7332         arg3 = arg4;
7333     }
7334     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7335 }
7336 #endif
7337 
7338 #ifdef TARGET_NR_ftruncate64
7339 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7340                                           abi_long arg2,
7341                                           abi_long arg3,
7342                                           abi_long arg4)
7343 {
7344     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7345         arg2 = arg3;
7346         arg3 = arg4;
7347     }
7348     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7349 }
7350 #endif
7351 
7352 #if defined(TARGET_NR_timer_settime) || \
7353     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7354 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7355                                                  abi_ulong target_addr)
7356 {
7357     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7358                                 offsetof(struct target_itimerspec,
7359                                          it_interval)) ||
7360         target_to_host_timespec(&host_its->it_value, target_addr +
7361                                 offsetof(struct target_itimerspec,
7362                                          it_value))) {
7363         return -TARGET_EFAULT;
7364     }
7365 
7366     return 0;
7367 }
7368 #endif
7369 
7370 #if defined(TARGET_NR_timer_settime64) || \
7371     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7372 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7373                                                    abi_ulong target_addr)
7374 {
7375     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7376                                   offsetof(struct target__kernel_itimerspec,
7377                                            it_interval)) ||
7378         target_to_host_timespec64(&host_its->it_value, target_addr +
7379                                   offsetof(struct target__kernel_itimerspec,
7380                                            it_value))) {
7381         return -TARGET_EFAULT;
7382     }
7383 
7384     return 0;
7385 }
7386 #endif
7387 
7388 #if ((defined(TARGET_NR_timerfd_gettime) || \
7389       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7390       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7391 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7392                                                  struct itimerspec *host_its)
7393 {
7394     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7395                                                        it_interval),
7396                                 &host_its->it_interval) ||
7397         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7398                                                        it_value),
7399                                 &host_its->it_value)) {
7400         return -TARGET_EFAULT;
7401     }
7402     return 0;
7403 }
7404 #endif
7405 
7406 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7407       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7408       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7409 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7410                                                    struct itimerspec *host_its)
7411 {
7412     if (host_to_target_timespec64(target_addr +
7413                                   offsetof(struct target__kernel_itimerspec,
7414                                            it_interval),
7415                                   &host_its->it_interval) ||
7416         host_to_target_timespec64(target_addr +
7417                                   offsetof(struct target__kernel_itimerspec,
7418                                            it_value),
7419                                   &host_its->it_value)) {
7420         return -TARGET_EFAULT;
7421     }
7422     return 0;
7423 }
7424 #endif
7425 
7426 #if defined(TARGET_NR_adjtimex) || \
7427     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7428 static inline abi_long target_to_host_timex(struct timex *host_tx,
7429                                             abi_long target_addr)
7430 {
7431     struct target_timex *target_tx;
7432 
7433     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7434         return -TARGET_EFAULT;
7435     }
7436 
7437     __get_user(host_tx->modes, &target_tx->modes);
7438     __get_user(host_tx->offset, &target_tx->offset);
7439     __get_user(host_tx->freq, &target_tx->freq);
7440     __get_user(host_tx->maxerror, &target_tx->maxerror);
7441     __get_user(host_tx->esterror, &target_tx->esterror);
7442     __get_user(host_tx->status, &target_tx->status);
7443     __get_user(host_tx->constant, &target_tx->constant);
7444     __get_user(host_tx->precision, &target_tx->precision);
7445     __get_user(host_tx->tolerance, &target_tx->tolerance);
7446     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7447     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7448     __get_user(host_tx->tick, &target_tx->tick);
7449     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7450     __get_user(host_tx->jitter, &target_tx->jitter);
7451     __get_user(host_tx->shift, &target_tx->shift);
7452     __get_user(host_tx->stabil, &target_tx->stabil);
7453     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7454     __get_user(host_tx->calcnt, &target_tx->calcnt);
7455     __get_user(host_tx->errcnt, &target_tx->errcnt);
7456     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7457     __get_user(host_tx->tai, &target_tx->tai);
7458 
7459     unlock_user_struct(target_tx, target_addr, 0);
7460     return 0;
7461 }
7462 
7463 static inline abi_long host_to_target_timex(abi_long target_addr,
7464                                             struct timex *host_tx)
7465 {
7466     struct target_timex *target_tx;
7467 
7468     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7469         return -TARGET_EFAULT;
7470     }
7471 
7472     __put_user(host_tx->modes, &target_tx->modes);
7473     __put_user(host_tx->offset, &target_tx->offset);
7474     __put_user(host_tx->freq, &target_tx->freq);
7475     __put_user(host_tx->maxerror, &target_tx->maxerror);
7476     __put_user(host_tx->esterror, &target_tx->esterror);
7477     __put_user(host_tx->status, &target_tx->status);
7478     __put_user(host_tx->constant, &target_tx->constant);
7479     __put_user(host_tx->precision, &target_tx->precision);
7480     __put_user(host_tx->tolerance, &target_tx->tolerance);
7481     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7482     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7483     __put_user(host_tx->tick, &target_tx->tick);
7484     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7485     __put_user(host_tx->jitter, &target_tx->jitter);
7486     __put_user(host_tx->shift, &target_tx->shift);
7487     __put_user(host_tx->stabil, &target_tx->stabil);
7488     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7489     __put_user(host_tx->calcnt, &target_tx->calcnt);
7490     __put_user(host_tx->errcnt, &target_tx->errcnt);
7491     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7492     __put_user(host_tx->tai, &target_tx->tai);
7493 
7494     unlock_user_struct(target_tx, target_addr, 1);
7495     return 0;
7496 }
7497 #endif
7498 
7499 
7500 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7501 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7502                                               abi_long target_addr)
7503 {
7504     struct target__kernel_timex *target_tx;
7505 
7506     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7507                                  offsetof(struct target__kernel_timex,
7508                                           time))) {
7509         return -TARGET_EFAULT;
7510     }
7511 
7512     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7513         return -TARGET_EFAULT;
7514     }
7515 
7516     __get_user(host_tx->modes, &target_tx->modes);
7517     __get_user(host_tx->offset, &target_tx->offset);
7518     __get_user(host_tx->freq, &target_tx->freq);
7519     __get_user(host_tx->maxerror, &target_tx->maxerror);
7520     __get_user(host_tx->esterror, &target_tx->esterror);
7521     __get_user(host_tx->status, &target_tx->status);
7522     __get_user(host_tx->constant, &target_tx->constant);
7523     __get_user(host_tx->precision, &target_tx->precision);
7524     __get_user(host_tx->tolerance, &target_tx->tolerance);
7525     __get_user(host_tx->tick, &target_tx->tick);
7526     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7527     __get_user(host_tx->jitter, &target_tx->jitter);
7528     __get_user(host_tx->shift, &target_tx->shift);
7529     __get_user(host_tx->stabil, &target_tx->stabil);
7530     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7531     __get_user(host_tx->calcnt, &target_tx->calcnt);
7532     __get_user(host_tx->errcnt, &target_tx->errcnt);
7533     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7534     __get_user(host_tx->tai, &target_tx->tai);
7535 
7536     unlock_user_struct(target_tx, target_addr, 0);
7537     return 0;
7538 }
7539 
7540 static inline abi_long host_to_target_timex64(abi_long target_addr,
7541                                               struct timex *host_tx)
7542 {
7543     struct target__kernel_timex *target_tx;
7544 
7545    if (copy_to_user_timeval64(target_addr +
7546                               offsetof(struct target__kernel_timex, time),
7547                               &host_tx->time)) {
7548         return -TARGET_EFAULT;
7549     }
7550 
7551     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7552         return -TARGET_EFAULT;
7553     }
7554 
7555     __put_user(host_tx->modes, &target_tx->modes);
7556     __put_user(host_tx->offset, &target_tx->offset);
7557     __put_user(host_tx->freq, &target_tx->freq);
7558     __put_user(host_tx->maxerror, &target_tx->maxerror);
7559     __put_user(host_tx->esterror, &target_tx->esterror);
7560     __put_user(host_tx->status, &target_tx->status);
7561     __put_user(host_tx->constant, &target_tx->constant);
7562     __put_user(host_tx->precision, &target_tx->precision);
7563     __put_user(host_tx->tolerance, &target_tx->tolerance);
7564     __put_user(host_tx->tick, &target_tx->tick);
7565     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7566     __put_user(host_tx->jitter, &target_tx->jitter);
7567     __put_user(host_tx->shift, &target_tx->shift);
7568     __put_user(host_tx->stabil, &target_tx->stabil);
7569     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7570     __put_user(host_tx->calcnt, &target_tx->calcnt);
7571     __put_user(host_tx->errcnt, &target_tx->errcnt);
7572     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7573     __put_user(host_tx->tai, &target_tx->tai);
7574 
7575     unlock_user_struct(target_tx, target_addr, 1);
7576     return 0;
7577 }
7578 #endif
7579 
7580 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7581 #define sigev_notify_thread_id _sigev_un._tid
7582 #endif
7583 
7584 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7585                                                abi_ulong target_addr)
7586 {
7587     struct target_sigevent *target_sevp;
7588 
7589     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7590         return -TARGET_EFAULT;
7591     }
7592 
7593     /* This union is awkward on 64 bit systems because it has a 32 bit
7594      * integer and a pointer in it; we follow the conversion approach
7595      * used for handling sigval types in signal.c so the guest should get
7596      * the correct value back even if we did a 64 bit byteswap and it's
7597      * using the 32 bit integer.
7598      */
7599     host_sevp->sigev_value.sival_ptr =
7600         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7601     host_sevp->sigev_signo =
7602         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7603     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7604     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7605 
7606     unlock_user_struct(target_sevp, target_addr, 1);
7607     return 0;
7608 }
7609 
7610 #if defined(TARGET_NR_mlockall)
7611 static inline int target_to_host_mlockall_arg(int arg)
7612 {
7613     int result = 0;
7614 
7615     if (arg & TARGET_MCL_CURRENT) {
7616         result |= MCL_CURRENT;
7617     }
7618     if (arg & TARGET_MCL_FUTURE) {
7619         result |= MCL_FUTURE;
7620     }
7621 #ifdef MCL_ONFAULT
7622     if (arg & TARGET_MCL_ONFAULT) {
7623         result |= MCL_ONFAULT;
7624     }
7625 #endif
7626 
7627     return result;
7628 }
7629 #endif
7630 
7631 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7632      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7633      defined(TARGET_NR_newfstatat))
7634 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7635                                              abi_ulong target_addr,
7636                                              struct stat *host_st)
7637 {
7638 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7639     if (cpu_env->eabi) {
7640         struct target_eabi_stat64 *target_st;
7641 
7642         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7643             return -TARGET_EFAULT;
7644         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7645         __put_user(host_st->st_dev, &target_st->st_dev);
7646         __put_user(host_st->st_ino, &target_st->st_ino);
7647 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7648         __put_user(host_st->st_ino, &target_st->__st_ino);
7649 #endif
7650         __put_user(host_st->st_mode, &target_st->st_mode);
7651         __put_user(host_st->st_nlink, &target_st->st_nlink);
7652         __put_user(host_st->st_uid, &target_st->st_uid);
7653         __put_user(host_st->st_gid, &target_st->st_gid);
7654         __put_user(host_st->st_rdev, &target_st->st_rdev);
7655         __put_user(host_st->st_size, &target_st->st_size);
7656         __put_user(host_st->st_blksize, &target_st->st_blksize);
7657         __put_user(host_st->st_blocks, &target_st->st_blocks);
7658         __put_user(host_st->st_atime, &target_st->target_st_atime);
7659         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7660         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7661 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7662         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7663         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7664         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7665 #endif
7666         unlock_user_struct(target_st, target_addr, 1);
7667     } else
7668 #endif
7669     {
7670 #if defined(TARGET_HAS_STRUCT_STAT64)
7671         struct target_stat64 *target_st;
7672 #else
7673         struct target_stat *target_st;
7674 #endif
7675 
7676         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7677             return -TARGET_EFAULT;
7678         memset(target_st, 0, sizeof(*target_st));
7679         __put_user(host_st->st_dev, &target_st->st_dev);
7680         __put_user(host_st->st_ino, &target_st->st_ino);
7681 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7682         __put_user(host_st->st_ino, &target_st->__st_ino);
7683 #endif
7684         __put_user(host_st->st_mode, &target_st->st_mode);
7685         __put_user(host_st->st_nlink, &target_st->st_nlink);
7686         __put_user(host_st->st_uid, &target_st->st_uid);
7687         __put_user(host_st->st_gid, &target_st->st_gid);
7688         __put_user(host_st->st_rdev, &target_st->st_rdev);
7689         /* XXX: better use of kernel struct */
7690         __put_user(host_st->st_size, &target_st->st_size);
7691         __put_user(host_st->st_blksize, &target_st->st_blksize);
7692         __put_user(host_st->st_blocks, &target_st->st_blocks);
7693         __put_user(host_st->st_atime, &target_st->target_st_atime);
7694         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7695         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7696 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7697         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7698         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7699         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7700 #endif
7701         unlock_user_struct(target_st, target_addr, 1);
7702     }
7703 
7704     return 0;
7705 }
7706 #endif
7707 
7708 #if defined(TARGET_NR_statx) && defined(__NR_statx)
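/*
 * Copy statx(2) results out to guest memory.  The host result has
 * already been placed in a target_statx-shaped buffer, so this is a
 * field-by-field byte swap via __put_user().
 */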
7709 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7710                                             abi_ulong target_addr)
7711 {
7712     struct target_statx *target_stx;
7713 
7714     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7715         return -TARGET_EFAULT;
7716     }
7717     memset(target_stx, 0, sizeof(*target_stx));
7718 
7719     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7720     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7721     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7722     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7723     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7724     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7725     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7726     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7727     __put_user(host_stx->stx_size, &target_stx->stx_size);
7728     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7729     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7730     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7731     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7732     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7733     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7734     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7735     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7736     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7737     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7738     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7739     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7740     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7741     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7742 
7743     unlock_user_struct(target_stx, target_addr, 1);
7744 
7745     return 0;
7746 }
7747 #endif
7748 
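/*
 * Raw futex syscall wrapper: on 64-bit hosts __NR_futex always takes a
 * 64-bit time_t, while 32-bit hosts use __NR_futex_time64 when the host
 * timespec has a 64-bit tv_sec and fall back to __NR_futex otherwise.
 */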
7749 static int do_sys_futex(int *uaddr, int op, int val,
7750                          const struct timespec *timeout, int *uaddr2,
7751                          int val3)
7752 {
7753 #if HOST_LONG_BITS == 64
7754 #if defined(__NR_futex)
7755     /* On a 64-bit host, time_t is always 64-bit; no _time64 variant exists. */
7756     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7757 
7758 #endif
7759 #else /* HOST_LONG_BITS == 64 */
7760 #if defined(__NR_futex_time64)
7761     if (sizeof(timeout->tv_sec) == 8) {
7762         /* _time64 function on 32bit arch */
7763         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7764     }
7765 #endif
7766 #if defined(__NR_futex)
7767     /* old function on 32bit arch */
7768     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7769 #endif
7770 #endif /* HOST_LONG_BITS == 64 */
7771     g_assert_not_reached();
7772 }
7773 
7774 static int do_safe_futex(int *uaddr, int op, int val,
7775                          const struct timespec *timeout, int *uaddr2,
7776                          int val3)
7777 {
7778 #if HOST_LONG_BITS == 64
7779 #if defined(__NR_futex)
7780     /* On a 64-bit host, time_t is always 64-bit; no _time64 variant exists. */
7781     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7782 #endif
7783 #else /* HOST_LONG_BITS == 64 */
7784 #if defined(__NR_futex_time64)
7785     if (sizeof(timeout->tv_sec) == 8) {
7786         /* _time64 function on 32bit arch */
7787         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7788                                            val3));
7789     }
7790 #endif
7791 #if defined(__NR_futex)
7792     /* old function on 32bit arch */
7793     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7794 #endif
7795 #endif /* HOST_LONG_BITS == 64 */
7796     return -TARGET_ENOSYS;
7797 }
7798 
7799 /* ??? Using host futex calls even when target atomic operations
7800    are not really atomic probably breaks things.  However, implementing
7801    futexes locally would make futexes shared between multiple processes
7802    tricky.  Such shared futexes are probably useless anyway, because
7803    guest atomic operations won't work either.  */
7804 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7805 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7806                     int op, int val, target_ulong timeout,
7807                     target_ulong uaddr2, int val3)
7808 {
7809     struct timespec ts, *pts = NULL;
7810     void *haddr2 = NULL;
7811     int base_op;
7812 
7813     /* We assume FUTEX_* constants are the same on both host and target. */
7814 #ifdef FUTEX_CMD_MASK
7815     base_op = op & FUTEX_CMD_MASK;
7816 #else
7817     base_op = op;
7818 #endif
7819     switch (base_op) {
7820     case FUTEX_WAIT:
7821     case FUTEX_WAIT_BITSET:
7822         val = tswap32(val);
7823         break;
7824     case FUTEX_WAIT_REQUEUE_PI:
7825         val = tswap32(val);
7826         haddr2 = g2h(cpu, uaddr2);
7827         break;
7828     case FUTEX_LOCK_PI:
7829     case FUTEX_LOCK_PI2:
7830         break;
7831     case FUTEX_WAKE:
7832     case FUTEX_WAKE_BITSET:
7833     case FUTEX_TRYLOCK_PI:
7834     case FUTEX_UNLOCK_PI:
7835         timeout = 0;
7836         break;
7837     case FUTEX_FD:
7838         val = target_to_host_signal(val);
7839         timeout = 0;
7840         break;
7841     case FUTEX_CMP_REQUEUE:
7842     case FUTEX_CMP_REQUEUE_PI:
7843         val3 = tswap32(val3);
7844         /* fall through */
7845     case FUTEX_REQUEUE:
7846     case FUTEX_WAKE_OP:
7847         /*
7848          * For these, the 4th argument is not TIMEOUT, but VAL2.
7849          * But the prototype of do_safe_futex takes a pointer, so
7850          * insert casts to satisfy the compiler.  We do not need
7851          * to tswap VAL2 since it's not compared to guest memory.
7852          */
7853         pts = (struct timespec *)(uintptr_t)timeout;
7854         timeout = 0;
7855         haddr2 = g2h(cpu, uaddr2);
7856         break;
7857     default:
7858         return -TARGET_ENOSYS;
7859     }
7860     if (timeout) {
7861         pts = &ts;
7862         if (time64
7863             ? target_to_host_timespec64(pts, timeout)
7864             : target_to_host_timespec(pts, timeout)) {
7865             return -TARGET_EFAULT;
7866         }
7867     }
7868     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7869 }
7870 #endif
7871 
7872 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
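/*
 * name_to_handle_at(2): pass the call through to the host, then fix the
 * byte order of the file_handle header fields and of the mount id that
 * are copied back to the guest.
 */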
7873 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7874                                      abi_long handle, abi_long mount_id,
7875                                      abi_long flags)
7876 {
7877     struct file_handle *target_fh;
7878     struct file_handle *fh;
7879     int mid = 0;
7880     abi_long ret;
7881     char *name;
7882     unsigned int size, total_size;
7883 
7884     if (get_user_s32(size, handle)) {
7885         return -TARGET_EFAULT;
7886     }
7887 
7888     name = lock_user_string(pathname);
7889     if (!name) {
7890         return -TARGET_EFAULT;
7891     }
7892 
7893     total_size = sizeof(struct file_handle) + size;
7894     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7895     if (!target_fh) {
7896         unlock_user(name, pathname, 0);
7897         return -TARGET_EFAULT;
7898     }
7899 
7900     fh = g_malloc0(total_size);
7901     fh->handle_bytes = size;
7902 
7903     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7904     unlock_user(name, pathname, 0);
7905 
7906     /* man name_to_handle_at(2):
7907      * Other than the use of the handle_bytes field, the caller should treat
7908      * the file_handle structure as an opaque data type
7909      */
7910 
7911     memcpy(target_fh, fh, total_size);
7912     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7913     target_fh->handle_type = tswap32(fh->handle_type);
7914     g_free(fh);
7915     unlock_user(target_fh, handle, total_size);
7916 
7917     if (put_user_s32(mid, mount_id)) {
7918         return -TARGET_EFAULT;
7919     }
7920 
7921     return ret;
7922 
7923 }
7924 #endif
7925 
7926 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
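/*
 * open_by_handle_at(2): duplicate the guest's file_handle, fix its byte
 * order, convert the open flags and pass the call to the host.
 */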
7927 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7928                                      abi_long flags)
7929 {
7930     struct file_handle *target_fh;
7931     struct file_handle *fh;
7932     unsigned int size, total_size;
7933     abi_long ret;
7934 
7935     if (get_user_s32(size, handle)) {
7936         return -TARGET_EFAULT;
7937     }
7938 
7939     total_size = sizeof(struct file_handle) + size;
7940     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7941     if (!target_fh) {
7942         return -TARGET_EFAULT;
7943     }
7944 
7945     fh = g_memdup(target_fh, total_size);
7946     fh->handle_bytes = size;
7947     fh->handle_type = tswap32(target_fh->handle_type);
7948 
7949     ret = get_errno(open_by_handle_at(mount_fd, fh,
7950                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7951 
7952     g_free(fh);
7953 
7954     unlock_user(target_fh, handle, total_size);
7955 
7956     return ret;
7957 }
7958 #endif
7959 
7960 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7961 
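/*
 * Common helper for signalfd(2) and signalfd4(2): validate the flags,
 * convert the target sigset and flags to host values, and register the
 * signalfd fd translator on the new descriptor.
 */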
7962 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7963 {
7964     int host_flags;
7965     target_sigset_t *target_mask;
7966     sigset_t host_mask;
7967     abi_long ret;
7968 
7969     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7970         return -TARGET_EINVAL;
7971     }
7972     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7973         return -TARGET_EFAULT;
7974     }
7975 
7976     target_to_host_sigset(&host_mask, target_mask);
7977 
7978     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7979 
7980     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7981     if (ret >= 0) {
7982         fd_trans_register(ret, &target_signalfd_trans);
7983     }
7984 
7985     unlock_user_struct(target_mask, mask, 0);
7986 
7987     return ret;
7988 }
7989 #endif
7990 
7991 /* Map host to target signal numbers for the wait family of syscalls.
7992    Assume all other status bits are the same.  */
7993 int host_to_target_waitstatus(int status)
7994 {
7995     if (WIFSIGNALED(status)) {
7996         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7997     }
7998     if (WIFSTOPPED(status)) {
7999         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8000                | (status & 0xff);
8001     }
8002     return status;
8003 }
8004 
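/*
 * Synthesize /proc/self/cmdline: write the guest's argv[] strings,
 * each with its trailing NUL, to fd.
 */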
8005 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8006 {
8007     CPUState *cpu = env_cpu(cpu_env);
8008     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8009     int i;
8010 
8011     for (i = 0; i < bprm->argc; i++) {
8012         size_t len = strlen(bprm->argv[i]) + 1;
8013 
8014         if (write(fd, bprm->argv[i], len) != len) {
8015             return -1;
8016         }
8017     }
8018 
8019     return 0;
8020 }
8021 
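/*
 * Synthesize /proc/self/maps: walk the host mappings and emit only the
 * ranges that are valid guest addresses, using guest addresses and guest
 * page protections, with a [stack] annotation for the guest stack.
 */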
8022 static int open_self_maps(CPUArchState *cpu_env, int fd)
8023 {
8024     CPUState *cpu = env_cpu(cpu_env);
8025     TaskState *ts = cpu->opaque;
8026     GSList *map_info = read_self_maps();
8027     GSList *s;
8028     int count;
8029 
8030     for (s = map_info; s; s = g_slist_next(s)) {
8031         MapInfo *e = (MapInfo *) s->data;
8032 
8033         if (h2g_valid(e->start)) {
8034             unsigned long min = e->start;
8035             unsigned long max = e->end;
8036             int flags = page_get_flags(h2g(min));
8037             const char *path;
8038 
8039             max = h2g_valid(max - 1) ?
8040                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8041 
8042             if (page_check_range(h2g(min), max - min, flags) == -1) {
8043                 continue;
8044             }
8045 
8046 #ifdef TARGET_HPPA
8047             if (h2g(max) == ts->info->stack_limit) {
8048 #else
8049             if (h2g(min) == ts->info->stack_limit) {
8050 #endif
8051                 path = "[stack]";
8052             } else {
8053                 path = e->path;
8054             }
8055 
8056             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8057                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8058                             h2g(min), h2g(max - 1) + 1,
8059                             (flags & PAGE_READ) ? 'r' : '-',
8060                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8061                             (flags & PAGE_EXEC) ? 'x' : '-',
8062                             e->is_priv ? 'p' : 's',
8063                             (uint64_t) e->offset, e->dev, e->inode);
8064             if (path) {
8065                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8066             } else {
8067                 dprintf(fd, "\n");
8068             }
8069         }
8070     }
8071 
8072     free_self_maps(map_info);
8073 
8074 #ifdef TARGET_VSYSCALL_PAGE
8075     /*
8076      * We only support execution from the vsyscall page.
8077      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8078      */
8079     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8080                     " --xp 00000000 00:00 0",
8081                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8082     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8083 #endif
8084 
8085     return 0;
8086 }
8087 
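/*
 * Synthesize /proc/self/stat: only pid, comm, ppid, starttime and the
 * stack field are filled in; every other field reads as 0.
 */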
8088 static int open_self_stat(CPUArchState *cpu_env, int fd)
8089 {
8090     CPUState *cpu = env_cpu(cpu_env);
8091     TaskState *ts = cpu->opaque;
8092     g_autoptr(GString) buf = g_string_new(NULL);
8093     int i;
8094 
8095     for (i = 0; i < 44; i++) {
8096         if (i == 0) {
8097             /* pid */
8098             g_string_printf(buf, FMT_pid " ", getpid());
8099         } else if (i == 1) {
8100             /* app name */
8101             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8102             bin = bin ? bin + 1 : ts->bprm->argv[0];
8103             g_string_printf(buf, "(%.15s) ", bin);
8104         } else if (i == 3) {
8105             /* ppid */
8106             g_string_printf(buf, FMT_pid " ", getppid());
8107         } else if (i == 21) {
8108             /* starttime */
8109             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8110         } else if (i == 27) {
8111             /* stack bottom */
8112             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8113         } else {
8114             /* for the rest, there is MasterCard */
8115             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8116         }
8117 
8118         if (write(fd, buf->str, buf->len) != buf->len) {
8119             return -1;
8120         }
8121     }
8122 
8123     return 0;
8124 }
8125 
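/*
 * Synthesize /proc/self/auxv by copying the guest's saved auxiliary
 * vector out to fd.
 */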
8126 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8127 {
8128     CPUState *cpu = env_cpu(cpu_env);
8129     TaskState *ts = cpu->opaque;
8130     abi_ulong auxv = ts->info->saved_auxv;
8131     abi_ulong len = ts->info->auxv_len;
8132     char *ptr;
8133 
8134     /*
8135      * The auxiliary vector is stored on the target process's stack;
8136      * read the whole vector and copy it out to the file.
8137      */
8138     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8139     if (ptr != NULL) {
8140         while (len > 0) {
8141             ssize_t r;
8142             r = write(fd, ptr, len);
8143             if (r <= 0) {
8144                 break;
8145             }
8146             len -= r;
8147             ptr += r;
8148         }
8149         lseek(fd, 0, SEEK_SET);
8150         unlock_user(ptr, auxv, len);
8151     }
8152 
8153     return 0;
8154 }
8155 
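/*
 * Return 1 if filename names the given entry in this process's own
 * /proc directory, e.g. is_proc_myself("/proc/self/maps", "maps") or
 * the equivalent "/proc/<pid>/maps" form; return 0 otherwise.
 */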
8156 static int is_proc_myself(const char *filename, const char *entry)
8157 {
8158     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8159         filename += strlen("/proc/");
8160         if (!strncmp(filename, "self/", strlen("self/"))) {
8161             filename += strlen("self/");
8162         } else if (*filename >= '1' && *filename <= '9') {
8163             char myself[80];
8164             snprintf(myself, sizeof(myself), "%d/", getpid());
8165             if (!strncmp(filename, myself, strlen(myself))) {
8166                 filename += strlen(myself);
8167             } else {
8168                 return 0;
8169             }
8170         } else {
8171             return 0;
8172         }
8173         if (!strcmp(filename, entry)) {
8174             return 1;
8175         }
8176     }
8177     return 0;
8178 }
8179 
8180 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8181                       const char *fmt, int code)
8182 {
8183     if (logfile) {
8184         CPUState *cs = env_cpu(env);
8185 
8186         fprintf(logfile, fmt, code);
8187         fprintf(logfile, "Failing executable: %s\n", exec_path);
8188         cpu_dump_state(cs, logfile, 0);
8189         open_self_maps(env, fileno(logfile));
8190     }
8191 }
8192 
8193 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8194 {
8195     /* dump to console */
8196     excp_dump_file(stderr, env, fmt, code);
8197 
8198     /* dump to log file */
8199     if (qemu_log_separate()) {
8200         FILE *logfile = qemu_log_trylock();
8201 
8202         excp_dump_file(logfile, env, fmt, code);
8203         qemu_log_unlock(logfile);
8204     }
8205 }
8206 
8207 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8208     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8209 static int is_proc(const char *filename, const char *entry)
8210 {
8211     return strcmp(filename, entry) == 0;
8212 }
8213 #endif
8214 
8215 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
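/*
 * /proc/net/route presents addresses in host byte order, so for a
 * cross-endian guest each route line is re-emitted with the destination,
 * gateway and mask fields byte-swapped.
 */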
8216 static int open_net_route(CPUArchState *cpu_env, int fd)
8217 {
8218     FILE *fp;
8219     char *line = NULL;
8220     size_t len = 0;
8221     ssize_t read;
8222 
8223     fp = fopen("/proc/net/route", "r");
8224     if (fp == NULL) {
8225         return -1;
8226     }
8227 
8228     /* read header */
8229 
8230     read = getline(&line, &len, fp);
8231     dprintf(fd, "%s", line);
8232 
8233     /* read routes */
8234 
8235     while ((read = getline(&line, &len, fp)) != -1) {
8236         char iface[16];
8237         uint32_t dest, gw, mask;
8238         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8239         int fields;
8240 
8241         fields = sscanf(line,
8242                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8243                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8244                         &mask, &mtu, &window, &irtt);
8245         if (fields != 11) {
8246             continue;
8247         }
8248         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8249                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8250                 metric, tswap32(mask), mtu, window, irtt);
8251     }
8252 
8253     free(line);
8254     fclose(fp);
8255 
8256     return 0;
8257 }
8258 #endif
8259 
8260 #if defined(TARGET_SPARC)
8261 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8262 {
8263     dprintf(fd, "type\t\t: sun4u\n");
8264     return 0;
8265 }
8266 #endif
8267 
8268 #if defined(TARGET_HPPA)
8269 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8270 {
8271     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8272     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8273     dprintf(fd, "capabilities\t: os32\n");
8274     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8275     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8276     return 0;
8277 }
8278 #endif
8279 
8280 #if defined(TARGET_M68K)
8281 static int open_hardware(CPUArchState *cpu_env, int fd)
8282 {
8283     dprintf(fd, "Model:\t\tqemu-m68k\n");
8284     return 0;
8285 }
8286 #endif
8287 
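/*
 * openat(2) emulation: "own /proc" entries such as maps, stat, auxv and
 * cmdline (plus a few /proc files that need target-specific contents)
 * are intercepted and synthesized into a memfd or temporary file;
 * everything else is passed straight through to safe_openat().
 */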
8288 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8289 {
8290     struct fake_open {
8291         const char *filename;
8292         int (*fill)(CPUArchState *cpu_env, int fd);
8293         int (*cmp)(const char *s1, const char *s2);
8294     };
8295     const struct fake_open *fake_open;
8296     static const struct fake_open fakes[] = {
8297         { "maps", open_self_maps, is_proc_myself },
8298         { "stat", open_self_stat, is_proc_myself },
8299         { "auxv", open_self_auxv, is_proc_myself },
8300         { "cmdline", open_self_cmdline, is_proc_myself },
8301 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8302         { "/proc/net/route", open_net_route, is_proc },
8303 #endif
8304 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8305         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8306 #endif
8307 #if defined(TARGET_M68K)
8308         { "/proc/hardware", open_hardware, is_proc },
8309 #endif
8310         { NULL, NULL, NULL }
8311     };
8312 
8313     if (is_proc_myself(pathname, "exe")) {
8314         return safe_openat(dirfd, exec_path, flags, mode);
8315     }
8316 
8317     for (fake_open = fakes; fake_open->filename; fake_open++) {
8318         if (fake_open->cmp(pathname, fake_open->filename)) {
8319             break;
8320         }
8321     }
8322 
8323     if (fake_open->filename) {
8324         const char *tmpdir;
8325         char filename[PATH_MAX];
8326         int fd, r;
8327 
8328         fd = memfd_create("qemu-open", 0);
8329         if (fd < 0) {
8330             if (errno != ENOSYS) {
8331                 return fd;
8332             }
8333             /* fall back to a temporary file to hold the synthesized contents */
8334             tmpdir = getenv("TMPDIR");
8335             if (!tmpdir)
8336                 tmpdir = "/tmp";
8337             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8338             fd = mkstemp(filename);
8339             if (fd < 0) {
8340                 return fd;
8341             }
8342             unlink(filename);
8343         }
8344 
8345         if ((r = fake_open->fill(cpu_env, fd))) {
8346             int e = errno;
8347             close(fd);
8348             errno = e;
8349             return r;
8350         }
8351         lseek(fd, 0, SEEK_SET);
8352 
8353         return fd;
8354     }
8355 
8356     return safe_openat(dirfd, path(pathname), flags, mode);
8357 }
8358 
8359 #define TIMER_MAGIC 0x0caf0000
8360 #define TIMER_MAGIC_MASK 0xffff0000
8361 
8362 /* Convert QEMU provided timer ID back to internal 16bit index format */
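/*
 * For example, get_timer_id(0x0caf0003) yields index 3 (provided the
 * index is below the size of g_posix_timers); any value whose top 16
 * bits are not TIMER_MAGIC is rejected with -TARGET_EINVAL.
 */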
8363 static target_timer_t get_timer_id(abi_long arg)
8364 {
8365     target_timer_t timerid = arg;
8366 
8367     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8368         return -TARGET_EINVAL;
8369     }
8370 
8371     timerid &= 0xffff;
8372 
8373     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8374         return -TARGET_EINVAL;
8375     }
8376 
8377     return timerid;
8378 }
8379 
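/*
 * Copy a guest CPU affinity mask at target_addr into a host unsigned
 * long bitmap, bit by bit, so that differences in word size and byte
 * order between guest and host do not matter.
 */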
8380 static int target_to_host_cpu_mask(unsigned long *host_mask,
8381                                    size_t host_size,
8382                                    abi_ulong target_addr,
8383                                    size_t target_size)
8384 {
8385     unsigned target_bits = sizeof(abi_ulong) * 8;
8386     unsigned host_bits = sizeof(*host_mask) * 8;
8387     abi_ulong *target_mask;
8388     unsigned i, j;
8389 
8390     assert(host_size >= target_size);
8391 
8392     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8393     if (!target_mask) {
8394         return -TARGET_EFAULT;
8395     }
8396     memset(host_mask, 0, host_size);
8397 
8398     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8399         unsigned bit = i * target_bits;
8400         abi_ulong val;
8401 
8402         __get_user(val, &target_mask[i]);
8403         for (j = 0; j < target_bits; j++, bit++) {
8404             if (val & (1UL << j)) {
8405                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8406             }
8407         }
8408     }
8409 
8410     unlock_user(target_mask, target_addr, 0);
8411     return 0;
8412 }
8413 
8414 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8415                                    size_t host_size,
8416                                    abi_ulong target_addr,
8417                                    size_t target_size)
8418 {
8419     unsigned target_bits = sizeof(abi_ulong) * 8;
8420     unsigned host_bits = sizeof(*host_mask) * 8;
8421     abi_ulong *target_mask;
8422     unsigned i, j;
8423 
8424     assert(host_size >= target_size);
8425 
8426     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8427     if (!target_mask) {
8428         return -TARGET_EFAULT;
8429     }
8430 
8431     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8432         unsigned bit = i * target_bits;
8433         abi_ulong val = 0;
8434 
8435         for (j = 0; j < target_bits; j++, bit++) {
8436             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8437                 val |= 1UL << j;
8438             }
8439         }
8440         __put_user(val, &target_mask[i]);
8441     }
8442 
8443     unlock_user(target_mask, target_addr, target_size);
8444     return 0;
8445 }
8446 
8447 #ifdef TARGET_NR_getdents
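/*
 * getdents(2) emulation: read host directory entries into a bounce
 * buffer and repack each record into the target layout, byte-swapping
 * the fields and keeping the d_type byte stored after the name.  If the
 * repacked records outgrow the guest buffer, the directory offset is
 * rewound to the first record not returned.
 */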
8448 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8449 {
8450     g_autofree void *hdirp = NULL;
8451     void *tdirp;
8452     int hlen, hoff, toff;
8453     int hreclen, treclen;
8454     off64_t prev_diroff = 0;
8455 
8456     hdirp = g_try_malloc(count);
8457     if (!hdirp) {
8458         return -TARGET_ENOMEM;
8459     }
8460 
8461 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8462     hlen = sys_getdents(dirfd, hdirp, count);
8463 #else
8464     hlen = sys_getdents64(dirfd, hdirp, count);
8465 #endif
8466 
8467     hlen = get_errno(hlen);
8468     if (is_error(hlen)) {
8469         return hlen;
8470     }
8471 
8472     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8473     if (!tdirp) {
8474         return -TARGET_EFAULT;
8475     }
8476 
8477     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8478 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8479         struct linux_dirent *hde = hdirp + hoff;
8480 #else
8481         struct linux_dirent64 *hde = hdirp + hoff;
8482 #endif
8483         struct target_dirent *tde = tdirp + toff;
8484         int namelen;
8485         uint8_t type;
8486 
8487         namelen = strlen(hde->d_name);
8488         hreclen = hde->d_reclen;
8489         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8490         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8491 
8492         if (toff + treclen > count) {
8493             /*
8494              * If the host struct is smaller than the target struct, or
8495              * requires less alignment and thus packs into less space,
8496              * then the host can return more entries than we can pass
8497              * on to the guest.
8498              */
8499             if (toff == 0) {
8500                 toff = -TARGET_EINVAL; /* result buffer is too small */
8501                 break;
8502             }
8503             /*
8504              * Return what we have, resetting the file pointer to the
8505              * location of the first record not returned.
8506              */
8507             lseek64(dirfd, prev_diroff, SEEK_SET);
8508             break;
8509         }
8510 
8511         prev_diroff = hde->d_off;
8512         tde->d_ino = tswapal(hde->d_ino);
8513         tde->d_off = tswapal(hde->d_off);
8514         tde->d_reclen = tswap16(treclen);
8515         memcpy(tde->d_name, hde->d_name, namelen + 1);
8516 
8517         /*
8518          * The getdents type is in what was formerly a padding byte at the
8519          * end of the structure.
8520          */
8521 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8522         type = *((uint8_t *)hde + hreclen - 1);
8523 #else
8524         type = hde->d_type;
8525 #endif
8526         *((uint8_t *)tde + treclen - 1) = type;
8527     }
8528 
8529     unlock_user(tdirp, arg2, toff);
8530     return toff;
8531 }
8532 #endif /* TARGET_NR_getdents */
8533 
8534 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
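/*
 * getdents64(2) emulation: same approach as do_getdents(), but with the
 * fixed-layout 64-bit dirent structure on both sides.
 */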
8535 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8536 {
8537     g_autofree void *hdirp = NULL;
8538     void *tdirp;
8539     int hlen, hoff, toff;
8540     int hreclen, treclen;
8541     off64_t prev_diroff = 0;
8542 
8543     hdirp = g_try_malloc(count);
8544     if (!hdirp) {
8545         return -TARGET_ENOMEM;
8546     }
8547 
8548     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8549     if (is_error(hlen)) {
8550         return hlen;
8551     }
8552 
8553     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8554     if (!tdirp) {
8555         return -TARGET_EFAULT;
8556     }
8557 
8558     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8559         struct linux_dirent64 *hde = hdirp + hoff;
8560         struct target_dirent64 *tde = tdirp + toff;
8561         int namelen;
8562 
8563         namelen = strlen(hde->d_name) + 1;
8564         hreclen = hde->d_reclen;
8565         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8566         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8567 
8568         if (toff + treclen > count) {
8569             /*
8570              * If the host struct is smaller than the target struct, or
8571              * requires less alignment and thus packs into less space,
8572              * then the host can return more entries than we can pass
8573              * on to the guest.
8574              */
8575             if (toff == 0) {
8576                 toff = -TARGET_EINVAL; /* result buffer is too small */
8577                 break;
8578             }
8579             /*
8580              * Return what we have, resetting the file pointer to the
8581              * location of the first record not returned.
8582              */
8583             lseek64(dirfd, prev_diroff, SEEK_SET);
8584             break;
8585         }
8586 
8587         prev_diroff = hde->d_off;
8588         tde->d_ino = tswap64(hde->d_ino);
8589         tde->d_off = tswap64(hde->d_off);
8590         tde->d_reclen = tswap16(treclen);
8591         tde->d_type = hde->d_type;
8592         memcpy(tde->d_name, hde->d_name, namelen);
8593     }
8594 
8595     unlock_user(tdirp, arg2, toff);
8596     return toff;
8597 }
8598 #endif /* TARGET_NR_getdents64 */
8599 
8600 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8601 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8602 #endif
8603 
8604 /* This is an internal helper for do_syscall that gives it a single
8605  * return point, so that actions such as logging of syscall results
8606  * can be performed.
8607  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8608  */
8609 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8610                             abi_long arg2, abi_long arg3, abi_long arg4,
8611                             abi_long arg5, abi_long arg6, abi_long arg7,
8612                             abi_long arg8)
8613 {
8614     CPUState *cpu = env_cpu(cpu_env);
8615     abi_long ret;
8616 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8617     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8618     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8619     || defined(TARGET_NR_statx)
8620     struct stat st;
8621 #endif
8622 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8623     || defined(TARGET_NR_fstatfs)
8624     struct statfs stfs;
8625 #endif
8626     void *p;
8627 
8628     switch(num) {
8629     case TARGET_NR_exit:
8630         /* In old applications this may be used to implement _exit(2).
8631            However, in threaded applications it is used for thread termination,
8632            and _exit_group is used for application termination.
8633            Do thread termination if we have more than one thread.  */
8634 
8635         if (block_signals()) {
8636             return -QEMU_ERESTARTSYS;
8637         }
8638 
8639         pthread_mutex_lock(&clone_lock);
8640 
8641         if (CPU_NEXT(first_cpu)) {
8642             TaskState *ts = cpu->opaque;
8643 
8644             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8645             object_unref(OBJECT(cpu));
8646             /*
8647              * At this point the CPU should be unrealized and removed
8648              * from cpu lists. We can clean-up the rest of the thread
8649              * data without the lock held.
8650              */
8651 
8652             pthread_mutex_unlock(&clone_lock);
8653 
8654             if (ts->child_tidptr) {
8655                 put_user_u32(0, ts->child_tidptr);
8656                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8657                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8658             }
8659             thread_cpu = NULL;
8660             g_free(ts);
8661             rcu_unregister_thread();
8662             pthread_exit(NULL);
8663         }
8664 
8665         pthread_mutex_unlock(&clone_lock);
8666         preexit_cleanup(cpu_env, arg1);
8667         _exit(arg1);
8668         return 0; /* avoid warning */
8669     case TARGET_NR_read:
8670         if (arg2 == 0 && arg3 == 0) {
8671             return get_errno(safe_read(arg1, 0, 0));
8672         } else {
8673             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8674                 return -TARGET_EFAULT;
8675             ret = get_errno(safe_read(arg1, p, arg3));
8676             if (ret >= 0 &&
8677                 fd_trans_host_to_target_data(arg1)) {
8678                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8679             }
8680             unlock_user(p, arg2, ret);
8681         }
8682         return ret;
8683     case TARGET_NR_write:
8684         if (arg2 == 0 && arg3 == 0) {
8685             return get_errno(safe_write(arg1, 0, 0));
8686         }
8687         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8688             return -TARGET_EFAULT;
8689         if (fd_trans_target_to_host_data(arg1)) {
8690             void *copy = g_malloc(arg3);
8691             memcpy(copy, p, arg3);
8692             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8693             if (ret >= 0) {
8694                 ret = get_errno(safe_write(arg1, copy, ret));
8695             }
8696             g_free(copy);
8697         } else {
8698             ret = get_errno(safe_write(arg1, p, arg3));
8699         }
8700         unlock_user(p, arg2, 0);
8701         return ret;
8702 
8703 #ifdef TARGET_NR_open
8704     case TARGET_NR_open:
8705         if (!(p = lock_user_string(arg1)))
8706             return -TARGET_EFAULT;
8707         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8708                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8709                                   arg3));
8710         fd_trans_unregister(ret);
8711         unlock_user(p, arg1, 0);
8712         return ret;
8713 #endif
8714     case TARGET_NR_openat:
8715         if (!(p = lock_user_string(arg2)))
8716             return -TARGET_EFAULT;
8717         ret = get_errno(do_openat(cpu_env, arg1, p,
8718                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8719                                   arg4));
8720         fd_trans_unregister(ret);
8721         unlock_user(p, arg2, 0);
8722         return ret;
8723 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8724     case TARGET_NR_name_to_handle_at:
8725         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8726         return ret;
8727 #endif
8728 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8729     case TARGET_NR_open_by_handle_at:
8730         ret = do_open_by_handle_at(arg1, arg2, arg3);
8731         fd_trans_unregister(ret);
8732         return ret;
8733 #endif
8734 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8735     case TARGET_NR_pidfd_open:
8736         return get_errno(pidfd_open(arg1, arg2));
8737 #endif
8738 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8739     case TARGET_NR_pidfd_send_signal:
8740         {
8741             siginfo_t uinfo, *puinfo;
8742 
8743             if (arg3) {
8744                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8745                 if (!p) {
8746                     return -TARGET_EFAULT;
8747                 }
8748                 target_to_host_siginfo(&uinfo, p);
8749                 unlock_user(p, arg3, 0);
8750                 puinfo = &uinfo;
8751             } else {
8752                 puinfo = NULL;
8753             }
8754             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8755                                               puinfo, arg4));
8756         }
8757         return ret;
8758 #endif
8759 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8760     case TARGET_NR_pidfd_getfd:
8761         return get_errno(pidfd_getfd(arg1, arg2, arg3));
8762 #endif
8763     case TARGET_NR_close:
8764         fd_trans_unregister(arg1);
8765         return get_errno(close(arg1));
8766 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
8767     case TARGET_NR_close_range:
8768         ret = get_errno(sys_close_range(arg1, arg2, arg3));
8769         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
8770             abi_long fd, maxfd;
8771             maxfd = MIN(arg2, target_fd_max);
8772             for (fd = arg1; fd < maxfd; fd++) {
8773                 fd_trans_unregister(fd);
8774             }
8775         }
8776         return ret;
8777 #endif
8778 
8779     case TARGET_NR_brk:
8780         return do_brk(arg1);
8781 #ifdef TARGET_NR_fork
8782     case TARGET_NR_fork:
8783         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8784 #endif
8785 #ifdef TARGET_NR_waitpid
8786     case TARGET_NR_waitpid:
8787         {
8788             int status;
8789             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8790             if (!is_error(ret) && arg2 && ret
8791                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8792                 return -TARGET_EFAULT;
8793         }
8794         return ret;
8795 #endif
8796 #ifdef TARGET_NR_waitid
8797     case TARGET_NR_waitid:
8798         {
8799             siginfo_t info;
8800             info.si_pid = 0;
8801             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8802             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8803                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8804                     return -TARGET_EFAULT;
8805                 host_to_target_siginfo(p, &info);
8806                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8807             }
8808         }
8809         return ret;
8810 #endif
8811 #ifdef TARGET_NR_creat /* not on alpha */
8812     case TARGET_NR_creat:
8813         if (!(p = lock_user_string(arg1)))
8814             return -TARGET_EFAULT;
8815         ret = get_errno(creat(p, arg2));
8816         fd_trans_unregister(ret);
8817         unlock_user(p, arg1, 0);
8818         return ret;
8819 #endif
8820 #ifdef TARGET_NR_link
8821     case TARGET_NR_link:
8822         {
8823             void * p2;
8824             p = lock_user_string(arg1);
8825             p2 = lock_user_string(arg2);
8826             if (!p || !p2)
8827                 ret = -TARGET_EFAULT;
8828             else
8829                 ret = get_errno(link(p, p2));
8830             unlock_user(p2, arg2, 0);
8831             unlock_user(p, arg1, 0);
8832         }
8833         return ret;
8834 #endif
8835 #if defined(TARGET_NR_linkat)
8836     case TARGET_NR_linkat:
8837         {
8838             void * p2 = NULL;
8839             if (!arg2 || !arg4)
8840                 return -TARGET_EFAULT;
8841             p  = lock_user_string(arg2);
8842             p2 = lock_user_string(arg4);
8843             if (!p || !p2)
8844                 ret = -TARGET_EFAULT;
8845             else
8846                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8847             unlock_user(p, arg2, 0);
8848             unlock_user(p2, arg4, 0);
8849         }
8850         return ret;
8851 #endif
8852 #ifdef TARGET_NR_unlink
8853     case TARGET_NR_unlink:
8854         if (!(p = lock_user_string(arg1)))
8855             return -TARGET_EFAULT;
8856         ret = get_errno(unlink(p));
8857         unlock_user(p, arg1, 0);
8858         return ret;
8859 #endif
8860 #if defined(TARGET_NR_unlinkat)
8861     case TARGET_NR_unlinkat:
8862         if (!(p = lock_user_string(arg2)))
8863             return -TARGET_EFAULT;
8864         ret = get_errno(unlinkat(arg1, p, arg3));
8865         unlock_user(p, arg2, 0);
8866         return ret;
8867 #endif
8868     case TARGET_NR_execve:
8869         {
8870             char **argp, **envp;
8871             int argc, envc;
8872             abi_ulong gp;
8873             abi_ulong guest_argp;
8874             abi_ulong guest_envp;
8875             abi_ulong addr;
8876             char **q;
8877 
8878             argc = 0;
8879             guest_argp = arg2;
8880             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8881                 if (get_user_ual(addr, gp))
8882                     return -TARGET_EFAULT;
8883                 if (!addr)
8884                     break;
8885                 argc++;
8886             }
8887             envc = 0;
8888             guest_envp = arg3;
8889             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8890                 if (get_user_ual(addr, gp))
8891                     return -TARGET_EFAULT;
8892                 if (!addr)
8893                     break;
8894                 envc++;
8895             }
8896 
8897             argp = g_new0(char *, argc + 1);
8898             envp = g_new0(char *, envc + 1);
8899 
8900             for (gp = guest_argp, q = argp; gp;
8901                   gp += sizeof(abi_ulong), q++) {
8902                 if (get_user_ual(addr, gp))
8903                     goto execve_efault;
8904                 if (!addr)
8905                     break;
8906                 if (!(*q = lock_user_string(addr)))
8907                     goto execve_efault;
8908             }
8909             *q = NULL;
8910 
8911             for (gp = guest_envp, q = envp; gp;
8912                   gp += sizeof(abi_ulong), q++) {
8913                 if (get_user_ual(addr, gp))
8914                     goto execve_efault;
8915                 if (!addr)
8916                     break;
8917                 if (!(*q = lock_user_string(addr)))
8918                     goto execve_efault;
8919             }
8920             *q = NULL;
8921 
8922             if (!(p = lock_user_string(arg1)))
8923                 goto execve_efault;
8924             /* Although execve() is not an interruptible syscall it is
8925              * a special case where we must use the safe_syscall wrapper:
8926              * if we allow a signal to happen before we make the host
8927              * syscall then we will 'lose' it, because at the point of
8928              * execve the process leaves QEMU's control. So we use the
8929              * safe syscall wrapper to ensure that we either take the
8930              * signal as a guest signal, or else it does not happen
8931              * before the execve completes and makes it the other
8932              * program's problem.
8933              */
8934             if (is_proc_myself(p, "exe")) {
8935                 ret = get_errno(safe_execve(exec_path, argp, envp));
8936             } else {
8937                 ret = get_errno(safe_execve(p, argp, envp));
8938             }
8939             unlock_user(p, arg1, 0);
8940 
8941             goto execve_end;
8942 
8943         execve_efault:
8944             ret = -TARGET_EFAULT;
8945 
8946         execve_end:
8947             for (gp = guest_argp, q = argp; *q;
8948                   gp += sizeof(abi_ulong), q++) {
8949                 if (get_user_ual(addr, gp)
8950                     || !addr)
8951                     break;
8952                 unlock_user(*q, addr, 0);
8953             }
8954             for (gp = guest_envp, q = envp; *q;
8955                   gp += sizeof(abi_ulong), q++) {
8956                 if (get_user_ual(addr, gp)
8957                     || !addr)
8958                     break;
8959                 unlock_user(*q, addr, 0);
8960             }
8961 
8962             g_free(argp);
8963             g_free(envp);
8964         }
8965         return ret;
8966     case TARGET_NR_chdir:
8967         if (!(p = lock_user_string(arg1)))
8968             return -TARGET_EFAULT;
8969         ret = get_errno(chdir(p));
8970         unlock_user(p, arg1, 0);
8971         return ret;
8972 #ifdef TARGET_NR_time
8973     case TARGET_NR_time:
8974         {
8975             time_t host_time;
8976             ret = get_errno(time(&host_time));
8977             if (!is_error(ret)
8978                 && arg1
8979                 && put_user_sal(host_time, arg1))
8980                 return -TARGET_EFAULT;
8981         }
8982         return ret;
8983 #endif
8984 #ifdef TARGET_NR_mknod
8985     case TARGET_NR_mknod:
8986         if (!(p = lock_user_string(arg1)))
8987             return -TARGET_EFAULT;
8988         ret = get_errno(mknod(p, arg2, arg3));
8989         unlock_user(p, arg1, 0);
8990         return ret;
8991 #endif
8992 #if defined(TARGET_NR_mknodat)
8993     case TARGET_NR_mknodat:
8994         if (!(p = lock_user_string(arg2)))
8995             return -TARGET_EFAULT;
8996         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8997         unlock_user(p, arg2, 0);
8998         return ret;
8999 #endif
9000 #ifdef TARGET_NR_chmod
9001     case TARGET_NR_chmod:
9002         if (!(p = lock_user_string(arg1)))
9003             return -TARGET_EFAULT;
9004         ret = get_errno(chmod(p, arg2));
9005         unlock_user(p, arg1, 0);
9006         return ret;
9007 #endif
9008 #ifdef TARGET_NR_lseek
9009     case TARGET_NR_lseek:
9010         return get_errno(lseek(arg1, arg2, arg3));
9011 #endif
9012 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9013     /* Alpha specific */
9014     case TARGET_NR_getxpid:
9015         cpu_env->ir[IR_A4] = getppid();
9016         return get_errno(getpid());
9017 #endif
9018 #ifdef TARGET_NR_getpid
9019     case TARGET_NR_getpid:
9020         return get_errno(getpid());
9021 #endif
9022     case TARGET_NR_mount:
9023         {
9024             /* need to look at the data field */
9025             void *p2, *p3;
9026 
9027             if (arg1) {
9028                 p = lock_user_string(arg1);
9029                 if (!p) {
9030                     return -TARGET_EFAULT;
9031                 }
9032             } else {
9033                 p = NULL;
9034             }
9035 
9036             p2 = lock_user_string(arg2);
9037             if (!p2) {
9038                 if (arg1) {
9039                     unlock_user(p, arg1, 0);
9040                 }
9041                 return -TARGET_EFAULT;
9042             }
9043 
9044             if (arg3) {
9045                 p3 = lock_user_string(arg3);
9046                 if (!p3) {
9047                     if (arg1) {
9048                         unlock_user(p, arg1, 0);
9049                     }
9050                     unlock_user(p2, arg2, 0);
9051                     return -TARGET_EFAULT;
9052                 }
9053             } else {
9054                 p3 = NULL;
9055             }
9056 
9057             /* FIXME - arg5 should be locked, but it isn't clear how to
9058              * do that since it's not guaranteed to be a NULL-terminated
9059              * string.
9060              */
9061             if (!arg5) {
9062                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9063             } else {
9064                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9065             }
9066             ret = get_errno(ret);
9067 
9068             if (arg1) {
9069                 unlock_user(p, arg1, 0);
9070             }
9071             unlock_user(p2, arg2, 0);
9072             if (arg3) {
9073                 unlock_user(p3, arg3, 0);
9074             }
9075         }
9076         return ret;
9077 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9078 #if defined(TARGET_NR_umount)
9079     case TARGET_NR_umount:
9080 #endif
9081 #if defined(TARGET_NR_oldumount)
9082     case TARGET_NR_oldumount:
9083 #endif
9084         if (!(p = lock_user_string(arg1)))
9085             return -TARGET_EFAULT;
9086         ret = get_errno(umount(p));
9087         unlock_user(p, arg1, 0);
9088         return ret;
9089 #endif
9090 #ifdef TARGET_NR_stime /* not on alpha */
9091     case TARGET_NR_stime:
9092         {
9093             struct timespec ts;
9094             ts.tv_nsec = 0;
9095             if (get_user_sal(ts.tv_sec, arg1)) {
9096                 return -TARGET_EFAULT;
9097             }
9098             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9099         }
9100 #endif
9101 #ifdef TARGET_NR_alarm /* not on alpha */
9102     case TARGET_NR_alarm:
9103         return alarm(arg1);
9104 #endif
9105 #ifdef TARGET_NR_pause /* not on alpha */
9106     case TARGET_NR_pause:
9107         if (!block_signals()) {
9108             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9109         }
9110         return -TARGET_EINTR;
9111 #endif
9112 #ifdef TARGET_NR_utime
9113     case TARGET_NR_utime:
9114         {
9115             struct utimbuf tbuf, *host_tbuf;
9116             struct target_utimbuf *target_tbuf;
9117             if (arg2) {
9118                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9119                     return -TARGET_EFAULT;
9120                 tbuf.actime = tswapal(target_tbuf->actime);
9121                 tbuf.modtime = tswapal(target_tbuf->modtime);
9122                 unlock_user_struct(target_tbuf, arg2, 0);
9123                 host_tbuf = &tbuf;
9124             } else {
9125                 host_tbuf = NULL;
9126             }
9127             if (!(p = lock_user_string(arg1)))
9128                 return -TARGET_EFAULT;
9129             ret = get_errno(utime(p, host_tbuf));
9130             unlock_user(p, arg1, 0);
9131         }
9132         return ret;
9133 #endif
9134 #ifdef TARGET_NR_utimes
9135     case TARGET_NR_utimes:
9136         {
9137             struct timeval *tvp, tv[2];
9138             if (arg2) {
9139                 if (copy_from_user_timeval(&tv[0], arg2)
9140                     || copy_from_user_timeval(&tv[1],
9141                                               arg2 + sizeof(struct target_timeval)))
9142                     return -TARGET_EFAULT;
9143                 tvp = tv;
9144             } else {
9145                 tvp = NULL;
9146             }
9147             if (!(p = lock_user_string(arg1)))
9148                 return -TARGET_EFAULT;
9149             ret = get_errno(utimes(p, tvp));
9150             unlock_user(p, arg1, 0);
9151         }
9152         return ret;
9153 #endif
9154 #if defined(TARGET_NR_futimesat)
9155     case TARGET_NR_futimesat:
9156         {
9157             struct timeval *tvp, tv[2];
9158             if (arg3) {
9159                 if (copy_from_user_timeval(&tv[0], arg3)
9160                     || copy_from_user_timeval(&tv[1],
9161                                               arg3 + sizeof(struct target_timeval)))
9162                     return -TARGET_EFAULT;
9163                 tvp = tv;
9164             } else {
9165                 tvp = NULL;
9166             }
9167             if (!(p = lock_user_string(arg2))) {
9168                 return -TARGET_EFAULT;
9169             }
9170             ret = get_errno(futimesat(arg1, path(p), tvp));
9171             unlock_user(p, arg2, 0);
9172         }
9173         return ret;
9174 #endif
9175 #ifdef TARGET_NR_access
9176     case TARGET_NR_access:
9177         if (!(p = lock_user_string(arg1))) {
9178             return -TARGET_EFAULT;
9179         }
9180         ret = get_errno(access(path(p), arg2));
9181         unlock_user(p, arg1, 0);
9182         return ret;
9183 #endif
9184 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9185     case TARGET_NR_faccessat:
9186         if (!(p = lock_user_string(arg2))) {
9187             return -TARGET_EFAULT;
9188         }
9189         ret = get_errno(faccessat(arg1, p, arg3, 0));
9190         unlock_user(p, arg2, 0);
9191         return ret;
9192 #endif
9193 #if defined(TARGET_NR_faccessat2)
9194     case TARGET_NR_faccessat2:
9195         if (!(p = lock_user_string(arg2))) {
9196             return -TARGET_EFAULT;
9197         }
9198         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9199         unlock_user(p, arg2, 0);
9200         return ret;
9201 #endif
9202 #ifdef TARGET_NR_nice /* not on alpha */
9203     case TARGET_NR_nice:
9204         return get_errno(nice(arg1));
9205 #endif
9206     case TARGET_NR_sync:
9207         sync();
9208         return 0;
9209 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9210     case TARGET_NR_syncfs:
9211         return get_errno(syncfs(arg1));
9212 #endif
9213     case TARGET_NR_kill:
9214         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9215 #ifdef TARGET_NR_rename
9216     case TARGET_NR_rename:
9217         {
9218             void *p2;
9219             p = lock_user_string(arg1);
9220             p2 = lock_user_string(arg2);
9221             if (!p || !p2)
9222                 ret = -TARGET_EFAULT;
9223             else
9224                 ret = get_errno(rename(p, p2));
9225             unlock_user(p2, arg2, 0);
9226             unlock_user(p, arg1, 0);
9227         }
9228         return ret;
9229 #endif
9230 #if defined(TARGET_NR_renameat)
9231     case TARGET_NR_renameat:
9232         {
9233             void *p2;
9234             p  = lock_user_string(arg2);
9235             p2 = lock_user_string(arg4);
9236             if (!p || !p2)
9237                 ret = -TARGET_EFAULT;
9238             else
9239                 ret = get_errno(renameat(arg1, p, arg3, p2));
9240             unlock_user(p2, arg4, 0);
9241             unlock_user(p, arg2, 0);
9242         }
9243         return ret;
9244 #endif
9245 #if defined(TARGET_NR_renameat2)
9246     case TARGET_NR_renameat2:
9247         {
9248             void *p2;
9249             p  = lock_user_string(arg2);
9250             p2 = lock_user_string(arg4);
9251             if (!p || !p2) {
9252                 ret = -TARGET_EFAULT;
9253             } else {
9254                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9255             }
9256             unlock_user(p2, arg4, 0);
9257             unlock_user(p, arg2, 0);
9258         }
9259         return ret;
9260 #endif
9261 #ifdef TARGET_NR_mkdir
9262     case TARGET_NR_mkdir:
9263         if (!(p = lock_user_string(arg1)))
9264             return -TARGET_EFAULT;
9265         ret = get_errno(mkdir(p, arg2));
9266         unlock_user(p, arg1, 0);
9267         return ret;
9268 #endif
9269 #if defined(TARGET_NR_mkdirat)
9270     case TARGET_NR_mkdirat:
9271         if (!(p = lock_user_string(arg2)))
9272             return -TARGET_EFAULT;
9273         ret = get_errno(mkdirat(arg1, p, arg3));
9274         unlock_user(p, arg2, 0);
9275         return ret;
9276 #endif
9277 #ifdef TARGET_NR_rmdir
9278     case TARGET_NR_rmdir:
9279         if (!(p = lock_user_string(arg1)))
9280             return -TARGET_EFAULT;
9281         ret = get_errno(rmdir(p));
9282         unlock_user(p, arg1, 0);
9283         return ret;
9284 #endif
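    /*
     * For dup (and dup2/dup3 below), any fd translation state registered
     * for the old descriptor must also be duplicated onto the new one via
     * fd_trans_dup(), so emulated ioctls etc. keep working on it.
     */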
9285     case TARGET_NR_dup:
9286         ret = get_errno(dup(arg1));
9287         if (ret >= 0) {
9288             fd_trans_dup(arg1, ret);
9289         }
9290         return ret;
9291 #ifdef TARGET_NR_pipe
9292     case TARGET_NR_pipe:
9293         return do_pipe(cpu_env, arg1, 0, 0);
9294 #endif
9295 #ifdef TARGET_NR_pipe2
9296     case TARGET_NR_pipe2:
9297         return do_pipe(cpu_env, arg1,
9298                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9299 #endif
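    /*
     * times() both fills in the struct tms and returns a clock_t value, so
     * the structure fields and the syscall return value each need converting
     * to the target's clock_t representation.
     */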
9300     case TARGET_NR_times:
9301         {
9302             struct target_tms *tmsp;
9303             struct tms tms;
9304             ret = get_errno(times(&tms));
9305             if (arg1) {
9306                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9307                 if (!tmsp)
9308                     return -TARGET_EFAULT;
9309                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9310                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9311                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9312                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9313             }
9314             if (!is_error(ret))
9315                 ret = host_to_target_clock_t(ret);
9316         }
9317         return ret;
9318     case TARGET_NR_acct:
9319         if (arg1 == 0) {
9320             ret = get_errno(acct(NULL));
9321         } else {
9322             if (!(p = lock_user_string(arg1))) {
9323                 return -TARGET_EFAULT;
9324             }
9325             ret = get_errno(acct(path(p)));
9326             unlock_user(p, arg1, 0);
9327         }
9328         return ret;
9329 #ifdef TARGET_NR_umount2
9330     case TARGET_NR_umount2:
9331         if (!(p = lock_user_string(arg1)))
9332             return -TARGET_EFAULT;
9333         ret = get_errno(umount2(p, arg2));
9334         unlock_user(p, arg1, 0);
9335         return ret;
9336 #endif
9337     case TARGET_NR_ioctl:
9338         return do_ioctl(arg1, arg2, arg3);
9339 #ifdef TARGET_NR_fcntl
9340     case TARGET_NR_fcntl:
9341         return do_fcntl(arg1, arg2, arg3);
9342 #endif
9343     case TARGET_NR_setpgid:
9344         return get_errno(setpgid(arg1, arg2));
9345     case TARGET_NR_umask:
9346         return get_errno(umask(arg1));
9347     case TARGET_NR_chroot:
9348         if (!(p = lock_user_string(arg1)))
9349             return -TARGET_EFAULT;
9350         ret = get_errno(chroot(p));
9351         unlock_user(p, arg1, 0);
9352         return ret;
9353 #ifdef TARGET_NR_dup2
9354     case TARGET_NR_dup2:
9355         ret = get_errno(dup2(arg1, arg2));
9356         if (ret >= 0) {
9357             fd_trans_dup(arg1, arg2);
9358         }
9359         return ret;
9360 #endif
9361 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9362     case TARGET_NR_dup3:
9363     {
9364         int host_flags;
9365 
9366         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9367             return -TARGET_EINVAL;
9368         }
9369         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9370         ret = get_errno(dup3(arg1, arg2, host_flags));
9371         if (ret >= 0) {
9372             fd_trans_dup(arg1, arg2);
9373         }
9374         return ret;
9375     }
9376 #endif
9377 #ifdef TARGET_NR_getppid /* not on alpha */
9378     case TARGET_NR_getppid:
9379         return get_errno(getppid());
9380 #endif
9381 #ifdef TARGET_NR_getpgrp
9382     case TARGET_NR_getpgrp:
9383         return get_errno(getpgrp());
9384 #endif
9385     case TARGET_NR_setsid:
9386         return get_errno(setsid());
9387 #ifdef TARGET_NR_sigaction
9388     case TARGET_NR_sigaction:
9389         {
9390 #if defined(TARGET_MIPS)
9391             struct target_sigaction act, oact, *pact, *old_act;
9392 
9393             if (arg2) {
9394                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9395                     return -TARGET_EFAULT;
9396                 act._sa_handler = old_act->_sa_handler;
9397                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9398                 act.sa_flags = old_act->sa_flags;
9399                 unlock_user_struct(old_act, arg2, 0);
9400                 pact = &act;
9401             } else {
9402                 pact = NULL;
9403             }
9404 
9405             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9406 
9407             if (!is_error(ret) && arg3) {
9408                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9409                     return -TARGET_EFAULT;
9410                 old_act->_sa_handler = oact._sa_handler;
9411                 old_act->sa_flags = oact.sa_flags;
9412                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9413                 old_act->sa_mask.sig[1] = 0;
9414                 old_act->sa_mask.sig[2] = 0;
9415                 old_act->sa_mask.sig[3] = 0;
9416                 unlock_user_struct(old_act, arg3, 1);
9417             }
9418 #else
9419             struct target_old_sigaction *old_act;
9420             struct target_sigaction act, oact, *pact;
9421             if (arg2) {
9422                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9423                     return -TARGET_EFAULT;
9424                 act._sa_handler = old_act->_sa_handler;
9425                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9426                 act.sa_flags = old_act->sa_flags;
9427 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9428                 act.sa_restorer = old_act->sa_restorer;
9429 #endif
9430                 unlock_user_struct(old_act, arg2, 0);
9431                 pact = &act;
9432             } else {
9433                 pact = NULL;
9434             }
9435             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9436             if (!is_error(ret) && arg3) {
9437                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9438                     return -TARGET_EFAULT;
9439                 old_act->_sa_handler = oact._sa_handler;
9440                 old_act->sa_mask = oact.sa_mask.sig[0];
9441                 old_act->sa_flags = oact.sa_flags;
9442 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9443                 old_act->sa_restorer = oact.sa_restorer;
9444 #endif
9445                 unlock_user_struct(old_act, arg3, 1);
9446             }
9447 #endif
9448         }
9449         return ret;
9450 #endif
9451     case TARGET_NR_rt_sigaction:
9452         {
9453             /*
9454              * For Alpha and SPARC this is a 5 argument syscall, with
9455              * a 'restorer' parameter which must be copied into the
9456              * sa_restorer field of the sigaction struct.
9457              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9458              * and arg5 is the sigsetsize.
9459              */
9460 #if defined(TARGET_ALPHA)
9461             target_ulong sigsetsize = arg4;
9462             target_ulong restorer = arg5;
9463 #elif defined(TARGET_SPARC)
9464             target_ulong restorer = arg4;
9465             target_ulong sigsetsize = arg5;
9466 #else
9467             target_ulong sigsetsize = arg4;
9468             target_ulong restorer = 0;
9469 #endif
9470             struct target_sigaction *act = NULL;
9471             struct target_sigaction *oact = NULL;
9472 
9473             if (sigsetsize != sizeof(target_sigset_t)) {
9474                 return -TARGET_EINVAL;
9475             }
9476             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9477                 return -TARGET_EFAULT;
9478             }
9479             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9480                 ret = -TARGET_EFAULT;
9481             } else {
9482                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9483                 if (oact) {
9484                     unlock_user_struct(oact, arg3, 1);
9485                 }
9486             }
9487             if (act) {
9488                 unlock_user_struct(act, arg2, 0);
9489             }
9490         }
9491         return ret;
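    /*
     * sgetmask/ssetmask operate on the old-style signal mask, which covers
     * only the first 32 signals, hence the old_sigset conversion helpers.
     */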
9492 #ifdef TARGET_NR_sgetmask /* not on alpha */
9493     case TARGET_NR_sgetmask:
9494         {
9495             sigset_t cur_set;
9496             abi_ulong target_set;
9497             ret = do_sigprocmask(0, NULL, &cur_set);
9498             if (!ret) {
9499                 host_to_target_old_sigset(&target_set, &cur_set);
9500                 ret = target_set;
9501             }
9502         }
9503         return ret;
9504 #endif
9505 #ifdef TARGET_NR_ssetmask /* not on alpha */
9506     case TARGET_NR_ssetmask:
9507         {
9508             sigset_t set, oset;
9509             abi_ulong target_set = arg1;
9510             target_to_host_old_sigset(&set, &target_set);
9511             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9512             if (!ret) {
9513                 host_to_target_old_sigset(&target_set, &oset);
9514                 ret = target_set;
9515             }
9516         }
9517         return ret;
9518 #endif
9519 #ifdef TARGET_NR_sigprocmask
9520     case TARGET_NR_sigprocmask:
9521         {
9522 #if defined(TARGET_ALPHA)
9523             sigset_t set, oldset;
9524             abi_ulong mask;
9525             int how;
9526 
9527             switch (arg1) {
9528             case TARGET_SIG_BLOCK:
9529                 how = SIG_BLOCK;
9530                 break;
9531             case TARGET_SIG_UNBLOCK:
9532                 how = SIG_UNBLOCK;
9533                 break;
9534             case TARGET_SIG_SETMASK:
9535                 how = SIG_SETMASK;
9536                 break;
9537             default:
9538                 return -TARGET_EINVAL;
9539             }
9540             mask = arg2;
9541             target_to_host_old_sigset(&set, &mask);
9542 
9543             ret = do_sigprocmask(how, &set, &oldset);
9544             if (!is_error(ret)) {
9545                 host_to_target_old_sigset(&mask, &oldset);
9546                 ret = mask;
9547                 cpu_env->ir[IR_V0] = 0; /* force no error */
9548             }
9549 #else
9550             sigset_t set, oldset, *set_ptr;
9551             int how;
9552 
9553             if (arg2) {
9554                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9555                 if (!p) {
9556                     return -TARGET_EFAULT;
9557                 }
9558                 target_to_host_old_sigset(&set, p);
9559                 unlock_user(p, arg2, 0);
9560                 set_ptr = &set;
9561                 switch (arg1) {
9562                 case TARGET_SIG_BLOCK:
9563                     how = SIG_BLOCK;
9564                     break;
9565                 case TARGET_SIG_UNBLOCK:
9566                     how = SIG_UNBLOCK;
9567                     break;
9568                 case TARGET_SIG_SETMASK:
9569                     how = SIG_SETMASK;
9570                     break;
9571                 default:
9572                     return -TARGET_EINVAL;
9573                 }
9574             } else {
9575                 how = 0;
9576                 set_ptr = NULL;
9577             }
9578             ret = do_sigprocmask(how, set_ptr, &oldset);
9579             if (!is_error(ret) && arg3) {
9580                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9581                     return -TARGET_EFAULT;
9582                 host_to_target_old_sigset(p, &oldset);
9583                 unlock_user(p, arg3, sizeof(target_sigset_t));
9584             }
9585 #endif
9586         }
9587         return ret;
9588 #endif
9589     case TARGET_NR_rt_sigprocmask:
9590         {
9591             int how = arg1;
9592             sigset_t set, oldset, *set_ptr;
9593 
9594             if (arg4 != sizeof(target_sigset_t)) {
9595                 return -TARGET_EINVAL;
9596             }
9597 
9598             if (arg2) {
9599                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9600                 if (!p) {
9601                     return -TARGET_EFAULT;
9602                 }
9603                 target_to_host_sigset(&set, p);
9604                 unlock_user(p, arg2, 0);
9605                 set_ptr = &set;
9606                 switch(how) {
9607                 case TARGET_SIG_BLOCK:
9608                     how = SIG_BLOCK;
9609                     break;
9610                 case TARGET_SIG_UNBLOCK:
9611                     how = SIG_UNBLOCK;
9612                     break;
9613                 case TARGET_SIG_SETMASK:
9614                     how = SIG_SETMASK;
9615                     break;
9616                 default:
9617                     return -TARGET_EINVAL;
9618                 }
9619             } else {
9620                 how = 0;
9621                 set_ptr = NULL;
9622             }
9623             ret = do_sigprocmask(how, set_ptr, &oldset);
9624             if (!is_error(ret) && arg3) {
9625                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9626                     return -TARGET_EFAULT;
9627                 host_to_target_sigset(p, &oldset);
9628                 unlock_user(p, arg3, sizeof(target_sigset_t));
9629             }
9630         }
9631         return ret;
9632 #ifdef TARGET_NR_sigpending
9633     case TARGET_NR_sigpending:
9634         {
9635             sigset_t set;
9636             ret = get_errno(sigpending(&set));
9637             if (!is_error(ret)) {
9638                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9639                     return -TARGET_EFAULT;
9640                 host_to_target_old_sigset(p, &set);
9641                 unlock_user(p, arg1, sizeof(target_sigset_t));
9642             }
9643         }
9644         return ret;
9645 #endif
9646     case TARGET_NR_rt_sigpending:
9647         {
9648             sigset_t set;
9649 
9650             /* Yes, this check is >, not != like most. We follow the kernel's
9651              * logic here: the kernel implements NR_sigpending through this same
9652              * code path, and in that case the old_sigset_t it copies out is
9653              * smaller in size.
9654              */
9655             if (arg2 > sizeof(target_sigset_t)) {
9656                 return -TARGET_EINVAL;
9657             }
9658 
9659             ret = get_errno(sigpending(&set));
9660             if (!is_error(ret)) {
9661                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9662                     return -TARGET_EFAULT;
9663                 host_to_target_sigset(p, &set);
9664                 unlock_user(p, arg1, sizeof(target_sigset_t));
9665             }
9666         }
9667         return ret;
9668 #ifdef TARGET_NR_sigsuspend
9669     case TARGET_NR_sigsuspend:
9670         {
9671             sigset_t *set;
9672 
9673 #if defined(TARGET_ALPHA)
9674             TaskState *ts = cpu->opaque;
9675             /* target_to_host_old_sigset will bswap back */
9676             abi_ulong mask = tswapal(arg1);
9677             set = &ts->sigsuspend_mask;
9678             target_to_host_old_sigset(set, &mask);
9679 #else
9680             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9681             if (ret != 0) {
9682                 return ret;
9683             }
9684 #endif
9685             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9686             finish_sigsuspend_mask(ret);
9687         }
9688         return ret;
9689 #endif
9690     case TARGET_NR_rt_sigsuspend:
9691         {
9692             sigset_t *set;
9693 
9694             ret = process_sigsuspend_mask(&set, arg1, arg2);
9695             if (ret != 0) {
9696                 return ret;
9697             }
9698             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9699             finish_sigsuspend_mask(ret);
9700         }
9701         return ret;
9702 #ifdef TARGET_NR_rt_sigtimedwait
9703     case TARGET_NR_rt_sigtimedwait:
9704         {
9705             sigset_t set;
9706             struct timespec uts, *puts;
9707             siginfo_t uinfo;
9708 
9709             if (arg4 != sizeof(target_sigset_t)) {
9710                 return -TARGET_EINVAL;
9711             }
9712 
9713             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9714                 return -TARGET_EFAULT;
9715             target_to_host_sigset(&set, p);
9716             unlock_user(p, arg1, 0);
9717             if (arg3) {
9718                 puts = &uts;
9719                 if (target_to_host_timespec(puts, arg3)) {
9720                     return -TARGET_EFAULT;
9721                 }
9722             } else {
9723                 puts = NULL;
9724             }
9725             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9726                                                  SIGSET_T_SIZE));
9727             if (!is_error(ret)) {
9728                 if (arg2) {
9729                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9730                                   0);
9731                     if (!p) {
9732                         return -TARGET_EFAULT;
9733                     }
9734                     host_to_target_siginfo(p, &uinfo);
9735                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9736                 }
9737                 ret = host_to_target_signal(ret);
9738             }
9739         }
9740         return ret;
9741 #endif
9742 #ifdef TARGET_NR_rt_sigtimedwait_time64
9743     case TARGET_NR_rt_sigtimedwait_time64:
9744         {
9745             sigset_t set;
9746             struct timespec uts, *puts;
9747             siginfo_t uinfo;
9748 
9749             if (arg4 != sizeof(target_sigset_t)) {
9750                 return -TARGET_EINVAL;
9751             }
9752 
9753             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9754             if (!p) {
9755                 return -TARGET_EFAULT;
9756             }
9757             target_to_host_sigset(&set, p);
9758             unlock_user(p, arg1, 0);
9759             if (arg3) {
9760                 puts = &uts;
9761                 if (target_to_host_timespec64(puts, arg3)) {
9762                     return -TARGET_EFAULT;
9763                 }
9764             } else {
9765                 puts = NULL;
9766             }
9767             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9768                                                  SIGSET_T_SIZE));
9769             if (!is_error(ret)) {
9770                 if (arg2) {
9771                     p = lock_user(VERIFY_WRITE, arg2,
9772                                   sizeof(target_siginfo_t), 0);
9773                     if (!p) {
9774                         return -TARGET_EFAULT;
9775                     }
9776                     host_to_target_siginfo(p, &uinfo);
9777                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9778                 }
9779                 ret = host_to_target_signal(ret);
9780             }
9781         }
9782         return ret;
9783 #endif
9784     case TARGET_NR_rt_sigqueueinfo:
9785         {
9786             siginfo_t uinfo;
9787 
9788             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9789             if (!p) {
9790                 return -TARGET_EFAULT;
9791             }
9792             target_to_host_siginfo(&uinfo, p);
9793             unlock_user(p, arg3, 0);
9794             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9795         }
9796         return ret;
9797     case TARGET_NR_rt_tgsigqueueinfo:
9798         {
9799             siginfo_t uinfo;
9800 
9801             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9802             if (!p) {
9803                 return -TARGET_EFAULT;
9804             }
9805             target_to_host_siginfo(&uinfo, p);
9806             unlock_user(p, arg4, 0);
9807             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9808         }
9809         return ret;
9810 #ifdef TARGET_NR_sigreturn
9811     case TARGET_NR_sigreturn:
9812         if (block_signals()) {
9813             return -QEMU_ERESTARTSYS;
9814         }
9815         return do_sigreturn(cpu_env);
9816 #endif
9817     case TARGET_NR_rt_sigreturn:
9818         if (block_signals()) {
9819             return -QEMU_ERESTARTSYS;
9820         }
9821         return do_rt_sigreturn(cpu_env);
9822     case TARGET_NR_sethostname:
9823         if (!(p = lock_user_string(arg1)))
9824             return -TARGET_EFAULT;
9825         ret = get_errno(sethostname(p, arg2));
9826         unlock_user(p, arg1, 0);
9827         return ret;
9828 #ifdef TARGET_NR_setrlimit
9829     case TARGET_NR_setrlimit:
9830         {
9831             int resource = target_to_host_resource(arg1);
9832             struct target_rlimit *target_rlim;
9833             struct rlimit rlim;
9834             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9835                 return -TARGET_EFAULT;
9836             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9837             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9838             unlock_user_struct(target_rlim, arg2, 0);
9839             /*
9840              * If we just passed through resource limit settings for memory then
9841              * they would also apply to QEMU's own allocations, and QEMU will
9842              * crash or hang or die if its allocations fail. Ideally we would
9843              * track the guest allocations in QEMU and apply the limits ourselves.
9844              * For now, just tell the guest the call succeeded but don't actually
9845              * limit anything.
9846              */
9847             if (resource != RLIMIT_AS &&
9848                 resource != RLIMIT_DATA &&
9849                 resource != RLIMIT_STACK) {
9850                 return get_errno(setrlimit(resource, &rlim));
9851             } else {
9852                 return 0;
9853             }
9854         }
9855 #endif
9856 #ifdef TARGET_NR_getrlimit
9857     case TARGET_NR_getrlimit:
9858         {
9859             int resource = target_to_host_resource(arg1);
9860             struct target_rlimit *target_rlim;
9861             struct rlimit rlim;
9862 
9863             ret = get_errno(getrlimit(resource, &rlim));
9864             if (!is_error(ret)) {
9865                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9866                     return -TARGET_EFAULT;
9867                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9868                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9869                 unlock_user_struct(target_rlim, arg2, 1);
9870             }
9871         }
9872         return ret;
9873 #endif
9874     case TARGET_NR_getrusage:
9875         {
9876             struct rusage rusage;
9877             ret = get_errno(getrusage(arg1, &rusage));
9878             if (!is_error(ret)) {
9879                 ret = host_to_target_rusage(arg2, &rusage);
9880             }
9881         }
9882         return ret;
9883 #if defined(TARGET_NR_gettimeofday)
9884     case TARGET_NR_gettimeofday:
9885         {
9886             struct timeval tv;
9887             struct timezone tz;
9888 
9889             ret = get_errno(gettimeofday(&tv, &tz));
9890             if (!is_error(ret)) {
9891                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9892                     return -TARGET_EFAULT;
9893                 }
9894                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9895                     return -TARGET_EFAULT;
9896                 }
9897             }
9898         }
9899         return ret;
9900 #endif
9901 #if defined(TARGET_NR_settimeofday)
9902     case TARGET_NR_settimeofday:
9903         {
9904             struct timeval tv, *ptv = NULL;
9905             struct timezone tz, *ptz = NULL;
9906 
9907             if (arg1) {
9908                 if (copy_from_user_timeval(&tv, arg1)) {
9909                     return -TARGET_EFAULT;
9910                 }
9911                 ptv = &tv;
9912             }
9913 
9914             if (arg2) {
9915                 if (copy_from_user_timezone(&tz, arg2)) {
9916                     return -TARGET_EFAULT;
9917                 }
9918                 ptz = &tz;
9919             }
9920 
9921             return get_errno(settimeofday(ptv, ptz));
9922         }
9923 #endif
9924 #if defined(TARGET_NR_select)
9925     case TARGET_NR_select:
9926 #if defined(TARGET_WANT_NI_OLD_SELECT)
9927         /* Some architectures used to have old_select here,
9928          * but now return ENOSYS for it.
9929          */
9930         ret = -TARGET_ENOSYS;
9931 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9932         ret = do_old_select(arg1);
9933 #else
9934         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9935 #endif
9936         return ret;
9937 #endif
9938 #ifdef TARGET_NR_pselect6
9939     case TARGET_NR_pselect6:
9940         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9941 #endif
9942 #ifdef TARGET_NR_pselect6_time64
9943     case TARGET_NR_pselect6_time64:
9944         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9945 #endif
9946 #ifdef TARGET_NR_symlink
9947     case TARGET_NR_symlink:
9948         {
9949             void *p2;
9950             p = lock_user_string(arg1);
9951             p2 = lock_user_string(arg2);
9952             if (!p || !p2)
9953                 ret = -TARGET_EFAULT;
9954             else
9955                 ret = get_errno(symlink(p, p2));
9956             unlock_user(p2, arg2, 0);
9957             unlock_user(p, arg1, 0);
9958         }
9959         return ret;
9960 #endif
9961 #if defined(TARGET_NR_symlinkat)
9962     case TARGET_NR_symlinkat:
9963         {
9964             void *p2;
9965             p  = lock_user_string(arg1);
9966             p2 = lock_user_string(arg3);
9967             if (!p || !p2)
9968                 ret = -TARGET_EFAULT;
9969             else
9970                 ret = get_errno(symlinkat(p, arg2, p2));
9971             unlock_user(p2, arg3, 0);
9972             unlock_user(p, arg1, 0);
9973         }
9974         return ret;
9975 #endif
9976 #ifdef TARGET_NR_readlink
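    /*
     * readlink (and readlinkat below) special-case /proc/self/exe so that
     * the guest sees the path of the emulated binary rather than that of
     * the QEMU executable itself.
     */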
9977     case TARGET_NR_readlink:
9978         {
9979             void *p2;
9980             p = lock_user_string(arg1);
9981             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9982             if (!p || !p2) {
9983                 ret = -TARGET_EFAULT;
9984             } else if (!arg3) {
9985                 /* Short circuit this for the magic exe check. */
9986                 ret = -TARGET_EINVAL;
9987             } else if (is_proc_myself((const char *)p, "exe")) {
9988                 char real[PATH_MAX], *temp;
9989                 temp = realpath(exec_path, real);
9990                 /* Return value is # of bytes that we wrote to the buffer. */
9991                 if (temp == NULL) {
9992                     ret = get_errno(-1);
9993                 } else {
9994                     /* Don't worry about sign mismatch as earlier mapping
9995                      * logic would have thrown a bad address error. */
9996                     ret = MIN(strlen(real), arg3);
9997                     /* We cannot NUL terminate the string. */
9998                     memcpy(p2, real, ret);
9999                 }
10000             } else {
10001                 ret = get_errno(readlink(path(p), p2, arg3));
10002             }
10003             unlock_user(p2, arg2, ret);
10004             unlock_user(p, arg1, 0);
10005         }
10006         return ret;
10007 #endif
10008 #if defined(TARGET_NR_readlinkat)
10009     case TARGET_NR_readlinkat:
10010         {
10011             void *p2;
10012             p  = lock_user_string(arg2);
10013             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10014             if (!p || !p2) {
10015                 ret = -TARGET_EFAULT;
10016             } else if (!arg4) {
10017                 /* Short circuit this for the magic exe check. */
10018                 ret = -TARGET_EINVAL;
10019             } else if (is_proc_myself((const char *)p, "exe")) {
10020                 char real[PATH_MAX], *temp;
10021                 temp = realpath(exec_path, real);
10022                 /* Return value is # of bytes that we wrote to the buffer. */
10023                 if (temp == NULL) {
10024                     ret = get_errno(-1);
10025                 } else {
10026                     /* Don't worry about sign mismatch as earlier mapping
10027                      * logic would have thrown a bad address error. */
10028                     ret = MIN(strlen(real), arg4);
10029                     /* We cannot NUL terminate the string. */
10030                     memcpy(p2, real, ret);
10031                 }
10032             } else {
10033                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10034             }
10035             unlock_user(p2, arg3, ret);
10036             unlock_user(p, arg2, 0);
10037         }
10038         return ret;
10039 #endif
10040 #ifdef TARGET_NR_swapon
10041     case TARGET_NR_swapon:
10042         if (!(p = lock_user_string(arg1)))
10043             return -TARGET_EFAULT;
10044         ret = get_errno(swapon(p, arg2));
10045         unlock_user(p, arg1, 0);
10046         return ret;
10047 #endif
10048     case TARGET_NR_reboot:
10049         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10050             /* arg4 is only used with LINUX_REBOOT_CMD_RESTART2; ignore it otherwise */
10051             p = lock_user_string(arg4);
10052             if (!p) {
10053                 return -TARGET_EFAULT;
10054             }
10055             ret = get_errno(reboot(arg1, arg2, arg3, p));
10056             unlock_user(p, arg4, 0);
10057         } else {
10058             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10059         }
10060         return ret;
10061 #ifdef TARGET_NR_mmap
10062     case TARGET_NR_mmap:
10063 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10064     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10065     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10066     || defined(TARGET_S390X)
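        /*
         * On these targets the old-style mmap takes a single guest pointer
         * to a block of six arguments instead of passing them in registers,
         * so fetch and byte-swap them first.
         */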
10067         {
10068             abi_ulong *v;
10069             abi_ulong v1, v2, v3, v4, v5, v6;
10070             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10071                 return -TARGET_EFAULT;
10072             v1 = tswapal(v[0]);
10073             v2 = tswapal(v[1]);
10074             v3 = tswapal(v[2]);
10075             v4 = tswapal(v[3]);
10076             v5 = tswapal(v[4]);
10077             v6 = tswapal(v[5]);
10078             unlock_user(v, arg1, 0);
10079             ret = get_errno(target_mmap(v1, v2, v3,
10080                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10081                                         v5, v6));
10082         }
10083 #else
10084         /* mmap pointers are always untagged */
10085         ret = get_errno(target_mmap(arg1, arg2, arg3,
10086                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10087                                     arg5,
10088                                     arg6));
10089 #endif
10090         return ret;
10091 #endif
10092 #ifdef TARGET_NR_mmap2
10093     case TARGET_NR_mmap2:
10094 #ifndef MMAP_SHIFT
10095 #define MMAP_SHIFT 12
10096 #endif
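        /*
         * mmap2 passes the file offset in units of 1 << MMAP_SHIFT
         * (4096 bytes by default), so scale it to a byte offset before
         * calling target_mmap().
         */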
10097         ret = target_mmap(arg1, arg2, arg3,
10098                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10099                           arg5, arg6 << MMAP_SHIFT);
10100         return get_errno(ret);
10101 #endif
10102     case TARGET_NR_munmap:
10103         arg1 = cpu_untagged_addr(cpu, arg1);
10104         return get_errno(target_munmap(arg1, arg2));
10105     case TARGET_NR_mprotect:
10106         arg1 = cpu_untagged_addr(cpu, arg1);
10107         {
10108             TaskState *ts = cpu->opaque;
10109             /* Special hack to detect libc making the stack executable.  */
10110             if ((arg3 & PROT_GROWSDOWN)
10111                 && arg1 >= ts->info->stack_limit
10112                 && arg1 <= ts->info->start_stack) {
10113                 arg3 &= ~PROT_GROWSDOWN;
10114                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10115                 arg1 = ts->info->stack_limit;
10116             }
10117         }
10118         return get_errno(target_mprotect(arg1, arg2, arg3));
10119 #ifdef TARGET_NR_mremap
10120     case TARGET_NR_mremap:
10121         arg1 = cpu_untagged_addr(cpu, arg1);
10122         /* mremap new_addr (arg5) is always untagged */
10123         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10124 #endif
10125         /* ??? msync/mlock/munlock are broken for softmmu.  */
10126 #ifdef TARGET_NR_msync
10127     case TARGET_NR_msync:
10128         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10129 #endif
10130 #ifdef TARGET_NR_mlock
10131     case TARGET_NR_mlock:
10132         return get_errno(mlock(g2h(cpu, arg1), arg2));
10133 #endif
10134 #ifdef TARGET_NR_munlock
10135     case TARGET_NR_munlock:
10136         return get_errno(munlock(g2h(cpu, arg1), arg2));
10137 #endif
10138 #ifdef TARGET_NR_mlockall
10139     case TARGET_NR_mlockall:
10140         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10141 #endif
10142 #ifdef TARGET_NR_munlockall
10143     case TARGET_NR_munlockall:
10144         return get_errno(munlockall());
10145 #endif
10146 #ifdef TARGET_NR_truncate
10147     case TARGET_NR_truncate:
10148         if (!(p = lock_user_string(arg1)))
10149             return -TARGET_EFAULT;
10150         ret = get_errno(truncate(p, arg2));
10151         unlock_user(p, arg1, 0);
10152         return ret;
10153 #endif
10154 #ifdef TARGET_NR_ftruncate
10155     case TARGET_NR_ftruncate:
10156         return get_errno(ftruncate(arg1, arg2));
10157 #endif
10158     case TARGET_NR_fchmod:
10159         return get_errno(fchmod(arg1, arg2));
10160 #if defined(TARGET_NR_fchmodat)
10161     case TARGET_NR_fchmodat:
10162         if (!(p = lock_user_string(arg2)))
10163             return -TARGET_EFAULT;
10164         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10165         unlock_user(p, arg2, 0);
10166         return ret;
10167 #endif
10168     case TARGET_NR_getpriority:
10169         /* Note that negative values are valid for getpriority, so we must
10170            differentiate based on errno settings.  */
10171         errno = 0;
10172         ret = getpriority(arg1, arg2);
10173         if (ret == -1 && errno != 0) {
10174             return -host_to_target_errno(errno);
10175         }
10176 #ifdef TARGET_ALPHA
10177         /* Return value is the unbiased priority.  Signal no error.  */
10178         cpu_env->ir[IR_V0] = 0;
10179 #else
10180         /* Return value is a biased priority to avoid negative numbers.  */
10181         ret = 20 - ret;
10182 #endif
10183         return ret;
10184     case TARGET_NR_setpriority:
10185         return get_errno(setpriority(arg1, arg2, arg3));
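    /*
     * statfs/fstatfs and statfs64/fstatfs64 share the conversion of the
     * host struct statfs to the target layout via the convert_statfs and
     * convert_statfs64 labels below.
     */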
10186 #ifdef TARGET_NR_statfs
10187     case TARGET_NR_statfs:
10188         if (!(p = lock_user_string(arg1))) {
10189             return -TARGET_EFAULT;
10190         }
10191         ret = get_errno(statfs(path(p), &stfs));
10192         unlock_user(p, arg1, 0);
10193     convert_statfs:
10194         if (!is_error(ret)) {
10195             struct target_statfs *target_stfs;
10196 
10197             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10198                 return -TARGET_EFAULT;
10199             __put_user(stfs.f_type, &target_stfs->f_type);
10200             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10201             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10202             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10203             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10204             __put_user(stfs.f_files, &target_stfs->f_files);
10205             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10206             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10207             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10208             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10209             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10210 #ifdef _STATFS_F_FLAGS
10211             __put_user(stfs.f_flags, &target_stfs->f_flags);
10212 #else
10213             __put_user(0, &target_stfs->f_flags);
10214 #endif
10215             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10216             unlock_user_struct(target_stfs, arg2, 1);
10217         }
10218         return ret;
10219 #endif
10220 #ifdef TARGET_NR_fstatfs
10221     case TARGET_NR_fstatfs:
10222         ret = get_errno(fstatfs(arg1, &stfs));
10223         goto convert_statfs;
10224 #endif
10225 #ifdef TARGET_NR_statfs64
10226     case TARGET_NR_statfs64:
10227         if (!(p = lock_user_string(arg1))) {
10228             return -TARGET_EFAULT;
10229         }
10230         ret = get_errno(statfs(path(p), &stfs));
10231         unlock_user(p, arg1, 0);
10232     convert_statfs64:
10233         if (!is_error(ret)) {
10234             struct target_statfs64 *target_stfs;
10235 
10236             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10237                 return -TARGET_EFAULT;
10238             __put_user(stfs.f_type, &target_stfs->f_type);
10239             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10240             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10241             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10242             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10243             __put_user(stfs.f_files, &target_stfs->f_files);
10244             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10245             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10246             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10247             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10248             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10249 #ifdef _STATFS_F_FLAGS
10250             __put_user(stfs.f_flags, &target_stfs->f_flags);
10251 #else
10252             __put_user(0, &target_stfs->f_flags);
10253 #endif
10254             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10255             unlock_user_struct(target_stfs, arg3, 1);
10256         }
10257         return ret;
10258     case TARGET_NR_fstatfs64:
10259         ret = get_errno(fstatfs(arg1, &stfs));
10260         goto convert_statfs64;
10261 #endif
10262 #ifdef TARGET_NR_socketcall
10263     case TARGET_NR_socketcall:
10264         return do_socketcall(arg1, arg2);
10265 #endif
10266 #ifdef TARGET_NR_accept
10267     case TARGET_NR_accept:
10268         return do_accept4(arg1, arg2, arg3, 0);
10269 #endif
10270 #ifdef TARGET_NR_accept4
10271     case TARGET_NR_accept4:
10272         return do_accept4(arg1, arg2, arg3, arg4);
10273 #endif
10274 #ifdef TARGET_NR_bind
10275     case TARGET_NR_bind:
10276         return do_bind(arg1, arg2, arg3);
10277 #endif
10278 #ifdef TARGET_NR_connect
10279     case TARGET_NR_connect:
10280         return do_connect(arg1, arg2, arg3);
10281 #endif
10282 #ifdef TARGET_NR_getpeername
10283     case TARGET_NR_getpeername:
10284         return do_getpeername(arg1, arg2, arg3);
10285 #endif
10286 #ifdef TARGET_NR_getsockname
10287     case TARGET_NR_getsockname:
10288         return do_getsockname(arg1, arg2, arg3);
10289 #endif
10290 #ifdef TARGET_NR_getsockopt
10291     case TARGET_NR_getsockopt:
10292         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10293 #endif
10294 #ifdef TARGET_NR_listen
10295     case TARGET_NR_listen:
10296         return get_errno(listen(arg1, arg2));
10297 #endif
10298 #ifdef TARGET_NR_recv
10299     case TARGET_NR_recv:
10300         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10301 #endif
10302 #ifdef TARGET_NR_recvfrom
10303     case TARGET_NR_recvfrom:
10304         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10305 #endif
10306 #ifdef TARGET_NR_recvmsg
10307     case TARGET_NR_recvmsg:
10308         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10309 #endif
10310 #ifdef TARGET_NR_send
10311     case TARGET_NR_send:
10312         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10313 #endif
10314 #ifdef TARGET_NR_sendmsg
10315     case TARGET_NR_sendmsg:
10316         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10317 #endif
10318 #ifdef TARGET_NR_sendmmsg
10319     case TARGET_NR_sendmmsg:
10320         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10321 #endif
10322 #ifdef TARGET_NR_recvmmsg
10323     case TARGET_NR_recvmmsg:
10324         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10325 #endif
10326 #ifdef TARGET_NR_sendto
10327     case TARGET_NR_sendto:
10328         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10329 #endif
10330 #ifdef TARGET_NR_shutdown
10331     case TARGET_NR_shutdown:
10332         return get_errno(shutdown(arg1, arg2));
10333 #endif
10334 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10335     case TARGET_NR_getrandom:
10336         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10337         if (!p) {
10338             return -TARGET_EFAULT;
10339         }
10340         ret = get_errno(getrandom(p, arg2, arg3));
10341         unlock_user(p, arg1, ret);
10342         return ret;
10343 #endif
10344 #ifdef TARGET_NR_socket
10345     case TARGET_NR_socket:
10346         return do_socket(arg1, arg2, arg3);
10347 #endif
10348 #ifdef TARGET_NR_socketpair
10349     case TARGET_NR_socketpair:
10350         return do_socketpair(arg1, arg2, arg3, arg4);
10351 #endif
10352 #ifdef TARGET_NR_setsockopt
10353     case TARGET_NR_setsockopt:
10354         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10355 #endif
10356 #if defined(TARGET_NR_syslog)
10357     case TARGET_NR_syslog:
10358         {
10359             int len = arg2;
10360             int len = arg3;
10361             switch (arg1) {
10362             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10363             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10364             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10365             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10366             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10367             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10368             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10369             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10370                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10371             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10372             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10373             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10374                 {
10375                     if (len < 0) {
10376                         return -TARGET_EINVAL;
10377                     }
10378                     if (len == 0) {
10379                         return 0;
10380                     }
10381                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10382                     if (!p) {
10383                         return -TARGET_EFAULT;
10384                     }
10385                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10386                     unlock_user(p, arg2, arg3);
10387                 }
10388                 return ret;
10389             default:
10390                 return -TARGET_EINVAL;
10391             }
10392         }
10393         break;
10394 #endif
10395     case TARGET_NR_setitimer:
10396         {
10397             struct itimerval value, ovalue, *pvalue;
10398 
10399             if (arg2) {
10400                 pvalue = &value;
10401                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10402                     || copy_from_user_timeval(&pvalue->it_value,
10403                                               arg2 + sizeof(struct target_timeval)))
10404                     return -TARGET_EFAULT;
10405             } else {
10406                 pvalue = NULL;
10407             }
10408             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10409             if (!is_error(ret) && arg3) {
10410                 if (copy_to_user_timeval(arg3,
10411                                          &ovalue.it_interval)
10412                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10413                                             &ovalue.it_value))
10414                     return -TARGET_EFAULT;
10415             }
10416         }
10417         return ret;
10418     case TARGET_NR_getitimer:
10419         {
10420             struct itimerval value;
10421 
10422             ret = get_errno(getitimer(arg1, &value));
10423             if (!is_error(ret) && arg2) {
10424                 if (copy_to_user_timeval(arg2,
10425                                          &value.it_interval)
10426                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10427                                             &value.it_value))
10428                     return -TARGET_EFAULT;
10429             }
10430         }
10431         return ret;
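    /*
     * stat, lstat and fstat share the conversion of the host struct stat
     * to the target layout via the do_stat label inside the fstat case.
     */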
10432 #ifdef TARGET_NR_stat
10433     case TARGET_NR_stat:
10434         if (!(p = lock_user_string(arg1))) {
10435             return -TARGET_EFAULT;
10436         }
10437         ret = get_errno(stat(path(p), &st));
10438         unlock_user(p, arg1, 0);
10439         goto do_stat;
10440 #endif
10441 #ifdef TARGET_NR_lstat
10442     case TARGET_NR_lstat:
10443         if (!(p = lock_user_string(arg1))) {
10444             return -TARGET_EFAULT;
10445         }
10446         ret = get_errno(lstat(path(p), &st));
10447         unlock_user(p, arg1, 0);
10448         goto do_stat;
10449 #endif
10450 #ifdef TARGET_NR_fstat
10451     case TARGET_NR_fstat:
10452         {
10453             ret = get_errno(fstat(arg1, &st));
10454 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10455         do_stat:
10456 #endif
10457             if (!is_error(ret)) {
10458                 struct target_stat *target_st;
10459 
10460                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10461                     return -TARGET_EFAULT;
10462                 memset(target_st, 0, sizeof(*target_st));
10463                 __put_user(st.st_dev, &target_st->st_dev);
10464                 __put_user(st.st_ino, &target_st->st_ino);
10465                 __put_user(st.st_mode, &target_st->st_mode);
10466                 __put_user(st.st_uid, &target_st->st_uid);
10467                 __put_user(st.st_gid, &target_st->st_gid);
10468                 __put_user(st.st_nlink, &target_st->st_nlink);
10469                 __put_user(st.st_rdev, &target_st->st_rdev);
10470                 __put_user(st.st_size, &target_st->st_size);
10471                 __put_user(st.st_blksize, &target_st->st_blksize);
10472                 __put_user(st.st_blocks, &target_st->st_blocks);
10473                 __put_user(st.st_atime, &target_st->target_st_atime);
10474                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10475                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10476 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10477                 __put_user(st.st_atim.tv_nsec,
10478                            &target_st->target_st_atime_nsec);
10479                 __put_user(st.st_mtim.tv_nsec,
10480                            &target_st->target_st_mtime_nsec);
10481                 __put_user(st.st_ctim.tv_nsec,
10482                            &target_st->target_st_ctime_nsec);
10483 #endif
10484                 unlock_user_struct(target_st, arg2, 1);
10485             }
10486         }
10487         return ret;
10488 #endif
10489     case TARGET_NR_vhangup:
10490         return get_errno(vhangup());
10491 #ifdef TARGET_NR_syscall
10492     case TARGET_NR_syscall:
10493         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10494                           arg6, arg7, arg8, 0);
10495 #endif
10496 #if defined(TARGET_NR_wait4)
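    /*
     * The exit status and the struct rusage must both be converted to the
     * target's encoding/layout before being written back to guest memory.
     */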
10497     case TARGET_NR_wait4:
10498         {
10499             int status;
10500             abi_long status_ptr = arg2;
10501             struct rusage rusage, *rusage_ptr;
10502             abi_ulong target_rusage = arg4;
10503             abi_long rusage_err;
10504             if (target_rusage)
10505                 rusage_ptr = &rusage;
10506             else
10507                 rusage_ptr = NULL;
10508             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10509             if (!is_error(ret)) {
10510                 if (status_ptr && ret) {
10511                     status = host_to_target_waitstatus(status);
10512                     if (put_user_s32(status, status_ptr))
10513                         return -TARGET_EFAULT;
10514                 }
10515                 if (target_rusage) {
10516                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10517                     if (rusage_err) {
10518                         ret = rusage_err;
10519                     }
10520                 }
10521             }
10522         }
10523         return ret;
10524 #endif
10525 #ifdef TARGET_NR_swapoff
10526     case TARGET_NR_swapoff:
10527         if (!(p = lock_user_string(arg1)))
10528             return -TARGET_EFAULT;
10529         ret = get_errno(swapoff(p));
10530         unlock_user(p, arg1, 0);
10531         return ret;
10532 #endif
10533     case TARGET_NR_sysinfo:
10534         {
10535             struct target_sysinfo *target_value;
10536             struct sysinfo value;
10537             ret = get_errno(sysinfo(&value));
10538             if (!is_error(ret) && arg1)
10539             {
10540                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10541                     return -TARGET_EFAULT;
10542                 __put_user(value.uptime, &target_value->uptime);
10543                 __put_user(value.loads[0], &target_value->loads[0]);
10544                 __put_user(value.loads[1], &target_value->loads[1]);
10545                 __put_user(value.loads[2], &target_value->loads[2]);
10546                 __put_user(value.totalram, &target_value->totalram);
10547                 __put_user(value.freeram, &target_value->freeram);
10548                 __put_user(value.sharedram, &target_value->sharedram);
10549                 __put_user(value.bufferram, &target_value->bufferram);
10550                 __put_user(value.totalswap, &target_value->totalswap);
10551                 __put_user(value.freeswap, &target_value->freeswap);
10552                 __put_user(value.procs, &target_value->procs);
10553                 __put_user(value.totalhigh, &target_value->totalhigh);
10554                 __put_user(value.freehigh, &target_value->freehigh);
10555                 __put_user(value.mem_unit, &target_value->mem_unit);
10556                 unlock_user_struct(target_value, arg1, 1);
10557             }
10558         }
10559         return ret;
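    /*
     * Some targets funnel all SysV IPC operations through the single ipc()
     * syscall; do_ipc() demultiplexes it onto the sem/msg/shm helpers below.
     */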
10560 #ifdef TARGET_NR_ipc
10561     case TARGET_NR_ipc:
10562         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10563 #endif
10564 #ifdef TARGET_NR_semget
10565     case TARGET_NR_semget:
10566         return get_errno(semget(arg1, arg2, arg3));
10567 #endif
10568 #ifdef TARGET_NR_semop
10569     case TARGET_NR_semop:
10570         return do_semtimedop(arg1, arg2, arg3, 0, false);
10571 #endif
10572 #ifdef TARGET_NR_semtimedop
10573     case TARGET_NR_semtimedop:
10574         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10575 #endif
10576 #ifdef TARGET_NR_semtimedop_time64
10577     case TARGET_NR_semtimedop_time64:
10578         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10579 #endif
10580 #ifdef TARGET_NR_semctl
10581     case TARGET_NR_semctl:
10582         return do_semctl(arg1, arg2, arg3, arg4);
10583 #endif
10584 #ifdef TARGET_NR_msgctl
10585     case TARGET_NR_msgctl:
10586         return do_msgctl(arg1, arg2, arg3);
10587 #endif
10588 #ifdef TARGET_NR_msgget
10589     case TARGET_NR_msgget:
10590         return get_errno(msgget(arg1, arg2));
10591 #endif
10592 #ifdef TARGET_NR_msgrcv
10593     case TARGET_NR_msgrcv:
10594         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10595 #endif
10596 #ifdef TARGET_NR_msgsnd
10597     case TARGET_NR_msgsnd:
10598         return do_msgsnd(arg1, arg2, arg3, arg4);
10599 #endif
10600 #ifdef TARGET_NR_shmget
10601     case TARGET_NR_shmget:
10602         return get_errno(shmget(arg1, arg2, arg3));
10603 #endif
10604 #ifdef TARGET_NR_shmctl
10605     case TARGET_NR_shmctl:
10606         return do_shmctl(arg1, arg2, arg3);
10607 #endif
10608 #ifdef TARGET_NR_shmat
10609     case TARGET_NR_shmat:
10610         return do_shmat(cpu_env, arg1, arg2, arg3);
10611 #endif
10612 #ifdef TARGET_NR_shmdt
10613     case TARGET_NR_shmdt:
10614         return do_shmdt(arg1);
10615 #endif
10616     case TARGET_NR_fsync:
10617         return get_errno(fsync(arg1));
10618     case TARGET_NR_clone:
10619         /* Linux manages to have three different orderings for its
10620          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10621          * match the kernel's CONFIG_CLONE_* settings.
10622          * Microblaze is further special in that it uses a sixth
10623          * implicit argument to clone for the TLS pointer.
10624          */
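        /*
         * Each variant below simply reorders the guest's clone() arguments
         * (flags, new stack, tid pointers, TLS) into the fixed order that
         * do_fork() expects.
         */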
10625 #if defined(TARGET_MICROBLAZE)
10626         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10627 #elif defined(TARGET_CLONE_BACKWARDS)
10628         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10629 #elif defined(TARGET_CLONE_BACKWARDS2)
10630         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10631 #else
10632         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10633 #endif
10634         return ret;
10635 #ifdef __NR_exit_group
10636         /* new thread calls */
10637     case TARGET_NR_exit_group:
10638         preexit_cleanup(cpu_env, arg1);
10639         return get_errno(exit_group(arg1));
10640 #endif
10641     case TARGET_NR_setdomainname:
10642         if (!(p = lock_user_string(arg1)))
10643             return -TARGET_EFAULT;
10644         ret = get_errno(setdomainname(p, arg2));
10645         unlock_user(p, arg1, 0);
10646         return ret;
10647     case TARGET_NR_uname:
10648         /* no need to transcode because we use the linux syscall */
10649         {
10650             struct new_utsname * buf;
10651 
10652             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10653                 return -TARGET_EFAULT;
10654             ret = get_errno(sys_uname(buf));
10655             if (!is_error(ret)) {
10656                 /* Overwrite the native machine name with whatever is being
10657                    emulated. */
10658                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10659                           sizeof(buf->machine));
10660                 /* Allow the user to override the reported release.  */
10661                 if (qemu_uname_release && *qemu_uname_release) {
10662                     g_strlcpy(buf->release, qemu_uname_release,
10663                               sizeof(buf->release));
10664                 }
10665             }
10666             unlock_user_struct(buf, arg1, 1);
10667         }
10668         return ret;
10669 #ifdef TARGET_I386
10670     case TARGET_NR_modify_ldt:
10671         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10672 #if !defined(TARGET_X86_64)
10673     case TARGET_NR_vm86:
10674         return do_vm86(cpu_env, arg1, arg2);
10675 #endif
10676 #endif
10677 #if defined(TARGET_NR_adjtimex)
10678     case TARGET_NR_adjtimex:
10679         {
10680             struct timex host_buf;
10681 
10682             if (target_to_host_timex(&host_buf, arg1) != 0) {
10683                 return -TARGET_EFAULT;
10684             }
10685             ret = get_errno(adjtimex(&host_buf));
10686             if (!is_error(ret)) {
10687                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10688                     return -TARGET_EFAULT;
10689                 }
10690             }
10691         }
10692         return ret;
10693 #endif
10694 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10695     case TARGET_NR_clock_adjtime:
10696         {
10697             struct timex htx, *phtx = &htx;
10698 
10699             if (target_to_host_timex(phtx, arg2) != 0) {
10700                 return -TARGET_EFAULT;
10701             }
10702             ret = get_errno(clock_adjtime(arg1, phtx));
10703             if (!is_error(ret)) {
10704                 if (host_to_target_timex(arg2, phtx) != 0) {
10705                     return -TARGET_EFAULT;
10706                 }
10707             }
10708         }
10709         return ret;
10710 #endif
10711 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10712     case TARGET_NR_clock_adjtime64:
10713         {
10714             struct timex htx;
10715 
10716             if (target_to_host_timex64(&htx, arg2) != 0) {
10717                 return -TARGET_EFAULT;
10718             }
10719             ret = get_errno(clock_adjtime(arg1, &htx));
10720             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10721                     return -TARGET_EFAULT;
10722             }
10723         }
10724         return ret;
10725 #endif
10726     case TARGET_NR_getpgid:
10727         return get_errno(getpgid(arg1));
10728     case TARGET_NR_fchdir:
10729         return get_errno(fchdir(arg1));
10730     case TARGET_NR_personality:
10731         return get_errno(personality(arg1));
10732 #ifdef TARGET_NR__llseek /* Not on alpha */
10733     case TARGET_NR__llseek:
10734         {
10735             int64_t res;
10736 #if !defined(__NR_llseek)
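                  /*
                   * No host _llseek (typical of 64-bit hosts): build the
                   * 64-bit offset from the guest's high (arg2) and low
                   * (arg3) halves and use a plain lseek() instead.
                   */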
10737             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10738             if (res == -1) {
10739                 ret = get_errno(res);
10740             } else {
10741                 ret = 0;
10742             }
10743 #else
10744             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10745 #endif
10746             if ((ret == 0) && put_user_s64(res, arg4)) {
10747                 return -TARGET_EFAULT;
10748             }
10749         }
10750         return ret;
10751 #endif
10752 #ifdef TARGET_NR_getdents
10753     case TARGET_NR_getdents:
10754         return do_getdents(arg1, arg2, arg3);
10755 #endif /* TARGET_NR_getdents */
10756 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10757     case TARGET_NR_getdents64:
10758         return do_getdents64(arg1, arg2, arg3);
10759 #endif /* TARGET_NR_getdents64 */
10760 #if defined(TARGET_NR__newselect)
10761     case TARGET_NR__newselect:
10762         return do_select(arg1, arg2, arg3, arg4, arg5);
10763 #endif
10764 #ifdef TARGET_NR_poll
10765     case TARGET_NR_poll:
10766         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10767 #endif
10768 #ifdef TARGET_NR_ppoll
10769     case TARGET_NR_ppoll:
10770         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10771 #endif
10772 #ifdef TARGET_NR_ppoll_time64
10773     case TARGET_NR_ppoll_time64:
10774         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10775 #endif
10776     case TARGET_NR_flock:
10777         /* NOTE: the flock constant seems to be the same for every
10778            Linux platform */
10779         return get_errno(safe_flock(arg1, arg2));
10780     case TARGET_NR_readv:
10781         {
10782             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10783             if (vec != NULL) {
10784                 ret = get_errno(safe_readv(arg1, vec, arg3));
10785                 unlock_iovec(vec, arg2, arg3, 1);
10786             } else {
10787                 ret = -host_to_target_errno(errno);
10788             }
10789         }
10790         return ret;
10791     case TARGET_NR_writev:
10792         {
10793             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10794             if (vec != NULL) {
10795                 ret = get_errno(safe_writev(arg1, vec, arg3));
10796                 unlock_iovec(vec, arg2, arg3, 0);
10797             } else {
10798                 ret = -host_to_target_errno(errno);
10799             }
10800         }
10801         return ret;
10802 #if defined(TARGET_NR_preadv)
10803     case TARGET_NR_preadv:
10804         {
10805             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10806             if (vec != NULL) {
10807                 unsigned long low, high;
10808 
10809                 target_to_host_low_high(arg4, arg5, &low, &high);
10810                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10811                 unlock_iovec(vec, arg2, arg3, 1);
10812             } else {
10813                 ret = -host_to_target_errno(errno);
10814             }
10815         }
10816         return ret;
10817 #endif
10818 #if defined(TARGET_NR_pwritev)
10819     case TARGET_NR_pwritev:
10820         {
10821             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10822             if (vec != NULL) {
10823                 unsigned long low, high;
10824 
10825                 target_to_host_low_high(arg4, arg5, &low, &high);
10826                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10827                 unlock_iovec(vec, arg2, arg3, 0);
10828             } else {
10829                 ret = -host_to_target_errno(errno);
10830             }
10831         }
10832         return ret;
10833 #endif
10834     case TARGET_NR_getsid:
10835         return get_errno(getsid(arg1));
10836 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10837     case TARGET_NR_fdatasync:
10838         return get_errno(fdatasync(arg1));
10839 #endif
10840     case TARGET_NR_sched_getaffinity:
10841         {
10842             unsigned int mask_size;
10843             unsigned long *mask;
10844 
10845             /*
10846              * sched_getaffinity needs multiples of ulong, so we need to take
10847              * care of mismatches between target ulong and host ulong sizes.
10848              */
10849             if (arg2 & (sizeof(abi_ulong) - 1)) {
10850                 return -TARGET_EINVAL;
10851             }
10852             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10853 
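                  /*
                   * Worked example: a 32-bit guest passing arg2 == 12
                   * (three abi_ulongs) on a 64-bit host gets mask_size
                   * rounded up to 16, a whole number of host ulongs.
                   */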
10854             mask = alloca(mask_size);
10855             memset(mask, 0, mask_size);
10856             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10857 
10858             if (!is_error(ret)) {
10859                 if (ret > arg2) {
10860                     /* More data returned than the caller's buffer will fit.
10861                      * This only happens if sizeof(abi_long) < sizeof(long)
10862                      * and the caller passed us a buffer holding an odd number
10863                      * of abi_longs. If the host kernel is actually using the
10864                      * extra 4 bytes then fail EINVAL; otherwise we can just
10865                      * ignore them and only copy the interesting part.
10866                      */
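                          /*
                           * E.g. with arg2 == 12 the kernel may hand back 16
                           * bytes; unless more than 96 (12 * 8) CPUs are
                           * configured, the trailing zero bytes are dropped.
                           */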
10867                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10868                     if (numcpus > arg2 * 8) {
10869                         return -TARGET_EINVAL;
10870                     }
10871                     ret = arg2;
10872                 }
10873 
10874                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10875                     return -TARGET_EFAULT;
10876                 }
10877             }
10878         }
10879         return ret;
10880     case TARGET_NR_sched_setaffinity:
10881         {
10882             unsigned int mask_size;
10883             unsigned long *mask;
10884 
10885             /*
10886              * sched_setaffinity needs multiples of ulong, so we need to take
10887              * care of mismatches between target ulong and host ulong sizes.
10888              */
10889             if (arg2 & (sizeof(abi_ulong) - 1)) {
10890                 return -TARGET_EINVAL;
10891             }
10892             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10893             mask = alloca(mask_size);
10894 
10895             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10896             if (ret) {
10897                 return ret;
10898             }
10899 
10900             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10901         }
10902     case TARGET_NR_getcpu:
10903         {
10904             unsigned cpu, node;
10905             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10906                                        arg2 ? &node : NULL,
10907                                        NULL));
10908             if (is_error(ret)) {
10909                 return ret;
10910             }
10911             if (arg1 && put_user_u32(cpu, arg1)) {
10912                 return -TARGET_EFAULT;
10913             }
10914             if (arg2 && put_user_u32(node, arg2)) {
10915                 return -TARGET_EFAULT;
10916             }
10917         }
10918         return ret;
10919     case TARGET_NR_sched_setparam:
10920         {
10921             struct target_sched_param *target_schp;
10922             struct sched_param schp;
10923 
10924             if (arg2 == 0) {
10925                 return -TARGET_EINVAL;
10926             }
10927             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10928                 return -TARGET_EFAULT;
10929             }
10930             schp.sched_priority = tswap32(target_schp->sched_priority);
10931             unlock_user_struct(target_schp, arg2, 0);
10932             return get_errno(sys_sched_setparam(arg1, &schp));
10933         }
10934     case TARGET_NR_sched_getparam:
10935         {
10936             struct target_sched_param *target_schp;
10937             struct sched_param schp;
10938 
10939             if (arg2 == 0) {
10940                 return -TARGET_EINVAL;
10941             }
10942             ret = get_errno(sys_sched_getparam(arg1, &schp));
10943             if (!is_error(ret)) {
10944                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10945                     return -TARGET_EFAULT;
10946                 }
10947                 target_schp->sched_priority = tswap32(schp.sched_priority);
10948                 unlock_user_struct(target_schp, arg2, 1);
10949             }
10950         }
10951         return ret;
10952     case TARGET_NR_sched_setscheduler:
10953         {
10954             struct target_sched_param *target_schp;
10955             struct sched_param schp;
10956             if (arg3 == 0) {
10957                 return -TARGET_EINVAL;
10958             }
10959             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10960                 return -TARGET_EFAULT;
10961             }
10962             schp.sched_priority = tswap32(target_schp->sched_priority);
10963             unlock_user_struct(target_schp, arg3, 0);
10964             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10965         }
10966     case TARGET_NR_sched_getscheduler:
10967         return get_errno(sys_sched_getscheduler(arg1));
10968     case TARGET_NR_sched_getattr:
10969         {
10970             struct target_sched_attr *target_scha;
10971             struct sched_attr scha;
10972             if (arg2 == 0) {
10973                 return -TARGET_EINVAL;
10974             }
10975             if (arg3 > sizeof(scha)) {
10976                 arg3 = sizeof(scha);
10977             }
10978             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10979             if (!is_error(ret)) {
10980                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10981                 if (!target_scha) {
10982                     return -TARGET_EFAULT;
10983                 }
10984                 target_scha->size = tswap32(scha.size);
10985                 target_scha->sched_policy = tswap32(scha.sched_policy);
10986                 target_scha->sched_flags = tswap64(scha.sched_flags);
10987                 target_scha->sched_nice = tswap32(scha.sched_nice);
10988                 target_scha->sched_priority = tswap32(scha.sched_priority);
10989                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10990                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10991                 target_scha->sched_period = tswap64(scha.sched_period);
10992                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10993                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10994                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10995                 }
10996                 unlock_user(target_scha, arg2, arg3);
10997             }
10998             return ret;
10999         }
11000     case TARGET_NR_sched_setattr:
11001         {
11002             struct target_sched_attr *target_scha;
11003             struct sched_attr scha;
11004             uint32_t size;
11005             int zeroed;
11006             if (arg2 == 0) {
11007                 return -TARGET_EINVAL;
11008             }
11009             if (get_user_u32(size, arg2)) {
11010                 return -TARGET_EFAULT;
11011             }
11012             if (!size) {
11013                 size = offsetof(struct target_sched_attr, sched_util_min);
11014             }
11015             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11016                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11017                     return -TARGET_EFAULT;
11018                 }
11019                 return -TARGET_E2BIG;
11020             }
11021 
11022             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11023             if (zeroed < 0) {
11024                 return zeroed;
11025             } else if (zeroed == 0) {
11026                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11027                     return -TARGET_EFAULT;
11028                 }
11029                 return -TARGET_E2BIG;
11030             }
11031             if (size > sizeof(struct target_sched_attr)) {
11032                 size = sizeof(struct target_sched_attr);
11033             }
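                  /*
                   * At this point any bytes the guest supplied beyond our
                   * target_sched_attr are known to be zero (else E2BIG
                   * above), and size is clamped to the fields converted
                   * below.
                   */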
11034 
11035             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11036             if (!target_scha) {
11037                 return -TARGET_EFAULT;
11038             }
11039             scha.size = size;
11040             scha.sched_policy = tswap32(target_scha->sched_policy);
11041             scha.sched_flags = tswap64(target_scha->sched_flags);
11042             scha.sched_nice = tswap32(target_scha->sched_nice);
11043             scha.sched_priority = tswap32(target_scha->sched_priority);
11044             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11045             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11046             scha.sched_period = tswap64(target_scha->sched_period);
11047             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11048                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11049                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11050             }
11051             unlock_user(target_scha, arg2, 0);
11052             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11053         }
11054     case TARGET_NR_sched_yield:
11055         return get_errno(sched_yield());
11056     case TARGET_NR_sched_get_priority_max:
11057         return get_errno(sched_get_priority_max(arg1));
11058     case TARGET_NR_sched_get_priority_min:
11059         return get_errno(sched_get_priority_min(arg1));
11060 #ifdef TARGET_NR_sched_rr_get_interval
11061     case TARGET_NR_sched_rr_get_interval:
11062         {
11063             struct timespec ts;
11064             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11065             if (!is_error(ret)) {
11066                 ret = host_to_target_timespec(arg2, &ts);
11067             }
11068         }
11069         return ret;
11070 #endif
11071 #ifdef TARGET_NR_sched_rr_get_interval_time64
11072     case TARGET_NR_sched_rr_get_interval_time64:
11073         {
11074             struct timespec ts;
11075             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11076             if (!is_error(ret)) {
11077                 ret = host_to_target_timespec64(arg2, &ts);
11078             }
11079         }
11080         return ret;
11081 #endif
11082 #if defined(TARGET_NR_nanosleep)
11083     case TARGET_NR_nanosleep:
11084         {
11085             struct timespec req, rem;
11086             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
11087             ret = get_errno(safe_nanosleep(&req, &rem));
11088             if (is_error(ret) && arg2) {
11089                 host_to_target_timespec(arg2, &rem);
11090             }
11091         }
11092         return ret;
11093 #endif
11094     case TARGET_NR_prctl:
11095         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11097 #ifdef TARGET_NR_arch_prctl
11098     case TARGET_NR_arch_prctl:
11099         return do_arch_prctl(cpu_env, arg1, arg2);
11100 #endif
11101 #ifdef TARGET_NR_pread64
11102     case TARGET_NR_pread64:
11103         if (regpairs_aligned(cpu_env, num)) {
11104             arg4 = arg5;
11105             arg5 = arg6;
11106         }
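              /*
               * On 32-bit ABIs that pass 64-bit values in aligned register
               * pairs, a pad register precedes the offset, so its two
               * halves arrive in arg5/arg6 and were shifted down above.
               */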
11107         if (arg2 == 0 && arg3 == 0) {
11108             /* Special-case NULL buffer and zero length, which should succeed */
11109             p = 0;
11110         } else {
11111             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11112             if (!p) {
11113                 return -TARGET_EFAULT;
11114             }
11115         }
11116         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11117         unlock_user(p, arg2, ret);
11118         return ret;
11119     case TARGET_NR_pwrite64:
11120         if (regpairs_aligned(cpu_env, num)) {
11121             arg4 = arg5;
11122             arg5 = arg6;
11123         }
11124         if (arg2 == 0 && arg3 == 0) {
11125             /* Special-case NULL buffer and zero length, which should succeed */
11126             p = 0;
11127         } else {
11128             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11129             if (!p) {
11130                 return -TARGET_EFAULT;
11131             }
11132         }
11133         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11134         unlock_user(p, arg2, 0);
11135         return ret;
11136 #endif
11137     case TARGET_NR_getcwd:
11138         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11139             return -TARGET_EFAULT;
11140         ret = get_errno(sys_getcwd1(p, arg2));
11141         unlock_user(p, arg1, ret);
11142         return ret;
11143     case TARGET_NR_capget:
11144     case TARGET_NR_capset:
11145     {
11146         struct target_user_cap_header *target_header;
11147         struct target_user_cap_data *target_data = NULL;
11148         struct __user_cap_header_struct header;
11149         struct __user_cap_data_struct data[2];
11150         struct __user_cap_data_struct *dataptr = NULL;
11151         int i, target_datalen;
11152         int data_items = 1;
11153 
11154         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11155             return -TARGET_EFAULT;
11156         }
11157         header.version = tswap32(target_header->version);
11158         header.pid = tswap32(target_header->pid);
11159 
11160         if (header.version != _LINUX_CAPABILITY_VERSION) {
11161             /* Versions 2 and up take a pointer to two user_data structs */
11162             data_items = 2;
11163         }
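              /*
               * Illustrative: a version 1 header uses a single 32-bit
               * capability set, while versions 2 and 3 describe 64
               * capability bits split across two __user_cap_data_struct
               * entries.
               */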
11164 
11165         target_datalen = sizeof(*target_data) * data_items;
11166 
11167         if (arg2) {
11168             if (num == TARGET_NR_capget) {
11169                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11170             } else {
11171                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11172             }
11173             if (!target_data) {
11174                 unlock_user_struct(target_header, arg1, 0);
11175                 return -TARGET_EFAULT;
11176             }
11177 
11178             if (num == TARGET_NR_capset) {
11179                 for (i = 0; i < data_items; i++) {
11180                     data[i].effective = tswap32(target_data[i].effective);
11181                     data[i].permitted = tswap32(target_data[i].permitted);
11182                     data[i].inheritable = tswap32(target_data[i].inheritable);
11183                 }
11184             }
11185 
11186             dataptr = data;
11187         }
11188 
11189         if (num == TARGET_NR_capget) {
11190             ret = get_errno(capget(&header, dataptr));
11191         } else {
11192             ret = get_errno(capset(&header, dataptr));
11193         }
11194 
11195         /* The kernel always updates version for both capget and capset */
11196         target_header->version = tswap32(header.version);
11197         unlock_user_struct(target_header, arg1, 1);
11198 
11199         if (arg2) {
11200             if (num == TARGET_NR_capget) {
11201                 for (i = 0; i < data_items; i++) {
11202                     target_data[i].effective = tswap32(data[i].effective);
11203                     target_data[i].permitted = tswap32(data[i].permitted);
11204                     target_data[i].inheritable = tswap32(data[i].inheritable);
11205                 }
11206                 unlock_user(target_data, arg2, target_datalen);
11207             } else {
11208                 unlock_user(target_data, arg2, 0);
11209             }
11210         }
11211         return ret;
11212     }
11213     case TARGET_NR_sigaltstack:
11214         return do_sigaltstack(arg1, arg2, cpu_env);
11215 
11216 #ifdef CONFIG_SENDFILE
11217 #ifdef TARGET_NR_sendfile
11218     case TARGET_NR_sendfile:
11219     {
11220         off_t *offp = NULL;
11221         off_t off;
11222         if (arg3) {
11223             ret = get_user_sal(off, arg3);
11224             if (is_error(ret)) {
11225                 return ret;
11226             }
11227             offp = &off;
11228         }
11229         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11230         if (!is_error(ret) && arg3) {
11231             abi_long ret2 = put_user_sal(off, arg3);
11232             if (is_error(ret2)) {
11233                 ret = ret2;
11234             }
11235         }
11236         return ret;
11237     }
11238 #endif
11239 #ifdef TARGET_NR_sendfile64
11240     case TARGET_NR_sendfile64:
11241     {
11242         off_t *offp = NULL;
11243         off_t off;
11244         if (arg3) {
11245             ret = get_user_s64(off, arg3);
11246             if (is_error(ret)) {
11247                 return ret;
11248             }
11249             offp = &off;
11250         }
11251         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11252         if (!is_error(ret) && arg3) {
11253             abi_long ret2 = put_user_s64(off, arg3);
11254             if (is_error(ret2)) {
11255                 ret = ret2;
11256             }
11257         }
11258         return ret;
11259     }
11260 #endif
11261 #endif
11262 #ifdef TARGET_NR_vfork
11263     case TARGET_NR_vfork:
11264         return get_errno(do_fork(cpu_env,
11265                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11266                          0, 0, 0, 0));
11267 #endif
11268 #ifdef TARGET_NR_ugetrlimit
11269     case TARGET_NR_ugetrlimit:
11270     {
11271         struct rlimit rlim;
11272         int resource = target_to_host_resource(arg1);
11273         ret = get_errno(getrlimit(resource, &rlim));
11274         if (!is_error(ret)) {
11275             struct target_rlimit *target_rlim;
11276             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11277                 return -TARGET_EFAULT;
11278             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11279             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11280             unlock_user_struct(target_rlim, arg2, 1);
11281         }
11282         return ret;
11283     }
11284 #endif
11285 #ifdef TARGET_NR_truncate64
11286     case TARGET_NR_truncate64:
11287         if (!(p = lock_user_string(arg1)))
11288             return -TARGET_EFAULT;
11289         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11290         unlock_user(p, arg1, 0);
11291         return ret;
11292 #endif
11293 #ifdef TARGET_NR_ftruncate64
11294     case TARGET_NR_ftruncate64:
11295         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11296 #endif
11297 #ifdef TARGET_NR_stat64
11298     case TARGET_NR_stat64:
11299         if (!(p = lock_user_string(arg1))) {
11300             return -TARGET_EFAULT;
11301         }
11302         ret = get_errno(stat(path(p), &st));
11303         unlock_user(p, arg1, 0);
11304         if (!is_error(ret))
11305             ret = host_to_target_stat64(cpu_env, arg2, &st);
11306         return ret;
11307 #endif
11308 #ifdef TARGET_NR_lstat64
11309     case TARGET_NR_lstat64:
11310         if (!(p = lock_user_string(arg1))) {
11311             return -TARGET_EFAULT;
11312         }
11313         ret = get_errno(lstat(path(p), &st));
11314         unlock_user(p, arg1, 0);
11315         if (!is_error(ret))
11316             ret = host_to_target_stat64(cpu_env, arg2, &st);
11317         return ret;
11318 #endif
11319 #ifdef TARGET_NR_fstat64
11320     case TARGET_NR_fstat64:
11321         ret = get_errno(fstat(arg1, &st));
11322         if (!is_error(ret))
11323             ret = host_to_target_stat64(cpu_env, arg2, &st);
11324         return ret;
11325 #endif
11326 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11327 #ifdef TARGET_NR_fstatat64
11328     case TARGET_NR_fstatat64:
11329 #endif
11330 #ifdef TARGET_NR_newfstatat
11331     case TARGET_NR_newfstatat:
11332 #endif
11333         if (!(p = lock_user_string(arg2))) {
11334             return -TARGET_EFAULT;
11335         }
11336         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11337         unlock_user(p, arg2, 0);
11338         if (!is_error(ret))
11339             ret = host_to_target_stat64(cpu_env, arg3, &st);
11340         return ret;
11341 #endif
11342 #if defined(TARGET_NR_statx)
11343     case TARGET_NR_statx:
11344         {
11345             struct target_statx *target_stx;
11346             int dirfd = arg1;
11347             int flags = arg3;
11348 
11349             p = lock_user_string(arg2);
11350             if (p == NULL) {
11351                 return -TARGET_EFAULT;
11352             }
11353 #if defined(__NR_statx)
11354             {
11355                 /*
11356                  * It is assumed that struct statx is architecture independent.
11357                  */
11358                 struct target_statx host_stx;
11359                 int mask = arg4;
11360 
11361                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11362                 if (!is_error(ret)) {
11363                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11364                         unlock_user(p, arg2, 0);
11365                         return -TARGET_EFAULT;
11366                     }
11367                 }
11368 
11369                 if (ret != -TARGET_ENOSYS) {
11370                     unlock_user(p, arg2, 0);
11371                     return ret;
11372                 }
11373             }
11374 #endif
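                  /*
                   * Fallback: the host has no statx() or it returned
                   * ENOSYS, so emulate it with fstatat() and fill in the
                   * statx fields we can derive from struct stat.
                   */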
11375             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11376             unlock_user(p, arg2, 0);
11377 
11378             if (!is_error(ret)) {
11379                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11380                     return -TARGET_EFAULT;
11381                 }
11382                 memset(target_stx, 0, sizeof(*target_stx));
11383                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11384                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11385                 __put_user(st.st_ino, &target_stx->stx_ino);
11386                 __put_user(st.st_mode, &target_stx->stx_mode);
11387                 __put_user(st.st_uid, &target_stx->stx_uid);
11388                 __put_user(st.st_gid, &target_stx->stx_gid);
11389                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11390                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11391                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11392                 __put_user(st.st_size, &target_stx->stx_size);
11393                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11394                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11395                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11396                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11397                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11398                 unlock_user_struct(target_stx, arg5, 1);
11399             }
11400         }
11401         return ret;
11402 #endif
11403 #ifdef TARGET_NR_lchown
11404     case TARGET_NR_lchown:
11405         if (!(p = lock_user_string(arg1)))
11406             return -TARGET_EFAULT;
11407         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11408         unlock_user(p, arg1, 0);
11409         return ret;
11410 #endif
11411 #ifdef TARGET_NR_getuid
11412     case TARGET_NR_getuid:
11413         return get_errno(high2lowuid(getuid()));
11414 #endif
11415 #ifdef TARGET_NR_getgid
11416     case TARGET_NR_getgid:
11417         return get_errno(high2lowgid(getgid()));
11418 #endif
11419 #ifdef TARGET_NR_geteuid
11420     case TARGET_NR_geteuid:
11421         return get_errno(high2lowuid(geteuid()));
11422 #endif
11423 #ifdef TARGET_NR_getegid
11424     case TARGET_NR_getegid:
11425         return get_errno(high2lowgid(getegid()));
11426 #endif
11427     case TARGET_NR_setreuid:
11428         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11429     case TARGET_NR_setregid:
11430         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11431     case TARGET_NR_getgroups:
11432         {
11433             int gidsetsize = arg1;
11434             target_id *target_grouplist;
11435             gid_t *grouplist;
11436             int i;
11437 
11438             grouplist = alloca(gidsetsize * sizeof(gid_t));
11439             ret = get_errno(getgroups(gidsetsize, grouplist));
11440             if (gidsetsize == 0)
11441                 return ret;
11442             if (!is_error(ret)) {
11443                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11444                 if (!target_grouplist)
11445                     return -TARGET_EFAULT;
11446                 for (i = 0; i < ret; i++)
11447                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11448                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11449             }
11450         }
11451         return ret;
11452     case TARGET_NR_setgroups:
11453         {
11454             int gidsetsize = arg1;
11455             target_id *target_grouplist;
11456             gid_t *grouplist = NULL;
11457             int i;
11458             if (gidsetsize) {
11459                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11460                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11461                 if (!target_grouplist) {
11462                     return -TARGET_EFAULT;
11463                 }
11464                 for (i = 0; i < gidsetsize; i++) {
11465                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11466                 }
11467                 unlock_user(target_grouplist, arg2, 0);
11468             }
11469             return get_errno(setgroups(gidsetsize, grouplist));
11470         }
11471     case TARGET_NR_fchown:
11472         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11473 #if defined(TARGET_NR_fchownat)
11474     case TARGET_NR_fchownat:
11475         if (!(p = lock_user_string(arg2)))
11476             return -TARGET_EFAULT;
11477         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11478                                  low2highgid(arg4), arg5));
11479         unlock_user(p, arg2, 0);
11480         return ret;
11481 #endif
11482 #ifdef TARGET_NR_setresuid
11483     case TARGET_NR_setresuid:
11484         return get_errno(sys_setresuid(low2highuid(arg1),
11485                                        low2highuid(arg2),
11486                                        low2highuid(arg3)));
11487 #endif
11488 #ifdef TARGET_NR_getresuid
11489     case TARGET_NR_getresuid:
11490         {
11491             uid_t ruid, euid, suid;
11492             ret = get_errno(getresuid(&ruid, &euid, &suid));
11493             if (!is_error(ret)) {
11494                 if (put_user_id(high2lowuid(ruid), arg1)
11495                     || put_user_id(high2lowuid(euid), arg2)
11496                     || put_user_id(high2lowuid(suid), arg3))
11497                     return -TARGET_EFAULT;
11498             }
11499         }
11500         return ret;
11501 #endif
11502 #ifdef TARGET_NR_getresgid
11503     case TARGET_NR_setresgid:
11504         return get_errno(sys_setresgid(low2highgid(arg1),
11505                                        low2highgid(arg2),
11506                                        low2highgid(arg3)));
11507 #endif
11508 #ifdef TARGET_NR_getresgid
11509     case TARGET_NR_getresgid:
11510         {
11511             gid_t rgid, egid, sgid;
11512             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11513             if (!is_error(ret)) {
11514                 if (put_user_id(high2lowgid(rgid), arg1)
11515                     || put_user_id(high2lowgid(egid), arg2)
11516                     || put_user_id(high2lowgid(sgid), arg3))
11517                     return -TARGET_EFAULT;
11518             }
11519         }
11520         return ret;
11521 #endif
11522 #ifdef TARGET_NR_chown
11523     case TARGET_NR_chown:
11524         if (!(p = lock_user_string(arg1)))
11525             return -TARGET_EFAULT;
11526         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11527         unlock_user(p, arg1, 0);
11528         return ret;
11529 #endif
11530     case TARGET_NR_setuid:
11531         return get_errno(sys_setuid(low2highuid(arg1)));
11532     case TARGET_NR_setgid:
11533         return get_errno(sys_setgid(low2highgid(arg1)));
11534     case TARGET_NR_setfsuid:
11535         return get_errno(setfsuid(arg1));
11536     case TARGET_NR_setfsgid:
11537         return get_errno(setfsgid(arg1));
11538 
11539 #ifdef TARGET_NR_lchown32
11540     case TARGET_NR_lchown32:
11541         if (!(p = lock_user_string(arg1)))
11542             return -TARGET_EFAULT;
11543         ret = get_errno(lchown(p, arg2, arg3));
11544         unlock_user(p, arg1, 0);
11545         return ret;
11546 #endif
11547 #ifdef TARGET_NR_getuid32
11548     case TARGET_NR_getuid32:
11549         return get_errno(getuid());
11550 #endif
11551 
11552 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11553     /* Alpha specific */
11554     case TARGET_NR_getxuid:
11555         {
11556             uid_t euid;
11557             euid = geteuid();
11558             cpu_env->ir[IR_A4] = euid;
11559         }
11560         return get_errno(getuid());
11561 #endif
11562 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11563     /* Alpha specific */
11564     case TARGET_NR_getxgid:
11565         {
11566             gid_t egid;
11567             egid = getegid();
11568             cpu_env->ir[IR_A4] = egid;
11569         }
11570         return get_errno(getgid());
11571 #endif
11572 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11573     /* Alpha specific */
11574     case TARGET_NR_osf_getsysinfo:
11575         ret = -TARGET_EOPNOTSUPP;
11576         switch (arg1) {
11577           case TARGET_GSI_IEEE_FP_CONTROL:
11578             {
11579                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11580                 uint64_t swcr = cpu_env->swcr;
11581 
11582                 swcr &= ~SWCR_STATUS_MASK;
11583                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11584 
11585                 if (put_user_u64(swcr, arg2))
11586                     return -TARGET_EFAULT;
11587                 ret = 0;
11588             }
11589             break;
11590 
11591           /* case GSI_IEEE_STATE_AT_SIGNAL:
11592              -- Not implemented in linux kernel.
11593              case GSI_UACPROC:
11594              -- Retrieves current unaligned access state; not much used.
11595              case GSI_PROC_TYPE:
11596              -- Retrieves implver information; surely not used.
11597              case GSI_GET_HWRPB:
11598              -- Grabs a copy of the HWRPB; surely not used.
11599           */
11600         }
11601         return ret;
11602 #endif
11603 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11604     /* Alpha specific */
11605     case TARGET_NR_osf_setsysinfo:
11606         ret = -TARGET_EOPNOTSUPP;
11607         switch (arg1) {
11608           case TARGET_SSI_IEEE_FP_CONTROL:
11609             {
11610                 uint64_t swcr, fpcr;
11611 
11612                 if (get_user_u64(swcr, arg2)) {
11613                     return -TARGET_EFAULT;
11614                 }
11615 
11616                 /*
11617                  * The kernel calls swcr_update_status to update the
11618                  * status bits from the fpcr at every point that it
11619                  * could be queried.  Therefore, we store the status
11620                  * bits only in FPCR.
11621                  */
11622                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11623 
11624                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11625                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11626                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11627                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11628                 ret = 0;
11629             }
11630             break;
11631 
11632           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11633             {
11634                 uint64_t exc, fpcr, fex;
11635 
11636                 if (get_user_u64(exc, arg2)) {
11637                     return -TARGET_EFAULT;
11638                 }
11639                 exc &= SWCR_STATUS_MASK;
11640                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11641 
11642                 /* Old exceptions are not signaled.  */
11643                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11644                 fex = exc & ~fex;
11645                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11646                 fex &= (cpu_env)->swcr;
11647 
11648                 /* Update the hardware fpcr.  */
11649                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11650                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11651 
11652                 if (fex) {
11653                     int si_code = TARGET_FPE_FLTUNK;
11654                     target_siginfo_t info;
11655 
11656                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11657                         si_code = TARGET_FPE_FLTUND;
11658                     }
11659                     if (fex & SWCR_TRAP_ENABLE_INE) {
11660                         si_code = TARGET_FPE_FLTRES;
11661                     }
11662                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11663                         si_code = TARGET_FPE_FLTUND;
11664                     }
11665                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11666                         si_code = TARGET_FPE_FLTOVF;
11667                     }
11668                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11669                         si_code = TARGET_FPE_FLTDIV;
11670                     }
11671                     if (fex & SWCR_TRAP_ENABLE_INV) {
11672                         si_code = TARGET_FPE_FLTINV;
11673                     }
11674 
11675                     info.si_signo = SIGFPE;
11676                     info.si_errno = 0;
11677                     info.si_code = si_code;
11678                     info._sifields._sigfault._addr = (cpu_env)->pc;
11679                     queue_signal(cpu_env, info.si_signo,
11680                                  QEMU_SI_FAULT, &info);
11681                 }
11682                 ret = 0;
11683             }
11684             break;
11685 
11686           /* case SSI_NVPAIRS:
11687              -- Used with SSIN_UACPROC to enable unaligned accesses.
11688              case SSI_IEEE_STATE_AT_SIGNAL:
11689              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11690              -- Not implemented in linux kernel
11691           */
11692         }
11693         return ret;
11694 #endif
11695 #ifdef TARGET_NR_osf_sigprocmask
11696     /* Alpha specific.  */
11697     case TARGET_NR_osf_sigprocmask:
11698         {
11699             abi_ulong mask;
11700             int how;
11701             sigset_t set, oldset;
11702 
11703             switch (arg1) {
11704             case TARGET_SIG_BLOCK:
11705                 how = SIG_BLOCK;
11706                 break;
11707             case TARGET_SIG_UNBLOCK:
11708                 how = SIG_UNBLOCK;
11709                 break;
11710             case TARGET_SIG_SETMASK:
11711                 how = SIG_SETMASK;
11712                 break;
11713             default:
11714                 return -TARGET_EINVAL;
11715             }
11716             mask = arg2;
11717             target_to_host_old_sigset(&set, &mask);
11718             ret = do_sigprocmask(how, &set, &oldset);
11719             if (!ret) {
11720                 host_to_target_old_sigset(&mask, &oldset);
11721                 ret = mask;
11722             }
11723         }
11724         return ret;
11725 #endif
11726 
11727 #ifdef TARGET_NR_getgid32
11728     case TARGET_NR_getgid32:
11729         return get_errno(getgid());
11730 #endif
11731 #ifdef TARGET_NR_geteuid32
11732     case TARGET_NR_geteuid32:
11733         return get_errno(geteuid());
11734 #endif
11735 #ifdef TARGET_NR_getegid32
11736     case TARGET_NR_getegid32:
11737         return get_errno(getegid());
11738 #endif
11739 #ifdef TARGET_NR_setreuid32
11740     case TARGET_NR_setreuid32:
11741         return get_errno(setreuid(arg1, arg2));
11742 #endif
11743 #ifdef TARGET_NR_setregid32
11744     case TARGET_NR_setregid32:
11745         return get_errno(setregid(arg1, arg2));
11746 #endif
11747 #ifdef TARGET_NR_getgroups32
11748     case TARGET_NR_getgroups32:
11749         {
11750             int gidsetsize = arg1;
11751             uint32_t *target_grouplist;
11752             gid_t *grouplist;
11753             int i;
11754 
11755             grouplist = alloca(gidsetsize * sizeof(gid_t));
11756             ret = get_errno(getgroups(gidsetsize, grouplist));
11757             if (gidsetsize == 0)
11758                 return ret;
11759             if (!is_error(ret)) {
11760                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11761                 if (!target_grouplist) {
11762                     return -TARGET_EFAULT;
11763                 }
11764                 for (i = 0; i < ret; i++)
11765                     target_grouplist[i] = tswap32(grouplist[i]);
11766                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11767             }
11768         }
11769         return ret;
11770 #endif
11771 #ifdef TARGET_NR_setgroups32
11772     case TARGET_NR_setgroups32:
11773         {
11774             int gidsetsize = arg1;
11775             uint32_t *target_grouplist;
11776             gid_t *grouplist;
11777             int i;
11778 
11779             grouplist = alloca(gidsetsize * sizeof(gid_t));
11780             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11781             if (!target_grouplist) {
11782                 return -TARGET_EFAULT;
11783             }
11784             for (i = 0; i < gidsetsize; i++)
11785                 grouplist[i] = tswap32(target_grouplist[i]);
11786             unlock_user(target_grouplist, arg2, 0);
11787             return get_errno(setgroups(gidsetsize, grouplist));
11788         }
11789 #endif
11790 #ifdef TARGET_NR_fchown32
11791     case TARGET_NR_fchown32:
11792         return get_errno(fchown(arg1, arg2, arg3));
11793 #endif
11794 #ifdef TARGET_NR_setresuid32
11795     case TARGET_NR_setresuid32:
11796         return get_errno(sys_setresuid(arg1, arg2, arg3));
11797 #endif
11798 #ifdef TARGET_NR_getresuid32
11799     case TARGET_NR_getresuid32:
11800         {
11801             uid_t ruid, euid, suid;
11802             ret = get_errno(getresuid(&ruid, &euid, &suid));
11803             if (!is_error(ret)) {
11804                 if (put_user_u32(ruid, arg1)
11805                     || put_user_u32(euid, arg2)
11806                     || put_user_u32(suid, arg3))
11807                     return -TARGET_EFAULT;
11808             }
11809         }
11810         return ret;
11811 #endif
11812 #ifdef TARGET_NR_setresgid32
11813     case TARGET_NR_setresgid32:
11814         return get_errno(sys_setresgid(arg1, arg2, arg3));
11815 #endif
11816 #ifdef TARGET_NR_getresgid32
11817     case TARGET_NR_getresgid32:
11818         {
11819             gid_t rgid, egid, sgid;
11820             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11821             if (!is_error(ret)) {
11822                 if (put_user_u32(rgid, arg1)
11823                     || put_user_u32(egid, arg2)
11824                     || put_user_u32(sgid, arg3))
11825                     return -TARGET_EFAULT;
11826             }
11827         }
11828         return ret;
11829 #endif
11830 #ifdef TARGET_NR_chown32
11831     case TARGET_NR_chown32:
11832         if (!(p = lock_user_string(arg1)))
11833             return -TARGET_EFAULT;
11834         ret = get_errno(chown(p, arg2, arg3));
11835         unlock_user(p, arg1, 0);
11836         return ret;
11837 #endif
11838 #ifdef TARGET_NR_setuid32
11839     case TARGET_NR_setuid32:
11840         return get_errno(sys_setuid(arg1));
11841 #endif
11842 #ifdef TARGET_NR_setgid32
11843     case TARGET_NR_setgid32:
11844         return get_errno(sys_setgid(arg1));
11845 #endif
11846 #ifdef TARGET_NR_setfsuid32
11847     case TARGET_NR_setfsuid32:
11848         return get_errno(setfsuid(arg1));
11849 #endif
11850 #ifdef TARGET_NR_setfsgid32
11851     case TARGET_NR_setfsgid32:
11852         return get_errno(setfsgid(arg1));
11853 #endif
11854 #ifdef TARGET_NR_mincore
11855     case TARGET_NR_mincore:
11856         {
11857             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11858             if (!a) {
11859                 return -TARGET_ENOMEM;
11860             }
11861             p = lock_user_string(arg3);
11862             if (!p) {
11863                 ret = -TARGET_EFAULT;
11864             } else {
11865                 ret = get_errno(mincore(a, arg2, p));
11866                 unlock_user(p, arg3, ret);
11867             }
11868             unlock_user(a, arg1, 0);
11869         }
11870         return ret;
11871 #endif
11872 #ifdef TARGET_NR_arm_fadvise64_64
11873     case TARGET_NR_arm_fadvise64_64:
11874         /* arm_fadvise64_64 looks like fadvise64_64 but
11875          * with different argument order: fd, advice, offset, len
11876          * rather than the usual fd, offset, len, advice.
11877          * Note that offset and len are both 64-bit so appear as
11878          * pairs of 32-bit registers.
11879          */
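              /*
               * Illustrative: target_offset64() joins each high/low
               * register pair into one 64-bit value (ordering depends on
               * target endianness), so the host call below is the usual
               * posix_fadvise(fd, offset, len, advice).
               */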
11880         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11881                             target_offset64(arg5, arg6), arg2);
11882         return -host_to_target_errno(ret);
11883 #endif
11884 
11885 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11886 
11887 #ifdef TARGET_NR_fadvise64_64
11888     case TARGET_NR_fadvise64_64:
11889 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11890         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11891         ret = arg2;
11892         arg2 = arg3;
11893         arg3 = arg4;
11894         arg4 = arg5;
11895         arg5 = arg6;
11896         arg6 = ret;
11897 #else
11898         /* 6 args: fd, offset (high, low), len (high, low), advice */
11899         if (regpairs_aligned(cpu_env, num)) {
11900             /* offset is in (3,4), len in (5,6) and advice in 7 */
11901             arg2 = arg3;
11902             arg3 = arg4;
11903             arg4 = arg5;
11904             arg5 = arg6;
11905             arg6 = arg7;
11906         }
11907 #endif
11908         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11909                             target_offset64(arg4, arg5), arg6);
11910         return -host_to_target_errno(ret);
11911 #endif
11912 
11913 #ifdef TARGET_NR_fadvise64
11914     case TARGET_NR_fadvise64:
11915         /* 5 args: fd, offset (high, low), len, advice */
11916         if (regpairs_aligned(cpu_env, num)) {
11917             /* offset is in (3,4), len in 5 and advice in 6 */
11918             arg2 = arg3;
11919             arg3 = arg4;
11920             arg4 = arg5;
11921             arg5 = arg6;
11922         }
11923         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11924         return -host_to_target_errno(ret);
11925 #endif
11926 
11927 #else /* not a 32-bit ABI */
11928 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11929 #ifdef TARGET_NR_fadvise64_64
11930     case TARGET_NR_fadvise64_64:
11931 #endif
11932 #ifdef TARGET_NR_fadvise64
11933     case TARGET_NR_fadvise64:
11934 #endif
11935 #ifdef TARGET_S390X
11936         switch (arg4) {
11937         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11938         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11939         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11940         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11941         default: break;
11942         }
11943 #endif
11944         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11945 #endif
11946 #endif /* end of 64-bit ABI fadvise handling */
11947 
11948 #ifdef TARGET_NR_madvise
11949     case TARGET_NR_madvise:
11950         return target_madvise(arg1, arg2, arg3);
11951 #endif
11952 #ifdef TARGET_NR_fcntl64
11953     case TARGET_NR_fcntl64:
11954     {
11955         int cmd;
11956         struct flock64 fl;
11957         from_flock64_fn *copyfrom = copy_from_user_flock64;
11958         to_flock64_fn *copyto = copy_to_user_flock64;
11959 
11960 #ifdef TARGET_ARM
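              /*
               * The ARM OABI does not 8-byte-align 64-bit members, so its
               * struct flock64 layout differs from EABI; separate copy
               * helpers handle that case.
               */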
11961         if (!cpu_env->eabi) {
11962             copyfrom = copy_from_user_oabi_flock64;
11963             copyto = copy_to_user_oabi_flock64;
11964         }
11965 #endif
11966 
11967         cmd = target_to_host_fcntl_cmd(arg2);
11968         if (cmd == -TARGET_EINVAL) {
11969             return cmd;
11970         }
11971 
11972         switch (arg2) {
11973         case TARGET_F_GETLK64:
11974             ret = copyfrom(&fl, arg3);
11975             if (ret) {
11976                 break;
11977             }
11978             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11979             if (ret == 0) {
11980                 ret = copyto(arg3, &fl);
11981             }
11982 	    break;
11983             break;
11984         case TARGET_F_SETLK64:
11985         case TARGET_F_SETLKW64:
11986             ret = copyfrom(&fl, arg3);
11987             if (ret) {
11988                 break;
11989             }
11990             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11991             break;
11992         default:
11993             ret = do_fcntl(arg1, arg2, arg3);
11994             break;
11995         }
11996         return ret;
11997     }
11998 #endif
11999 #ifdef TARGET_NR_cacheflush
12000     case TARGET_NR_cacheflush:
12001         /* self-modifying code is handled automatically, so nothing needed */
12002         return 0;
12003 #endif
12004 #ifdef TARGET_NR_getpagesize
12005     case TARGET_NR_getpagesize:
12006         return TARGET_PAGE_SIZE;
12007 #endif
12008     case TARGET_NR_gettid:
12009         return get_errno(sys_gettid());
12010 #ifdef TARGET_NR_readahead
12011     case TARGET_NR_readahead:
12012 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12013         if (regpairs_aligned(cpu_env, num)) {
12014             arg2 = arg3;
12015             arg3 = arg4;
12016             arg4 = arg5;
12017         }
12018         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12019 #else
12020         ret = get_errno(readahead(arg1, arg2, arg3));
12021 #endif
12022         return ret;
12023 #endif
12024 #ifdef CONFIG_ATTR
12025 #ifdef TARGET_NR_setxattr
12026     case TARGET_NR_listxattr:
12027     case TARGET_NR_llistxattr:
12028     {
12029         void *p, *b = 0;
12030         if (arg2) {
12031             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12032             if (!b) {
12033                 return -TARGET_EFAULT;
12034             }
12035         }
12036         p = lock_user_string(arg1);
12037         if (p) {
12038             if (num == TARGET_NR_listxattr) {
12039                 ret = get_errno(listxattr(p, b, arg3));
12040             } else {
12041                 ret = get_errno(llistxattr(p, b, arg3));
12042             }
12043         } else {
12044             ret = -TARGET_EFAULT;
12045         }
12046         unlock_user(p, arg1, 0);
12047         unlock_user(b, arg2, arg3);
12048         return ret;
12049     }
12050     case TARGET_NR_flistxattr:
12051     {
12052         void *b = 0;
12053         if (arg2) {
12054             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12055             if (!b) {
12056                 return -TARGET_EFAULT;
12057             }
12058         }
12059         ret = get_errno(flistxattr(arg1, b, arg3));
12060         unlock_user(b, arg2, arg3);
12061         return ret;
12062     }
12063     case TARGET_NR_setxattr:
12064     case TARGET_NR_lsetxattr:
12065         {
12066             void *p, *n, *v = 0;
12067             if (arg3) {
12068                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12069                 if (!v) {
12070                     return -TARGET_EFAULT;
12071                 }
12072             }
12073             p = lock_user_string(arg1);
12074             n = lock_user_string(arg2);
12075             if (p && n) {
12076                 if (num == TARGET_NR_setxattr) {
12077                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12078                 } else {
12079                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12080                 }
12081             } else {
12082                 ret = -TARGET_EFAULT;
12083             }
12084             unlock_user(p, arg1, 0);
12085             unlock_user(n, arg2, 0);
12086             unlock_user(v, arg3, 0);
12087         }
12088         return ret;
12089     case TARGET_NR_fsetxattr:
12090         {
12091             void *n, *v = 0;
12092             if (arg3) {
12093                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12094                 if (!v) {
12095                     return -TARGET_EFAULT;
12096                 }
12097             }
12098             n = lock_user_string(arg2);
12099             if (n) {
12100                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12101             } else {
12102                 ret = -TARGET_EFAULT;
12103             }
12104             unlock_user(n, arg2, 0);
12105             unlock_user(v, arg3, 0);
12106         }
12107         return ret;
12108     case TARGET_NR_getxattr:
12109     case TARGET_NR_lgetxattr:
12110         {
12111             void *p, *n, *v = 0;
12112             if (arg3) {
12113                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12114                 if (!v) {
12115                     return -TARGET_EFAULT;
12116                 }
12117             }
12118             p = lock_user_string(arg1);
12119             n = lock_user_string(arg2);
12120             if (p && n) {
12121                 if (num == TARGET_NR_getxattr) {
12122                     ret = get_errno(getxattr(p, n, v, arg4));
12123                 } else {
12124                     ret = get_errno(lgetxattr(p, n, v, arg4));
12125                 }
12126             } else {
12127                 ret = -TARGET_EFAULT;
12128             }
12129             unlock_user(p, arg1, 0);
12130             unlock_user(n, arg2, 0);
12131             unlock_user(v, arg3, arg4);
12132         }
12133         return ret;
12134     case TARGET_NR_fgetxattr:
12135         {
12136             void *n, *v = 0;
12137             if (arg3) {
12138                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12139                 if (!v) {
12140                     return -TARGET_EFAULT;
12141                 }
12142             }
12143             n = lock_user_string(arg2);
12144             if (n) {
12145                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12146             } else {
12147                 ret = -TARGET_EFAULT;
12148             }
12149             unlock_user(n, arg2, 0);
12150             unlock_user(v, arg3, arg4);
12151         }
12152         return ret;
12153     case TARGET_NR_removexattr:
12154     case TARGET_NR_lremovexattr:
12155         {
12156             void *p, *n;
12157             p = lock_user_string(arg1);
12158             n = lock_user_string(arg2);
12159             if (p && n) {
12160                 if (num == TARGET_NR_removexattr) {
12161                     ret = get_errno(removexattr(p, n));
12162                 } else {
12163                     ret = get_errno(lremovexattr(p, n));
12164                 }
12165             } else {
12166                 ret = -TARGET_EFAULT;
12167             }
12168             unlock_user(p, arg1, 0);
12169             unlock_user(n, arg2, 0);
12170         }
12171         return ret;
12172     case TARGET_NR_fremovexattr:
12173         {
12174             void *n;
12175             n = lock_user_string(arg2);
12176             if (n) {
12177                 ret = get_errno(fremovexattr(arg1, n));
12178             } else {
12179                 ret = -TARGET_EFAULT;
12180             }
12181             unlock_user(n, arg2, 0);
12182         }
12183         return ret;
12184 #endif
12185 #endif /* CONFIG_ATTR */
12186 #ifdef TARGET_NR_set_thread_area
12187     case TARGET_NR_set_thread_area:
12188 #if defined(TARGET_MIPS)
12189       cpu_env->active_tc.CP0_UserLocal = arg1;
12190       return 0;
12191 #elif defined(TARGET_CRIS)
12192       if (arg1 & 0xff) {
12193           ret = -TARGET_EINVAL;
12194       } else {
12195           cpu_env->pregs[PR_PID] = arg1;
12196           ret = 0;
12197       }
12198       return ret;
12199 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12200       return do_set_thread_area(cpu_env, arg1);
12201 #elif defined(TARGET_M68K)
12202       {
12203           TaskState *ts = cpu->opaque;
12204           ts->tp_value = arg1;
12205           return 0;
12206       }
12207 #else
12208       return -TARGET_ENOSYS;
12209 #endif
12210 #endif
12211 #ifdef TARGET_NR_get_thread_area
12212     case TARGET_NR_get_thread_area:
12213 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12214         return do_get_thread_area(cpu_env, arg1);
12215 #elif defined(TARGET_M68K)
12216         {
12217             TaskState *ts = cpu->opaque;
12218             return ts->tp_value;
12219         }
12220 #else
12221         return -TARGET_ENOSYS;
12222 #endif
12223 #endif
12224 #ifdef TARGET_NR_getdomainname
12225     case TARGET_NR_getdomainname:
12226         return -TARGET_ENOSYS;
12227 #endif
12228 
12229 #ifdef TARGET_NR_clock_settime
12230     case TARGET_NR_clock_settime:
12231     {
12232         struct timespec ts;
12233 
12234         ret = target_to_host_timespec(&ts, arg2);
12235         if (!is_error(ret)) {
12236             ret = get_errno(clock_settime(arg1, &ts));
12237         }
12238         return ret;
12239     }
12240 #endif
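          /*
           * The *_time64 syscall variants exist for 32-bit guests: they take a
           * 64-bit struct target__kernel_timespec so times beyond 2038 can be
           * represented, hence the separate target_to_host_timespec64()
           * conversions used below.
           */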
12241 #ifdef TARGET_NR_clock_settime64
12242     case TARGET_NR_clock_settime64:
12243     {
12244         struct timespec ts;
12245 
12246         ret = target_to_host_timespec64(&ts, arg2);
12247         if (!is_error(ret)) {
12248             ret = get_errno(clock_settime(arg1, &ts));
12249         }
12250         return ret;
12251     }
12252 #endif
12253 #ifdef TARGET_NR_clock_gettime
12254     case TARGET_NR_clock_gettime:
12255     {
12256         struct timespec ts;
12257         ret = get_errno(clock_gettime(arg1, &ts));
12258         if (!is_error(ret)) {
12259             ret = host_to_target_timespec(arg2, &ts);
12260         }
12261         return ret;
12262     }
12263 #endif
12264 #ifdef TARGET_NR_clock_gettime64
12265     case TARGET_NR_clock_gettime64:
12266     {
12267         struct timespec ts;
12268         ret = get_errno(clock_gettime(arg1, &ts));
12269         if (!is_error(ret)) {
12270             ret = host_to_target_timespec64(arg2, &ts);
12271         }
12272         return ret;
12273     }
12274 #endif
12275 #ifdef TARGET_NR_clock_getres
12276     case TARGET_NR_clock_getres:
12277     {
12278         struct timespec ts;
12279         ret = get_errno(clock_getres(arg1, &ts));
12280         if (!is_error(ret)) {
12281             host_to_target_timespec(arg2, &ts);
12282         }
12283         return ret;
12284     }
12285 #endif
12286 #ifdef TARGET_NR_clock_getres_time64
12287     case TARGET_NR_clock_getres_time64:
12288     {
12289         struct timespec ts;
12290         ret = get_errno(clock_getres(arg1, &ts));
12291         if (!is_error(ret)) {
12292             host_to_target_timespec64(arg2, &ts);
12293         }
12294         return ret;
12295     }
12296 #endif
12297 #ifdef TARGET_NR_clock_nanosleep
12298     case TARGET_NR_clock_nanosleep:
12299     {
12300         struct timespec ts;
12301         if (target_to_host_timespec(&ts, arg3)) {
12302             return -TARGET_EFAULT;
12303         }
12304         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12305                                              &ts, arg4 ? &ts : NULL));
12306         /*
12307          * If the call is interrupted by a signal handler, it fails with
12308          * -TARGET_EINTR; in that case, if arg4 is not NULL and arg2 is not
12309          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12310          */
12311         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12312             host_to_target_timespec(arg4, &ts)) {
12313               return -TARGET_EFAULT;
12314         }
12315 
12316         return ret;
12317     }
12318 #endif
12319 #ifdef TARGET_NR_clock_nanosleep_time64
12320     case TARGET_NR_clock_nanosleep_time64:
12321     {
12322         struct timespec ts;
12323 
12324         if (target_to_host_timespec64(&ts, arg3)) {
12325             return -TARGET_EFAULT;
12326         }
12327 
12328         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12329                                              &ts, arg4 ? &ts : NULL));
12330 
12331         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12332             host_to_target_timespec64(arg4, &ts)) {
12333             return -TARGET_EFAULT;
12334         }
12335         return ret;
12336     }
12337 #endif
12338 
12339 #if defined(TARGET_NR_set_tid_address)
12340     case TARGET_NR_set_tid_address:
12341     {
12342         TaskState *ts = cpu->opaque;
12343         ts->child_tidptr = arg1;
12344         /* Do not call the host set_tid_address() syscall; instead just return the caller's tid. */
12345         return get_errno(sys_gettid());
12346     }
12347 #endif
12348 
12349     case TARGET_NR_tkill:
12350         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12351 
12352     case TARGET_NR_tgkill:
12353         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12354                          target_to_host_signal(arg3)));
12355 
12356 #ifdef TARGET_NR_set_robust_list
12357     case TARGET_NR_set_robust_list:
12358     case TARGET_NR_get_robust_list:
12359         /* The ABI for supporting robust futexes has userspace pass
12360          * the kernel a pointer to a linked list which is updated by
12361          * userspace after the syscall; the list is walked by the kernel
12362          * when the thread exits. Since the linked list in QEMU guest
12363          * memory isn't a valid linked list for the host and we have
12364          * no way to reliably intercept the thread-death event, we can't
12365          * support these. Silently return ENOSYS so that guest userspace
12366          * falls back to a non-robust futex implementation (which should
12367          * be OK except in the corner case of the guest crashing while
12368          * holding a mutex that is shared with another process via
12369          * shared memory).
12370          */
12371         return -TARGET_ENOSYS;
12372 #endif
12373 
12374 #if defined(TARGET_NR_utimensat)
12375     case TARGET_NR_utimensat:
12376         {
12377             struct timespec *tsp, ts[2];
12378             if (!arg3) {
12379                 tsp = NULL;
12380             } else {
12381                 if (target_to_host_timespec(ts, arg3)) {
12382                     return -TARGET_EFAULT;
12383                 }
12384                 if (target_to_host_timespec(ts + 1, arg3 +
12385                                             sizeof(struct target_timespec))) {
12386                     return -TARGET_EFAULT;
12387                 }
12388                 tsp = ts;
12389             }
12390             if (!arg2) {
12391                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12392             } else {
12393                 if (!(p = lock_user_string(arg2))) {
12394                     return -TARGET_EFAULT;
12395                 }
12396                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12397                 unlock_user(p, arg2, 0);
12398             }
12399         }
12400         return ret;
12401 #endif
12402 #ifdef TARGET_NR_utimensat_time64
12403     case TARGET_NR_utimensat_time64:
12404         {
12405             struct timespec *tsp, ts[2];
12406             if (!arg3) {
12407                 tsp = NULL;
12408             } else {
12409                 if (target_to_host_timespec64(ts, arg3)) {
12410                     return -TARGET_EFAULT;
12411                 }
12412                 if (target_to_host_timespec64(ts + 1, arg3 +
12413                                      sizeof(struct target__kernel_timespec))) {
12414                     return -TARGET_EFAULT;
12415                 }
12416                 tsp = ts;
12417             }
12418             if (!arg2) {
12419                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12420             } else {
12421                 p = lock_user_string(arg2);
12422                 if (!p) {
12423                     return -TARGET_EFAULT;
12424                 }
12425                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12426                 unlock_user(p, arg2, 0);
12427             }
12428         }
12429         return ret;
12430 #endif
12431 #ifdef TARGET_NR_futex
12432     case TARGET_NR_futex:
12433         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12434 #endif
12435 #ifdef TARGET_NR_futex_time64
12436     case TARGET_NR_futex_time64:
12437         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12438 #endif
12439 #ifdef CONFIG_INOTIFY
12440 #if defined(TARGET_NR_inotify_init)
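          /*
           * fd_trans_register() attaches a read-side translator to the new
           * descriptor so that struct inotify_event data read by the guest
           * is converted to guest byte order.
           */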
12441     case TARGET_NR_inotify_init:
12442         ret = get_errno(inotify_init());
12443         if (ret >= 0) {
12444             fd_trans_register(ret, &target_inotify_trans);
12445         }
12446         return ret;
12447 #endif
12448 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12449     case TARGET_NR_inotify_init1:
12450         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12451                                           fcntl_flags_tbl)));
12452         if (ret >= 0) {
12453             fd_trans_register(ret, &target_inotify_trans);
12454         }
12455         return ret;
12456 #endif
12457 #if defined(TARGET_NR_inotify_add_watch)
12458     case TARGET_NR_inotify_add_watch:
12459         p = lock_user_string(arg2);
12460         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12461         unlock_user(p, arg2, 0);
12462         return ret;
12463 #endif
12464 #if defined(TARGET_NR_inotify_rm_watch)
12465     case TARGET_NR_inotify_rm_watch:
12466         return get_errno(inotify_rm_watch(arg1, arg2));
12467 #endif
12468 #endif
12469 
12470 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12471     case TARGET_NR_mq_open:
12472         {
12473             struct mq_attr posix_mq_attr;
12474             struct mq_attr *pposix_mq_attr;
12475             int host_flags;
12476 
12477             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12478             pposix_mq_attr = NULL;
12479             if (arg4) {
12480                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12481                     return -TARGET_EFAULT;
12482                 }
12483                 pposix_mq_attr = &posix_mq_attr;
12484             }
12485             p = lock_user_string(arg1 - 1);
12486             if (!p) {
12487                 return -TARGET_EFAULT;
12488             }
12489             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12490             unlock_user(p, arg1, 0);
12491         }
12492         return ret;
12493 
12494     case TARGET_NR_mq_unlink:
12495         p = lock_user_string(arg1 - 1);
12496         if (!p) {
12497             return -TARGET_EFAULT;
12498         }
12499         ret = get_errno(mq_unlink(p));
12500         unlock_user(p, arg1, 0);
12501         return ret;
12502 
12503 #ifdef TARGET_NR_mq_timedsend
12504     case TARGET_NR_mq_timedsend:
12505         {
12506             struct timespec ts;
12507 
12508             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12509             if (arg5 != 0) {
12510                 if (target_to_host_timespec(&ts, arg5)) {
12511                     return -TARGET_EFAULT;
12512                 }
12513                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12514                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12515                     return -TARGET_EFAULT;
12516                 }
12517             } else {
12518                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12519             }
12520             unlock_user(p, arg2, arg3);
12521         }
12522         return ret;
12523 #endif
12524 #ifdef TARGET_NR_mq_timedsend_time64
12525     case TARGET_NR_mq_timedsend_time64:
12526         {
12527             struct timespec ts;
12528 
12529             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12530             if (arg5 != 0) {
12531                 if (target_to_host_timespec64(&ts, arg5)) {
12532                     return -TARGET_EFAULT;
12533                 }
12534                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12535                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12536                     return -TARGET_EFAULT;
12537                 }
12538             } else {
12539                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12540             }
12541             unlock_user(p, arg2, arg3);
12542         }
12543         return ret;
12544 #endif
12545 
12546 #ifdef TARGET_NR_mq_timedreceive
12547     case TARGET_NR_mq_timedreceive:
12548         {
12549             struct timespec ts;
12550             unsigned int prio;
12551 
12552             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12553             if (arg5 != 0) {
12554                 if (target_to_host_timespec(&ts, arg5)) {
12555                     return -TARGET_EFAULT;
12556                 }
12557                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12558                                                      &prio, &ts));
12559                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12560                     return -TARGET_EFAULT;
12561                 }
12562             } else {
12563                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12564                                                      &prio, NULL));
12565             }
12566             unlock_user(p, arg2, arg3);
12567             if (arg4 != 0)
12568                 put_user_u32(prio, arg4);
12569         }
12570         return ret;
12571 #endif
12572 #ifdef TARGET_NR_mq_timedreceive_time64
12573     case TARGET_NR_mq_timedreceive_time64:
12574         {
12575             struct timespec ts;
12576             unsigned int prio;
12577 
12578             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12579             if (arg5 != 0) {
12580                 if (target_to_host_timespec64(&ts, arg5)) {
12581                     return -TARGET_EFAULT;
12582                 }
12583                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12584                                                      &prio, &ts));
12585                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12586                     return -TARGET_EFAULT;
12587                 }
12588             } else {
12589                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12590                                                      &prio, NULL));
12591             }
12592             unlock_user(p, arg2, arg3);
12593             if (arg4 != 0) {
12594                 put_user_u32(prio, arg4);
12595             }
12596         }
12597         return ret;
12598 #endif
12599 
12600     /* Not implemented for now... */
12601 /*     case TARGET_NR_mq_notify: */
12602 /*         break; */
12603 
12604     case TARGET_NR_mq_getsetattr:
12605         {
12606             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12607             ret = 0;
12608             if (arg2 != 0) {
12609                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12610                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12611                                            &posix_mq_attr_out));
12612             } else if (arg3 != 0) {
12613                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12614             }
12615             if (ret == 0 && arg3 != 0) {
12616                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12617             }
12618         }
12619         return ret;
12620 #endif
12621 
12622 #ifdef CONFIG_SPLICE
12623 #ifdef TARGET_NR_tee
12624     case TARGET_NR_tee:
12625         {
12626             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12627         }
12628         return ret;
12629 #endif
12630 #ifdef TARGET_NR_splice
12631     case TARGET_NR_splice:
12632         {
12633             loff_t loff_in, loff_out;
12634             loff_t *ploff_in = NULL, *ploff_out = NULL;
12635             if (arg2) {
12636                 if (get_user_u64(loff_in, arg2)) {
12637                     return -TARGET_EFAULT;
12638                 }
12639                 ploff_in = &loff_in;
12640             }
12641             if (arg4) {
12642                 if (get_user_u64(loff_out, arg4)) {
12643                     return -TARGET_EFAULT;
12644                 }
12645                 ploff_out = &loff_out;
12646             }
12647             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12648             if (arg2) {
12649                 if (put_user_u64(loff_in, arg2)) {
12650                     return -TARGET_EFAULT;
12651                 }
12652             }
12653             if (arg4) {
12654                 if (put_user_u64(loff_out, arg4)) {
12655                     return -TARGET_EFAULT;
12656                 }
12657             }
12658         }
12659         return ret;
12660 #endif
12661 #ifdef TARGET_NR_vmsplice
12662     case TARGET_NR_vmsplice:
12663         {
12664             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12665             if (vec != NULL) {
12666                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12667                 unlock_iovec(vec, arg2, arg3, 0);
12668             } else {
12669                 ret = -host_to_target_errno(errno);
12670             }
12671         }
12672         return ret;
12673 #endif
12674 #endif /* CONFIG_SPLICE */
12675 #ifdef CONFIG_EVENTFD
12676 #if defined(TARGET_NR_eventfd)
12677     case TARGET_NR_eventfd:
12678         ret = get_errno(eventfd(arg1, 0));
12679         if (ret >= 0) {
12680             fd_trans_register(ret, &target_eventfd_trans);
12681         }
12682         return ret;
12683 #endif
12684 #if defined(TARGET_NR_eventfd2)
12685     case TARGET_NR_eventfd2:
12686     {
12687         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
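              /*
               * TARGET_O_NONBLOCK and TARGET_O_CLOEXEC may have different
               * numeric values on guest and host, so translate them
               * individually rather than passing the guest bits through.
               */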
12688         if (arg2 & TARGET_O_NONBLOCK) {
12689             host_flags |= O_NONBLOCK;
12690         }
12691         if (arg2 & TARGET_O_CLOEXEC) {
12692             host_flags |= O_CLOEXEC;
12693         }
12694         ret = get_errno(eventfd(arg1, host_flags));
12695         if (ret >= 0) {
12696             fd_trans_register(ret, &target_eventfd_trans);
12697         }
12698         return ret;
12699     }
12700 #endif
12701 #endif /* CONFIG_EVENTFD  */
12702 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12703     case TARGET_NR_fallocate:
12704 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12705         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12706                                   target_offset64(arg5, arg6)));
12707 #else
12708         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12709 #endif
12710         return ret;
12711 #endif
12712 #if defined(CONFIG_SYNC_FILE_RANGE)
12713 #if defined(TARGET_NR_sync_file_range)
12714     case TARGET_NR_sync_file_range:
12715 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12716 #if defined(TARGET_MIPS)
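              /*
               * The MIPS o32 ABI pads the argument list so that the 64-bit
               * offset and nbytes values sit in aligned register pairs,
               * which pushes them to arg3..arg6 and the flags to arg7.
               */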
12717         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12718                                         target_offset64(arg5, arg6), arg7));
12719 #else
12720         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12721                                         target_offset64(arg4, arg5), arg6));
12722 #endif /* !TARGET_MIPS */
12723 #else
12724         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12725 #endif
12726         return ret;
12727 #endif
12728 #if defined(TARGET_NR_sync_file_range2) || \
12729     defined(TARGET_NR_arm_sync_file_range)
12730 #if defined(TARGET_NR_sync_file_range2)
12731     case TARGET_NR_sync_file_range2:
12732 #endif
12733 #if defined(TARGET_NR_arm_sync_file_range)
12734     case TARGET_NR_arm_sync_file_range:
12735 #endif
12736         /* This is like sync_file_range but the arguments are reordered */
12737 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12738         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12739                                         target_offset64(arg5, arg6), arg2));
12740 #else
12741         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12742 #endif
12743         return ret;
12744 #endif
12745 #endif
12746 #if defined(TARGET_NR_signalfd4)
12747     case TARGET_NR_signalfd4:
12748         return do_signalfd4(arg1, arg2, arg4);
12749 #endif
12750 #if defined(TARGET_NR_signalfd)
12751     case TARGET_NR_signalfd:
12752         return do_signalfd4(arg1, arg2, 0);
12753 #endif
12754 #if defined(CONFIG_EPOLL)
12755 #if defined(TARGET_NR_epoll_create)
12756     case TARGET_NR_epoll_create:
12757         return get_errno(epoll_create(arg1));
12758 #endif
12759 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12760     case TARGET_NR_epoll_create1:
12761         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12762 #endif
12763 #if defined(TARGET_NR_epoll_ctl)
12764     case TARGET_NR_epoll_ctl:
12765     {
12766         struct epoll_event ep;
12767         struct epoll_event *epp = 0;
12768         if (arg4) {
12769             if (arg2 != EPOLL_CTL_DEL) {
12770                 struct target_epoll_event *target_ep;
12771                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12772                     return -TARGET_EFAULT;
12773                 }
12774                 ep.events = tswap32(target_ep->events);
12775                 /*
12776                  * The epoll_data_t union is just opaque data to the kernel,
12777                  * so we transfer all 64 bits across and need not worry what
12778                  * actual data type it is.
12779                  */
12780                 ep.data.u64 = tswap64(target_ep->data.u64);
12781                 unlock_user_struct(target_ep, arg4, 0);
12782             }
12783             /*
12784              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required
12785              * a non-null pointer, even though this particular argument
12786              * is ignored.
12787              */
12788             epp = &ep;
12789         }
12790         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12791     }
12792 #endif
12793 
12794 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12795 #if defined(TARGET_NR_epoll_wait)
12796     case TARGET_NR_epoll_wait:
12797 #endif
12798 #if defined(TARGET_NR_epoll_pwait)
12799     case TARGET_NR_epoll_pwait:
12800 #endif
12801     {
12802         struct target_epoll_event *target_ep;
12803         struct epoll_event *ep;
12804         int epfd = arg1;
12805         int maxevents = arg3;
12806         int timeout = arg4;
12807 
12808         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12809             return -TARGET_EINVAL;
12810         }
12811 
12812         target_ep = lock_user(VERIFY_WRITE, arg2,
12813                               maxevents * sizeof(struct target_epoll_event), 1);
12814         if (!target_ep) {
12815             return -TARGET_EFAULT;
12816         }
12817 
12818         ep = g_try_new(struct epoll_event, maxevents);
12819         if (!ep) {
12820             unlock_user(target_ep, arg2, 0);
12821             return -TARGET_ENOMEM;
12822         }
12823 
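              /*
               * Both syscalls funnel into the host epoll_pwait(): plain
               * epoll_wait passes a NULL signal mask, while epoll_pwait
               * temporarily installs the guest-supplied mask around the wait.
               */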
12824         switch (num) {
12825 #if defined(TARGET_NR_epoll_pwait)
12826         case TARGET_NR_epoll_pwait:
12827         {
12828             sigset_t *set = NULL;
12829 
12830             if (arg5) {
12831                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12832                 if (ret != 0) {
12833                     break;
12834                 }
12835             }
12836 
12837             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12838                                              set, SIGSET_T_SIZE));
12839 
12840             if (set) {
12841                 finish_sigsuspend_mask(ret);
12842             }
12843             break;
12844         }
12845 #endif
12846 #if defined(TARGET_NR_epoll_wait)
12847         case TARGET_NR_epoll_wait:
12848             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12849                                              NULL, 0));
12850             break;
12851 #endif
12852         default:
12853             ret = -TARGET_ENOSYS;
12854         }
12855         if (!is_error(ret)) {
12856             int i;
12857             for (i = 0; i < ret; i++) {
12858                 target_ep[i].events = tswap32(ep[i].events);
12859                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12860             }
12861             unlock_user(target_ep, arg2,
12862                         ret * sizeof(struct target_epoll_event));
12863         } else {
12864             unlock_user(target_ep, arg2, 0);
12865         }
12866         g_free(ep);
12867         return ret;
12868     }
12869 #endif
12870 #endif
12871 #ifdef TARGET_NR_prlimit64
12872     case TARGET_NR_prlimit64:
12873     {
12874         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12875         struct target_rlimit64 *target_rnew, *target_rold;
12876         struct host_rlimit64 rnew, rold, *rnewp = 0;
12877         int resource = target_to_host_resource(arg2);
12878 
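              /*
               * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are
               * not forwarded to the host, presumably so that guest-chosen
               * values cannot starve the emulator itself of address space or
               * stack; only the old limits are still reported back.
               */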
12879         if (arg3 && (resource != RLIMIT_AS &&
12880                      resource != RLIMIT_DATA &&
12881                      resource != RLIMIT_STACK)) {
12882             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12883                 return -TARGET_EFAULT;
12884             }
12885             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12886             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12887             unlock_user_struct(target_rnew, arg3, 0);
12888             rnewp = &rnew;
12889         }
12890 
12891         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12892         if (!is_error(ret) && arg4) {
12893             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12894                 return -TARGET_EFAULT;
12895             }
12896             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12897             target_rold->rlim_max = tswap64(rold.rlim_max);
12898             unlock_user_struct(target_rold, arg4, 1);
12899         }
12900         return ret;
12901     }
12902 #endif
12903 #ifdef TARGET_NR_gethostname
12904     case TARGET_NR_gethostname:
12905     {
12906         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12907         if (name) {
12908             ret = get_errno(gethostname(name, arg2));
12909             unlock_user(name, arg1, arg2);
12910         } else {
12911             ret = -TARGET_EFAULT;
12912         }
12913         return ret;
12914     }
12915 #endif
12916 #ifdef TARGET_NR_atomic_cmpxchg_32
12917     case TARGET_NR_atomic_cmpxchg_32:
12918     {
12919         /* should use start_exclusive from main.c */
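              /*
               * Emulated compare-and-swap: read the current word, queue a
               * guest SIGSEGV if the address faults, store the new value only
               * when the old value matches arg2, and return the old value.
               */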
12920         abi_ulong mem_value;
12921         if (get_user_u32(mem_value, arg6)) {
12922             target_siginfo_t info;
12923             info.si_signo = SIGSEGV;
12924             info.si_errno = 0;
12925             info.si_code = TARGET_SEGV_MAPERR;
12926             info._sifields._sigfault._addr = arg6;
12927             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12928             ret = 0xdeadbeef;
12929 
12930         }
12931         if (mem_value == arg2)
12932             put_user_u32(arg1, arg6);
12933         return mem_value;
12934     }
12935 #endif
12936 #ifdef TARGET_NR_atomic_barrier
12937     case TARGET_NR_atomic_barrier:
12938         /* Like the kernel implementation and the
12939            QEMU ARM barrier, this is a no-op. */
12940         return 0;
12941 #endif
12942 
12943 #ifdef TARGET_NR_timer_create
12944     case TARGET_NR_timer_create:
12945     {
12946         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12947 
12948         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12949 
12950         int clkid = arg1;
12951         int timer_index = next_free_host_timer();
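              /*
               * Guest timer ids are an index into the small g_posix_timers
               * slot table, tagged with TIMER_MAGIC; get_timer_id() later
               * validates and strips the tag to recover the host timer_t.
               */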
12952 
12953         if (timer_index < 0) {
12954             ret = -TARGET_EAGAIN;
12955         } else {
12956             timer_t *phtimer = g_posix_timers + timer_index;
12957 
12958             if (arg2) {
12959                 phost_sevp = &host_sevp;
12960                 ret = target_to_host_sigevent(phost_sevp, arg2);
12961                 if (ret != 0) {
12962                     free_host_timer_slot(timer_index);
12963                     return ret;
12964                 }
12965             }
12966 
12967             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12968             if (ret) {
12969                 free_host_timer_slot(timer_index);
12970             } else {
12971                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12972                     timer_delete(*phtimer);
12973                     free_host_timer_slot(timer_index);
12974                     return -TARGET_EFAULT;
12975                 }
12976             }
12977         }
12978         return ret;
12979     }
12980 #endif
12981 
12982 #ifdef TARGET_NR_timer_settime
12983     case TARGET_NR_timer_settime:
12984     {
12985         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12986          * struct itimerspec * old_value */
12987         target_timer_t timerid = get_timer_id(arg1);
12988 
12989         if (timerid < 0) {
12990             ret = timerid;
12991         } else if (arg3 == 0) {
12992             ret = -TARGET_EINVAL;
12993         } else {
12994             timer_t htimer = g_posix_timers[timerid];
12995             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12996 
12997             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12998                 return -TARGET_EFAULT;
12999             }
13000             ret = get_errno(
13001                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13002             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13003                 return -TARGET_EFAULT;
13004             }
13005         }
13006         return ret;
13007     }
13008 #endif
13009 
13010 #ifdef TARGET_NR_timer_settime64
13011     case TARGET_NR_timer_settime64:
13012     {
13013         target_timer_t timerid = get_timer_id(arg1);
13014 
13015         if (timerid < 0) {
13016             ret = timerid;
13017         } else if (arg3 == 0) {
13018             ret = -TARGET_EINVAL;
13019         } else {
13020             timer_t htimer = g_posix_timers[timerid];
13021             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13022 
13023             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13024                 return -TARGET_EFAULT;
13025             }
13026             ret = get_errno(
13027                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13028             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13029                 return -TARGET_EFAULT;
13030             }
13031         }
13032         return ret;
13033     }
13034 #endif
13035 
13036 #ifdef TARGET_NR_timer_gettime
13037     case TARGET_NR_timer_gettime:
13038     {
13039         /* args: timer_t timerid, struct itimerspec *curr_value */
13040         target_timer_t timerid = get_timer_id(arg1);
13041 
13042         if (timerid < 0) {
13043             ret = timerid;
13044         } else if (!arg2) {
13045             ret = -TARGET_EFAULT;
13046         } else {
13047             timer_t htimer = g_posix_timers[timerid];
13048             struct itimerspec hspec;
13049             ret = get_errno(timer_gettime(htimer, &hspec));
13050 
13051             if (host_to_target_itimerspec(arg2, &hspec)) {
13052                 ret = -TARGET_EFAULT;
13053             }
13054         }
13055         return ret;
13056     }
13057 #endif
13058 
13059 #ifdef TARGET_NR_timer_gettime64
13060     case TARGET_NR_timer_gettime64:
13061     {
13062         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13063         target_timer_t timerid = get_timer_id(arg1);
13064 
13065         if (timerid < 0) {
13066             ret = timerid;
13067         } else if (!arg2) {
13068             ret = -TARGET_EFAULT;
13069         } else {
13070             timer_t htimer = g_posix_timers[timerid];
13071             struct itimerspec hspec;
13072             ret = get_errno(timer_gettime(htimer, &hspec));
13073 
13074             if (host_to_target_itimerspec64(arg2, &hspec)) {
13075                 ret = -TARGET_EFAULT;
13076             }
13077         }
13078         return ret;
13079     }
13080 #endif
13081 
13082 #ifdef TARGET_NR_timer_getoverrun
13083     case TARGET_NR_timer_getoverrun:
13084     {
13085         /* args: timer_t timerid */
13086         target_timer_t timerid = get_timer_id(arg1);
13087 
13088         if (timerid < 0) {
13089             ret = timerid;
13090         } else {
13091             timer_t htimer = g_posix_timers[timerid];
13092             ret = get_errno(timer_getoverrun(htimer));
13093         }
13094         return ret;
13095     }
13096 #endif
13097 
13098 #ifdef TARGET_NR_timer_delete
13099     case TARGET_NR_timer_delete:
13100     {
13101         /* args: timer_t timerid */
13102         target_timer_t timerid = get_timer_id(arg1);
13103 
13104         if (timerid < 0) {
13105             ret = timerid;
13106         } else {
13107             timer_t htimer = g_posix_timers[timerid];
13108             ret = get_errno(timer_delete(htimer));
13109             free_host_timer_slot(timerid);
13110         }
13111         return ret;
13112     }
13113 #endif
13114 
13115 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13116     case TARGET_NR_timerfd_create:
13117         return get_errno(timerfd_create(arg1,
13118                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13119 #endif
13120 
13121 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13122     case TARGET_NR_timerfd_gettime:
13123         {
13124             struct itimerspec its_curr;
13125 
13126             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13127 
13128             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13129                 return -TARGET_EFAULT;
13130             }
13131         }
13132         return ret;
13133 #endif
13134 
13135 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13136     case TARGET_NR_timerfd_gettime64:
13137         {
13138             struct itimerspec its_curr;
13139 
13140             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13141 
13142             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13143                 return -TARGET_EFAULT;
13144             }
13145         }
13146         return ret;
13147 #endif
13148 
13149 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13150     case TARGET_NR_timerfd_settime:
13151         {
13152             struct itimerspec its_new, its_old, *p_new;
13153 
13154             if (arg3) {
13155                 if (target_to_host_itimerspec(&its_new, arg3)) {
13156                     return -TARGET_EFAULT;
13157                 }
13158                 p_new = &its_new;
13159             } else {
13160                 p_new = NULL;
13161             }
13162 
13163             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13164 
13165             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13166                 return -TARGET_EFAULT;
13167             }
13168         }
13169         return ret;
13170 #endif
13171 
13172 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13173     case TARGET_NR_timerfd_settime64:
13174         {
13175             struct itimerspec its_new, its_old, *p_new;
13176 
13177             if (arg3) {
13178                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13179                     return -TARGET_EFAULT;
13180                 }
13181                 p_new = &its_new;
13182             } else {
13183                 p_new = NULL;
13184             }
13185 
13186             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13187 
13188             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13189                 return -TARGET_EFAULT;
13190             }
13191         }
13192         return ret;
13193 #endif
13194 
13195 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13196     case TARGET_NR_ioprio_get:
13197         return get_errno(ioprio_get(arg1, arg2));
13198 #endif
13199 
13200 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13201     case TARGET_NR_ioprio_set:
13202         return get_errno(ioprio_set(arg1, arg2, arg3));
13203 #endif
13204 
13205 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13206     case TARGET_NR_setns:
13207         return get_errno(setns(arg1, arg2));
13208 #endif
13209 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13210     case TARGET_NR_unshare:
13211         return get_errno(unshare(arg1));
13212 #endif
13213 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13214     case TARGET_NR_kcmp:
13215         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13216 #endif
13217 #ifdef TARGET_NR_swapcontext
13218     case TARGET_NR_swapcontext:
13219         /* PowerPC specific.  */
13220         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13221 #endif
13222 #ifdef TARGET_NR_memfd_create
13223     case TARGET_NR_memfd_create:
13224         p = lock_user_string(arg1);
13225         if (!p) {
13226             return -TARGET_EFAULT;
13227         }
13228         ret = get_errno(memfd_create(p, arg2));
13229         fd_trans_unregister(ret);
13230         unlock_user(p, arg1, 0);
13231         return ret;
13232 #endif
13233 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13234     case TARGET_NR_membarrier:
13235         return get_errno(membarrier(arg1, arg2));
13236 #endif
13237 
13238 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13239     case TARGET_NR_copy_file_range:
13240         {
13241             loff_t inoff, outoff;
13242             loff_t *pinoff = NULL, *poutoff = NULL;
13243 
13244             if (arg2) {
13245                 if (get_user_u64(inoff, arg2)) {
13246                     return -TARGET_EFAULT;
13247                 }
13248                 pinoff = &inoff;
13249             }
13250             if (arg4) {
13251                 if (get_user_u64(outoff, arg4)) {
13252                     return -TARGET_EFAULT;
13253                 }
13254                 poutoff = &outoff;
13255             }
13256             /* Do not sign-extend the count parameter. */
13257             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13258                                                  (abi_ulong)arg5, arg6));
13259             if (!is_error(ret) && ret > 0) {
13260                 if (arg2) {
13261                     if (put_user_u64(inoff, arg2)) {
13262                         return -TARGET_EFAULT;
13263                     }
13264                 }
13265                 if (arg4) {
13266                     if (put_user_u64(outoff, arg4)) {
13267                         return -TARGET_EFAULT;
13268                     }
13269                 }
13270             }
13271         }
13272         return ret;
13273 #endif
13274 
13275 #if defined(TARGET_NR_pivot_root)
13276     case TARGET_NR_pivot_root:
13277         {
13278             void *p2;
13279             p = lock_user_string(arg1); /* new_root */
13280             p2 = lock_user_string(arg2); /* put_old */
13281             if (!p || !p2) {
13282                 ret = -TARGET_EFAULT;
13283             } else {
13284                 ret = get_errno(pivot_root(p, p2));
13285             }
13286             unlock_user(p2, arg2, 0);
13287             unlock_user(p, arg1, 0);
13288         }
13289         return ret;
13290 #endif
13291 
13292     default:
13293         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13294         return -TARGET_ENOSYS;
13295     }
13296     return ret;
13297 }
13298 
13299 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13300                     abi_long arg2, abi_long arg3, abi_long arg4,
13301                     abi_long arg5, abi_long arg6, abi_long arg7,
13302                     abi_long arg8)
13303 {
13304     CPUState *cpu = env_cpu(cpu_env);
13305     abi_long ret;
13306 
13307 #ifdef DEBUG_ERESTARTSYS
13308     /* Debug-only code for exercising the syscall-restart code paths
13309      * in the per-architecture cpu main loops: restart every syscall
13310      * the guest makes once before letting it through.
13311      */
13312     {
13313         static bool flag;
13314         flag = !flag;
13315         if (flag) {
13316             return -QEMU_ERESTARTSYS;
13317         }
13318     }
13319 #endif
13320 
13321     record_syscall_start(cpu, num, arg1,
13322                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13323 
13324     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13325         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13326     }
13327 
13328     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13329                       arg5, arg6, arg7, arg8);
13330 
13331     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13332         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13333                           arg3, arg4, arg5, arg6);
13334     }
13335 
13336     record_syscall_return(cpu, num, ret);
13337     return ret;
13338 }
13339