xref: /openbmc/qemu/linux-user/syscall.c (revision 6490d9aa)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 
99 #ifdef HAVE_SYS_MOUNT_FSCONFIG
100 /*
101  * With glibc >= 2.36, linux/mount.h conflicts with sys/mount.h,
102  * which in turn prevents use of linux/fs.h. So we have to
103  * define the constants ourselves for now.
104  */
105 #define FS_IOC_GETFLAGS                _IOR('f', 1, long)
106 #define FS_IOC_SETFLAGS                _IOW('f', 2, long)
107 #define FS_IOC_GETVERSION              _IOR('v', 1, long)
108 #define FS_IOC_SETVERSION              _IOW('v', 2, long)
109 #define FS_IOC_FIEMAP                  _IOWR('f', 11, struct fiemap)
110 #define FS_IOC32_GETFLAGS              _IOR('f', 1, int)
111 #define FS_IOC32_SETFLAGS              _IOW('f', 2, int)
112 #define FS_IOC32_GETVERSION            _IOR('v', 1, int)
113 #define FS_IOC32_SETVERSION            _IOW('v', 2, int)
114 
115 #define BLKGETSIZE64 _IOR(0x12,114,size_t)
116 #define BLKDISCARD _IO(0x12,119)
117 #define BLKIOMIN _IO(0x12,120)
118 #define BLKIOOPT _IO(0x12,121)
119 #define BLKALIGNOFF _IO(0x12,122)
120 #define BLKPBSZGET _IO(0x12,123)
121 #define BLKDISCARDZEROES _IO(0x12,124)
122 #define BLKSECDISCARD _IO(0x12,125)
123 #define BLKROTATIONAL _IO(0x12,126)
124 #define BLKZEROOUT _IO(0x12,127)
125 
126 #define FIBMAP     _IO(0x00,1)
127 #define FIGETBSZ   _IO(0x00,2)
128 
129 struct file_clone_range {
130         __s64 src_fd;
131         __u64 src_offset;
132         __u64 src_length;
133         __u64 dest_offset;
134 };
135 
136 #define FICLONE         _IOW(0x94, 9, int)
137 #define FICLONERANGE    _IOW(0x94, 13, struct file_clone_range)
138 
139 #else
140 #include <linux/fs.h>
141 #endif
142 #include <linux/fd.h>
143 #if defined(CONFIG_FIEMAP)
144 #include <linux/fiemap.h>
145 #endif
146 #include <linux/fb.h>
147 #if defined(CONFIG_USBFS)
148 #include <linux/usbdevice_fs.h>
149 #include <linux/usb/ch9.h>
150 #endif
151 #include <linux/vt.h>
152 #include <linux/dm-ioctl.h>
153 #include <linux/reboot.h>
154 #include <linux/route.h>
155 #include <linux/filter.h>
156 #include <linux/blkpg.h>
157 #include <netpacket/packet.h>
158 #include <linux/netlink.h>
159 #include <linux/if_alg.h>
160 #include <linux/rtc.h>
161 #include <sound/asound.h>
162 #ifdef HAVE_BTRFS_H
163 #include <linux/btrfs.h>
164 #endif
165 #ifdef HAVE_DRM_H
166 #include <libdrm/drm.h>
167 #include <libdrm/i915_drm.h>
168 #endif
169 #include "linux_loop.h"
170 #include "uname.h"
171 
172 #include "qemu.h"
173 #include "user-internals.h"
174 #include "strace.h"
175 #include "signal-common.h"
176 #include "loader.h"
177 #include "user-mmap.h"
178 #include "user/safe-syscall.h"
179 #include "qemu/guest-random.h"
180 #include "qemu/selfmap.h"
181 #include "user/syscall-trace.h"
182 #include "special-errno.h"
183 #include "qapi/error.h"
184 #include "fd-trans.h"
185 #include "tcg/tcg.h"
186 #include "cpu_loop-common.h"
187 
188 #ifndef CLONE_IO
189 #define CLONE_IO                0x80000000      /* Clone io context */
190 #endif
191 
192 /* We can't directly call the host clone syscall, because this will
193  * badly confuse libc (breaking mutexes, for example). So we must
194  * divide clone flags into:
195  *  * flag combinations that look like pthread_create()
196  *  * flag combinations that look like fork()
197  *  * flags we can implement within QEMU itself
198  *  * flags we can't support and will return an error for
199  */
200 /* For thread creation, all these flags must be present; for
201  * fork, none must be present.
202  */
203 #define CLONE_THREAD_FLAGS                              \
204     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
205      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
206 
207 /* These flags are ignored:
208  * CLONE_DETACHED is now ignored by the kernel;
209  * CLONE_IO is just an optimisation hint to the I/O scheduler
210  */
211 #define CLONE_IGNORED_FLAGS                     \
212     (CLONE_DETACHED | CLONE_IO)
213 
214 /* Flags for fork which we can implement within QEMU itself */
215 #define CLONE_OPTIONAL_FORK_FLAGS               \
216     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
217      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
218 
219 /* Flags for thread creation which we can implement within QEMU itself */
220 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
221     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
222      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
223 
224 #define CLONE_INVALID_FORK_FLAGS                                        \
225     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
226 
227 #define CLONE_INVALID_THREAD_FLAGS                                      \
228     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
229        CLONE_IGNORED_FLAGS))
230 
231 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
232  * have almost all been allocated. We cannot support any of
233  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
234  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
235  * The checks against the invalid thread masks above will catch these.
236  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
237  */
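/*
 * Illustrative sketch of how a guest clone() flags value can be classified
 * using the masks above; the helper names are hypothetical, and the real
 * checks live in do_fork():
 *
 *   static bool flags_look_like_pthread_create(unsigned int flags)
 *   {
 *       return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
 *              (flags & CLONE_INVALID_THREAD_FLAGS) == 0;
 *   }
 *
 *   static bool flags_look_like_fork(unsigned int flags)
 *   {
 *       return (flags & CLONE_THREAD_FLAGS) == 0 &&
 *              (flags & CLONE_INVALID_FORK_FLAGS) == 0;
 *   }
 */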
238 
239 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
240  * once. This exercises the codepaths for restart.
241  */
242 //#define DEBUG_ERESTARTSYS
243 
244 //#include <linux/msdos_fs.h>
245 #define VFAT_IOCTL_READDIR_BOTH \
246     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
247 #define VFAT_IOCTL_READDIR_SHORT \
248     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
249 
250 #undef _syscall0
251 #undef _syscall1
252 #undef _syscall2
253 #undef _syscall3
254 #undef _syscall4
255 #undef _syscall5
256 #undef _syscall6
257 
258 #define _syscall0(type,name)		\
259 static type name (void)			\
260 {					\
261 	return syscall(__NR_##name);	\
262 }
263 
264 #define _syscall1(type,name,type1,arg1)		\
265 static type name (type1 arg1)			\
266 {						\
267 	return syscall(__NR_##name, arg1);	\
268 }
269 
270 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
271 static type name (type1 arg1,type2 arg2)		\
272 {							\
273 	return syscall(__NR_##name, arg1, arg2);	\
274 }
275 
276 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
277 static type name (type1 arg1,type2 arg2,type3 arg3)		\
278 {								\
279 	return syscall(__NR_##name, arg1, arg2, arg3);		\
280 }
281 
282 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
283 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
284 {										\
285 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
286 }
287 
288 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
289 		  type5,arg5)							\
290 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
291 {										\
292 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
293 }
294 
295 
296 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
297 		  type5,arg5,type6,arg6)					\
298 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
299                   type6 arg6)							\
300 {										\
301 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
302 }
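/*
 * For illustration: each _syscallN() use below generates a small static
 * wrapper around the raw syscall(2) interface.  A hypothetical
 *
 *   _syscall2(int, sys_foo, int, a, int, b)
 *
 * expands to
 *
 *   static int sys_foo(int a, int b)
 *   {
 *       return syscall(__NR_sys_foo, a, b);
 *   }
 *
 * which is why the __NR_sys_* aliases below map each sys_* wrapper name
 * back onto the real __NR_* syscall number.
 */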
303 
304 
305 #define __NR_sys_uname __NR_uname
306 #define __NR_sys_getcwd1 __NR_getcwd
307 #define __NR_sys_getdents __NR_getdents
308 #define __NR_sys_getdents64 __NR_getdents64
309 #define __NR_sys_getpriority __NR_getpriority
310 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
311 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
312 #define __NR_sys_syslog __NR_syslog
313 #if defined(__NR_futex)
314 # define __NR_sys_futex __NR_futex
315 #endif
316 #if defined(__NR_futex_time64)
317 # define __NR_sys_futex_time64 __NR_futex_time64
318 #endif
319 #define __NR_sys_statx __NR_statx
320 
321 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
322 #define __NR__llseek __NR_lseek
323 #endif
324 
325 /* Newer kernel ports have llseek() instead of _llseek() */
326 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
327 #define TARGET_NR__llseek TARGET_NR_llseek
328 #endif
329 
330 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
331 #ifndef TARGET_O_NONBLOCK_MASK
332 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
333 #endif
334 
335 #define __NR_sys_gettid __NR_gettid
336 _syscall0(int, sys_gettid)
337 
338 /* For the 64-bit guest on 32-bit host case we must emulate
339  * getdents using getdents64, because otherwise the host
340  * might hand us back more dirent records than we can fit
341  * into the guest buffer after structure format conversion.
342  * Otherwise we emulate guest getdents with host getdents if the host has it.
343  */
344 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
345 #define EMULATE_GETDENTS_WITH_GETDENTS
346 #endif
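/*
 * Worked example of the problem described above, with illustrative numbers:
 * on a 32-bit host, struct linux_dirent carries 32-bit d_ino/d_off fields,
 * so a record for a short name may take ~20 bytes, while the 64-bit guest
 * layout for the same entry needs ~28 bytes.  A host getdents() call that
 * exactly fills a guest-sized buffer can therefore hand back more bytes
 * than fit once each record is converted.  Host getdents64() records
 * already use 64-bit fields, so conversion does not grow them.
 */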
347 
348 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
349 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
350 #endif
351 #if (defined(TARGET_NR_getdents) && \
352       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
353     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
354 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
355 #endif
356 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
357 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
358           loff_t *, res, uint, wh);
359 #endif
360 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
361 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
362           siginfo_t *, uinfo)
363 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
364 #ifdef __NR_exit_group
365 _syscall1(int,exit_group,int,error_code)
366 #endif
367 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
368 #define __NR_sys_close_range __NR_close_range
369 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
370 #ifndef CLOSE_RANGE_CLOEXEC
371 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
372 #endif
373 #endif
374 #if defined(__NR_futex)
375 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
376           const struct timespec *,timeout,int *,uaddr2,int,val3)
377 #endif
378 #if defined(__NR_futex_time64)
379 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
380           const struct timespec *,timeout,int *,uaddr2,int,val3)
381 #endif
382 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
383 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
384 #endif
385 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
386 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
387                              unsigned int, flags);
388 #endif
389 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
390 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
391 #endif
392 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
393 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
394           unsigned long *, user_mask_ptr);
395 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
396 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
397           unsigned long *, user_mask_ptr);
398 /* sched_attr is not defined in glibc */
399 struct sched_attr {
400     uint32_t size;
401     uint32_t sched_policy;
402     uint64_t sched_flags;
403     int32_t sched_nice;
404     uint32_t sched_priority;
405     uint64_t sched_runtime;
406     uint64_t sched_deadline;
407     uint64_t sched_period;
408     uint32_t sched_util_min;
409     uint32_t sched_util_max;
410 };
411 #define __NR_sys_sched_getattr __NR_sched_getattr
412 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
413           unsigned int, size, unsigned int, flags);
414 #define __NR_sys_sched_setattr __NR_sched_setattr
415 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
416           unsigned int, flags);
417 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
418 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
419 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
420 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
421           const struct sched_param *, param);
422 #define __NR_sys_sched_getparam __NR_sched_getparam
423 _syscall2(int, sys_sched_getparam, pid_t, pid,
424           struct sched_param *, param);
425 #define __NR_sys_sched_setparam __NR_sched_setparam
426 _syscall2(int, sys_sched_setparam, pid_t, pid,
427           const struct sched_param *, param);
428 #define __NR_sys_getcpu __NR_getcpu
429 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
430 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
431           void *, arg);
432 _syscall2(int, capget, struct __user_cap_header_struct *, header,
433           struct __user_cap_data_struct *, data);
434 _syscall2(int, capset, struct __user_cap_header_struct *, header,
435           struct __user_cap_data_struct *, data);
436 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
437 _syscall2(int, ioprio_get, int, which, int, who)
438 #endif
439 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
440 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
441 #endif
442 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
443 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
444 #endif
445 
446 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
447 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
448           unsigned long, idx1, unsigned long, idx2)
449 #endif
450 
451 /*
452  * It is assumed that struct statx is architecture independent.
453  */
454 #if defined(TARGET_NR_statx) && defined(__NR_statx)
455 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
456           unsigned int, mask, struct target_statx *, statxbuf)
457 #endif
458 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
459 _syscall2(int, membarrier, int, cmd, int, flags)
460 #endif
461 
462 static const bitmask_transtbl fcntl_flags_tbl[] = {
463   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
464   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
465   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
466   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
467   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
468   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
469   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
470   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
471   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
472   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
473   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
474   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
475   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
476 #if defined(O_DIRECT)
477   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
478 #endif
479 #if defined(O_NOATIME)
480   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
481 #endif
482 #if defined(O_CLOEXEC)
483   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
484 #endif
485 #if defined(O_PATH)
486   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
487 #endif
488 #if defined(O_TMPFILE)
489   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
490 #endif
491   /* Don't terminate the list prematurely on 64-bit host+guest.  */
492 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
493   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
494 #endif
495   { 0, 0, 0, 0 }
496 };
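/*
 * Each row above is a (target_mask, target_bits, host_mask, host_bits)
 * tuple.  A usage sketch, assuming the generic bitmask translation helpers
 * used elsewhere in linux-user:
 *
 *   int host_flags   = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *   int target_flags = host_to_target_bitmask(host_flags, fcntl_flags_tbl);
 *
 * For example, a guest O_WRONLY within the TARGET_O_ACCMODE field is
 * rewritten to the host O_WRONLY value, which need not be numerically
 * identical on every target/host pair.
 */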
497 
498 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
499 
500 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
501 #if defined(__NR_utimensat)
502 #define __NR_sys_utimensat __NR_utimensat
503 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
504           const struct timespec *,tsp,int,flags)
505 #else
506 static int sys_utimensat(int dirfd, const char *pathname,
507                          const struct timespec times[2], int flags)
508 {
509     errno = ENOSYS;
510     return -1;
511 }
512 #endif
513 #endif /* TARGET_NR_utimensat */
514 
515 #ifdef TARGET_NR_renameat2
516 #if defined(__NR_renameat2)
517 #define __NR_sys_renameat2 __NR_renameat2
518 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
519           const char *, new, unsigned int, flags)
520 #else
521 static int sys_renameat2(int oldfd, const char *old,
522                          int newfd, const char *new, int flags)
523 {
524     if (flags == 0) {
525         return renameat(oldfd, old, newfd, new);
526     }
527     errno = ENOSYS;
528     return -1;
529 }
530 #endif
531 #endif /* TARGET_NR_renameat2 */
532 
533 #ifdef CONFIG_INOTIFY
534 #include <sys/inotify.h>
535 #else
536 /* Userspace can usually survive runtime without inotify */
537 #undef TARGET_NR_inotify_init
538 #undef TARGET_NR_inotify_init1
539 #undef TARGET_NR_inotify_add_watch
540 #undef TARGET_NR_inotify_rm_watch
541 #endif /* CONFIG_INOTIFY  */
542 
543 #if defined(TARGET_NR_prlimit64)
544 #ifndef __NR_prlimit64
545 # define __NR_prlimit64 -1
546 #endif
547 #define __NR_sys_prlimit64 __NR_prlimit64
548 /* The glibc rlimit structure may not be the one used by the underlying syscall */
549 struct host_rlimit64 {
550     uint64_t rlim_cur;
551     uint64_t rlim_max;
552 };
553 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
554           const struct host_rlimit64 *, new_limit,
555           struct host_rlimit64 *, old_limit)
556 #endif
557 
558 
559 #if defined(TARGET_NR_timer_create)
560 /* Maximum of 32 active POSIX timers allowed at any one time. */
561 #define GUEST_TIMER_MAX 32
562 static timer_t g_posix_timers[GUEST_TIMER_MAX];
563 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
564 
565 static inline int next_free_host_timer(void)
566 {
567     int k;
568     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
569         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
570             return k;
571         }
572     }
573     return -1;
574 }
575 
576 static inline void free_host_timer_slot(int id)
577 {
578     qatomic_store_release(g_posix_timer_allocated + id, 0);
579 }
580 #endif
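/*
 * Usage sketch for the slot helpers above (illustrative only; the real
 * timer_create() emulation appears later in this file):
 *
 *   int slot = next_free_host_timer();
 *   if (slot < 0) {
 *       return -TARGET_EAGAIN;                    // all slots in use
 *   }
 *   ret = get_errno(timer_create(clkid, &sev, &g_posix_timers[slot]));
 *   if (is_error(ret)) {
 *       free_host_timer_slot(slot);               // roll back the reservation
 *   }
 *
 * qatomic_xchg() makes the reservation race-free between guest threads, and
 * the release store in free_host_timer_slot() publishes the slot as reusable.
 */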
581 
582 static inline int host_to_target_errno(int host_errno)
583 {
584     switch (host_errno) {
585 #define E(X)  case X: return TARGET_##X;
586 #include "errnos.c.inc"
587 #undef E
588     default:
589         return host_errno;
590     }
591 }
592 
593 static inline int target_to_host_errno(int target_errno)
594 {
595     switch (target_errno) {
596 #define E(X)  case TARGET_##X: return X;
597 #include "errnos.c.inc"
598 #undef E
599     default:
600         return target_errno;
601     }
602 }
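/*
 * errnos.c.inc is an X-macro list, so with the E() definitions above a
 * hypothetical entry E(ENOSYS) expands to
 *
 *   case ENOSYS: return TARGET_ENOSYS;        // in host_to_target_errno()
 *   case TARGET_ENOSYS: return ENOSYS;        // in target_to_host_errno()
 *
 * Errno values without a listed TARGET_* counterpart fall through to the
 * default case and are passed along unchanged.
 */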
603 
604 abi_long get_errno(abi_long ret)
605 {
606     if (ret == -1)
607         return -host_to_target_errno(errno);
608     else
609         return ret;
610 }
611 
612 const char *target_strerror(int err)
613 {
614     if (err == QEMU_ERESTARTSYS) {
615         return "To be restarted";
616     }
617     if (err == QEMU_ESIGRETURN) {
618         return "Successful exit from sigreturn";
619     }
620 
621     return strerror(target_to_host_errno(err));
622 }
623 
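/*
 * Helper for syscalls that take size-extensible structs (e.g. sched_attr):
 * returns 1 if the guest struct at @addr, of user-claimed size @usize,
 * contains only zero bytes beyond the @ksize bytes QEMU understands
 * (or if usize <= ksize), 0 if a non-zero tail byte is found, and
 * -TARGET_EFAULT if the guest memory cannot be read.
 */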
624 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
625 {
626     int i;
627     uint8_t b;
628     if (usize <= ksize) {
629         return 1;
630     }
631     for (i = ksize; i < usize; i++) {
632         if (get_user_u8(b, addr + i)) {
633             return -TARGET_EFAULT;
634         }
635         if (b != 0) {
636             return 0;
637         }
638     }
639     return 1;
640 }
641 
642 #define safe_syscall0(type, name) \
643 static type safe_##name(void) \
644 { \
645     return safe_syscall(__NR_##name); \
646 }
647 
648 #define safe_syscall1(type, name, type1, arg1) \
649 static type safe_##name(type1 arg1) \
650 { \
651     return safe_syscall(__NR_##name, arg1); \
652 }
653 
654 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
655 static type safe_##name(type1 arg1, type2 arg2) \
656 { \
657     return safe_syscall(__NR_##name, arg1, arg2); \
658 }
659 
660 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
661 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
662 { \
663     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
664 }
665 
666 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
667     type4, arg4) \
668 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
669 { \
670     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
671 }
672 
673 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
674     type4, arg4, type5, arg5) \
675 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
676     type5 arg5) \
677 { \
678     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
679 }
680 
681 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
682     type4, arg4, type5, arg5, type6, arg6) \
683 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
684     type5 arg5, type6 arg6) \
685 { \
686     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
687 }
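/*
 * Unlike the plain _syscallN() wrappers above, each safe_##name helper goes
 * through safe_syscall(), so a guest signal arriving just before the host
 * syscall starts makes the call fail with errno set to QEMU_ERESTARTSYS
 * rather than being lost.  A sketch of how the wrappers below are typically
 * consumed:
 *
 *   abi_long ret = get_errno(safe_read(fd, host_buf, count));
 *
 * The common syscall return path then recognises -QEMU_ERESTARTSYS and
 * arranges for the guest syscall to be restarted.
 */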
688 
689 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
690 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
691 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
692               int, flags, mode_t, mode)
693 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
694 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
695               struct rusage *, rusage)
696 #endif
697 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
698               int, options, struct rusage *, rusage)
699 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
700               char **, argv, char **, envp, int, flags)
701 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
702     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
703 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
704               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
705 #endif
706 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
707 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
708               struct timespec *, tsp, const sigset_t *, sigmask,
709               size_t, sigsetsize)
710 #endif
711 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
712               int, maxevents, int, timeout, const sigset_t *, sigmask,
713               size_t, sigsetsize)
714 #if defined(__NR_futex)
715 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
716               const struct timespec *,timeout,int *,uaddr2,int,val3)
717 #endif
718 #if defined(__NR_futex_time64)
719 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
720               const struct timespec *,timeout,int *,uaddr2,int,val3)
721 #endif
722 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
723 safe_syscall2(int, kill, pid_t, pid, int, sig)
724 safe_syscall2(int, tkill, int, tid, int, sig)
725 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
726 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
727 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
728 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
729               unsigned long, pos_l, unsigned long, pos_h)
730 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
731               unsigned long, pos_l, unsigned long, pos_h)
732 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
733               socklen_t, addrlen)
734 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
735               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
736 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
737               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
738 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
739 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
740 safe_syscall2(int, flock, int, fd, int, operation)
741 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
742 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
743               const struct timespec *, uts, size_t, sigsetsize)
744 #endif
745 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
746               int, flags)
747 #if defined(TARGET_NR_nanosleep)
748 safe_syscall2(int, nanosleep, const struct timespec *, req,
749               struct timespec *, rem)
750 #endif
751 #if defined(TARGET_NR_clock_nanosleep) || \
752     defined(TARGET_NR_clock_nanosleep_time64)
753 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
754               const struct timespec *, req, struct timespec *, rem)
755 #endif
756 #ifdef __NR_ipc
757 #ifdef __s390x__
758 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
759               void *, ptr)
760 #else
761 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
762               void *, ptr, long, fifth)
763 #endif
764 #endif
765 #ifdef __NR_msgsnd
766 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
767               int, flags)
768 #endif
769 #ifdef __NR_msgrcv
770 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
771               long, msgtype, int, flags)
772 #endif
773 #ifdef __NR_semtimedop
774 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
775               unsigned, nsops, const struct timespec *, timeout)
776 #endif
777 #if defined(TARGET_NR_mq_timedsend) || \
778     defined(TARGET_NR_mq_timedsend_time64)
779 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
780               size_t, len, unsigned, prio, const struct timespec *, timeout)
781 #endif
782 #if defined(TARGET_NR_mq_timedreceive) || \
783     defined(TARGET_NR_mq_timedreceive_time64)
784 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
785               size_t, len, unsigned *, prio, const struct timespec *, timeout)
786 #endif
787 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
788 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
789               int, outfd, loff_t *, poutoff, size_t, length,
790               unsigned int, flags)
791 #endif
792 
793 /* We do ioctl like this rather than via safe_syscall3 to preserve the
794  * "third argument might be integer or pointer or not present" behaviour of
795  * the libc function.
796  */
797 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
798 /* Similarly for fcntl. Note that callers must always:
799  *  * pass the F_GETLK64 etc. constants rather than the unsuffixed F_GETLK
800  *  * use the flock64 struct rather than the unsuffixed flock
801  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
802  */
803 #ifdef __NR_fcntl64
804 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
805 #else
806 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
807 #endif
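/*
 * A sketch of the calling convention required above (illustrative only; the
 * real fcntl emulation lives further down in this file):
 *
 *   struct flock64 fl64;
 *   ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * Passing F_GETLK64 plus struct flock64 keeps the offsets 64-bit both on
 * 32-bit hosts (via __NR_fcntl64) and on 64-bit hosts, where the plain
 * fcntl syscall already takes 64-bit offsets.
 */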
808 
809 static inline int host_to_target_sock_type(int host_type)
810 {
811     int target_type;
812 
813     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
814     case SOCK_DGRAM:
815         target_type = TARGET_SOCK_DGRAM;
816         break;
817     case SOCK_STREAM:
818         target_type = TARGET_SOCK_STREAM;
819         break;
820     default:
821         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
822         break;
823     }
824 
825 #if defined(SOCK_CLOEXEC)
826     if (host_type & SOCK_CLOEXEC) {
827         target_type |= TARGET_SOCK_CLOEXEC;
828     }
829 #endif
830 
831 #if defined(SOCK_NONBLOCK)
832     if (host_type & SOCK_NONBLOCK) {
833         target_type |= TARGET_SOCK_NONBLOCK;
834     }
835 #endif
836 
837     return target_type;
838 }
839 
840 static abi_ulong target_brk;
841 static abi_ulong target_original_brk;
842 static abi_ulong brk_page;
843 
844 void target_set_brk(abi_ulong new_brk)
845 {
846     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
847     brk_page = HOST_PAGE_ALIGN(target_brk);
848 }
849 
850 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
851 #define DEBUGF_BRK(message, args...)
852 
853 /* do_brk() must return target values and target errnos. */
854 abi_long do_brk(abi_ulong new_brk)
855 {
856     abi_long mapped_addr;
857     abi_ulong new_alloc_size;
858 
859     /* brk pointers are always untagged */
860 
861     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
862 
863     if (!new_brk) {
864         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
865         return target_brk;
866     }
867     if (new_brk < target_original_brk) {
868         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
869                    target_brk);
870         return target_brk;
871     }
872 
873     /* If the new brk is less than the highest page reserved to the
874      * target heap allocation, set it and we're almost done...  */
875     if (new_brk <= brk_page) {
876         /* Heap contents are initialized to zero, as for anonymous
877          * mapped pages.  */
878         if (new_brk > target_brk) {
879             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
880         }
881         target_brk = new_brk;
882         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
883         return target_brk;
884     }
885 
886     /* We need to allocate more memory after the brk... Note that
887      * we don't use MAP_FIXED because that will map over the top of
888      * any existing mapping (like the one with the host libc or qemu
889      * itself); instead we treat "mapped but at wrong address" as
890      * a failure and unmap again.
891      */
892     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
893     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
894                                         PROT_READ|PROT_WRITE,
895                                         MAP_ANON|MAP_PRIVATE, 0, 0));
896 
897     if (mapped_addr == brk_page) {
898         /* Heap contents are initialized to zero, as for anonymous
899          * mapped pages.  Technically the new pages are already
900          * initialized to zero since they *are* anonymous mapped
901          * pages; however, we have to take care with the contents that
902          * come from the remaining part of the previous page: it may
903          * contain garbage data from previous heap usage (grown and
904          * then shrunk).  */
905         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
906 
907         target_brk = new_brk;
908         brk_page = HOST_PAGE_ALIGN(target_brk);
909         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
910             target_brk);
911         return target_brk;
912     } else if (mapped_addr != -1) {
913         /* Mapped but at wrong address, meaning there wasn't actually
914          * enough space for this brk.
915          */
916         target_munmap(mapped_addr, new_alloc_size);
917         mapped_addr = -1;
918         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
919     } else {
921         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
922     }
923 
924 #if defined(TARGET_ALPHA)
925     /* We (partially) emulate OSF/1 on Alpha, which requires we
926        return a proper errno, not an unchanged brk value.  */
927     return -TARGET_ENOMEM;
928 #endif
929     /* For everything else, return the previous break. */
930     return target_brk;
931 }
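/*
 * Worked example of the growth path above, with illustrative addresses and
 * assuming 4 KiB host pages: with target_brk == 0x500400 and
 * brk_page == 0x501000, a guest brk(0x501800) lies above brk_page, so we
 * try to map one extra host page at 0x501000.  If the kernel places it
 * exactly there, the stale tail of the old last page (0x500400..0x501000)
 * is re-zeroed, target_brk becomes 0x501800 and brk_page becomes 0x502000;
 * a later brk(0x501c00) then stays below brk_page and only needs the
 * memset() in the fast path.  If the kernel maps the page anywhere else,
 * we unmap it again and report the old break (or ENOMEM on Alpha).
 */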
932 
933 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
934     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
935 static inline abi_long copy_from_user_fdset(fd_set *fds,
936                                             abi_ulong target_fds_addr,
937                                             int n)
938 {
939     int i, nw, j, k;
940     abi_ulong b, *target_fds;
941 
942     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
943     if (!(target_fds = lock_user(VERIFY_READ,
944                                  target_fds_addr,
945                                  sizeof(abi_ulong) * nw,
946                                  1)))
947         return -TARGET_EFAULT;
948 
949     FD_ZERO(fds);
950     k = 0;
951     for (i = 0; i < nw; i++) {
952         /* grab the abi_ulong */
953         __get_user(b, &target_fds[i]);
954         for (j = 0; j < TARGET_ABI_BITS; j++) {
955             /* check the bit inside the abi_ulong */
956             if ((b >> j) & 1)
957                 FD_SET(k, fds);
958             k++;
959         }
960     }
961 
962     unlock_user(target_fds, target_fds_addr, 0);
963 
964     return 0;
965 }
966 
967 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
968                                                  abi_ulong target_fds_addr,
969                                                  int n)
970 {
971     if (target_fds_addr) {
972         if (copy_from_user_fdset(fds, target_fds_addr, n))
973             return -TARGET_EFAULT;
974         *fds_ptr = fds;
975     } else {
976         *fds_ptr = NULL;
977     }
978     return 0;
979 }
980 
981 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
982                                           const fd_set *fds,
983                                           int n)
984 {
985     int i, nw, j, k;
986     abi_long v;
987     abi_ulong *target_fds;
988 
989     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
990     if (!(target_fds = lock_user(VERIFY_WRITE,
991                                  target_fds_addr,
992                                  sizeof(abi_ulong) * nw,
993                                  0)))
994         return -TARGET_EFAULT;
995 
996     k = 0;
997     for (i = 0; i < nw; i++) {
998         v = 0;
999         for (j = 0; j < TARGET_ABI_BITS; j++) {
1000             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1001             k++;
1002         }
1003         __put_user(v, &target_fds[i]);
1004     }
1005 
1006     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1007 
1008     return 0;
1009 }
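/*
 * Layout handled by the two helpers above: the guest fd_set is an array of
 * abi_ulong words, so descriptor k lives in word k / TARGET_ABI_BITS at bit
 * position k % TARGET_ABI_BITS.  For instance, with a 32-bit guest, fd 33
 * is bit 1 of the second abi_ulong.  The host fd_set is rebuilt with
 * FD_SET()/FD_ISSET() so no assumption is made about the host libc layout.
 */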
1010 #endif
1011 
1012 #if defined(__alpha__)
1013 #define HOST_HZ 1024
1014 #else
1015 #define HOST_HZ 100
1016 #endif
1017 
1018 static inline abi_long host_to_target_clock_t(long ticks)
1019 {
1020 #if HOST_HZ == TARGET_HZ
1021     return ticks;
1022 #else
1023     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1024 #endif
1025 }
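/*
 * Example of the scaling above: on an Alpha host (HOST_HZ == 1024)
 * reporting to a 100 Hz target, host_to_target_clock_t(2048) is
 * (2048 * 100) / 1024 == 200, i.e. two seconds of CPU time on the host
 * still reads as two seconds from the guest's point of view.
 */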
1026 
1027 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1028                                              const struct rusage *rusage)
1029 {
1030     struct target_rusage *target_rusage;
1031 
1032     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1033         return -TARGET_EFAULT;
1034     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1035     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1036     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1037     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1038     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1039     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1040     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1041     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1042     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1043     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1044     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1045     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1046     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1047     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1048     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1049     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1050     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1051     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1052     unlock_user_struct(target_rusage, target_addr, 1);
1053 
1054     return 0;
1055 }
1056 
1057 #ifdef TARGET_NR_setrlimit
1058 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1059 {
1060     abi_ulong target_rlim_swap;
1061     rlim_t result;
1062 
1063     target_rlim_swap = tswapal(target_rlim);
1064     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1065         return RLIM_INFINITY;
1066 
1067     result = target_rlim_swap;
1068     if (target_rlim_swap != (rlim_t)result)
1069         return RLIM_INFINITY;
1070 
1071     return result;
1072 }
1073 #endif
1074 
1075 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1076 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1077 {
1078     abi_ulong target_rlim_swap;
1079     abi_ulong result;
1080 
1081     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1082         target_rlim_swap = TARGET_RLIM_INFINITY;
1083     else
1084         target_rlim_swap = rlim;
1085     result = tswapal(target_rlim_swap);
1086 
1087     return result;
1088 }
1089 #endif
1090 
1091 static inline int target_to_host_resource(int code)
1092 {
1093     switch (code) {
1094     case TARGET_RLIMIT_AS:
1095         return RLIMIT_AS;
1096     case TARGET_RLIMIT_CORE:
1097         return RLIMIT_CORE;
1098     case TARGET_RLIMIT_CPU:
1099         return RLIMIT_CPU;
1100     case TARGET_RLIMIT_DATA:
1101         return RLIMIT_DATA;
1102     case TARGET_RLIMIT_FSIZE:
1103         return RLIMIT_FSIZE;
1104     case TARGET_RLIMIT_LOCKS:
1105         return RLIMIT_LOCKS;
1106     case TARGET_RLIMIT_MEMLOCK:
1107         return RLIMIT_MEMLOCK;
1108     case TARGET_RLIMIT_MSGQUEUE:
1109         return RLIMIT_MSGQUEUE;
1110     case TARGET_RLIMIT_NICE:
1111         return RLIMIT_NICE;
1112     case TARGET_RLIMIT_NOFILE:
1113         return RLIMIT_NOFILE;
1114     case TARGET_RLIMIT_NPROC:
1115         return RLIMIT_NPROC;
1116     case TARGET_RLIMIT_RSS:
1117         return RLIMIT_RSS;
1118     case TARGET_RLIMIT_RTPRIO:
1119         return RLIMIT_RTPRIO;
1120 #ifdef RLIMIT_RTTIME
1121     case TARGET_RLIMIT_RTTIME:
1122         return RLIMIT_RTTIME;
1123 #endif
1124     case TARGET_RLIMIT_SIGPENDING:
1125         return RLIMIT_SIGPENDING;
1126     case TARGET_RLIMIT_STACK:
1127         return RLIMIT_STACK;
1128     default:
1129         return code;
1130     }
1131 }
1132 
1133 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1134                                               abi_ulong target_tv_addr)
1135 {
1136     struct target_timeval *target_tv;
1137 
1138     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1139         return -TARGET_EFAULT;
1140     }
1141 
1142     __get_user(tv->tv_sec, &target_tv->tv_sec);
1143     __get_user(tv->tv_usec, &target_tv->tv_usec);
1144 
1145     unlock_user_struct(target_tv, target_tv_addr, 0);
1146 
1147     return 0;
1148 }
1149 
1150 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1151                                             const struct timeval *tv)
1152 {
1153     struct target_timeval *target_tv;
1154 
1155     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1156         return -TARGET_EFAULT;
1157     }
1158 
1159     __put_user(tv->tv_sec, &target_tv->tv_sec);
1160     __put_user(tv->tv_usec, &target_tv->tv_usec);
1161 
1162     unlock_user_struct(target_tv, target_tv_addr, 1);
1163 
1164     return 0;
1165 }
1166 
1167 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1168 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1169                                                 abi_ulong target_tv_addr)
1170 {
1171     struct target__kernel_sock_timeval *target_tv;
1172 
1173     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1174         return -TARGET_EFAULT;
1175     }
1176 
1177     __get_user(tv->tv_sec, &target_tv->tv_sec);
1178     __get_user(tv->tv_usec, &target_tv->tv_usec);
1179 
1180     unlock_user_struct(target_tv, target_tv_addr, 0);
1181 
1182     return 0;
1183 }
1184 #endif
1185 
1186 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1187                                               const struct timeval *tv)
1188 {
1189     struct target__kernel_sock_timeval *target_tv;
1190 
1191     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1192         return -TARGET_EFAULT;
1193     }
1194 
1195     __put_user(tv->tv_sec, &target_tv->tv_sec);
1196     __put_user(tv->tv_usec, &target_tv->tv_usec);
1197 
1198     unlock_user_struct(target_tv, target_tv_addr, 1);
1199 
1200     return 0;
1201 }
1202 
1203 #if defined(TARGET_NR_futex) || \
1204     defined(TARGET_NR_rt_sigtimedwait) || \
1205     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1206     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1207     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1208     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1209     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1210     defined(TARGET_NR_timer_settime) || \
1211     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1212 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1213                                                abi_ulong target_addr)
1214 {
1215     struct target_timespec *target_ts;
1216 
1217     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1218         return -TARGET_EFAULT;
1219     }
1220     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1221     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1222     unlock_user_struct(target_ts, target_addr, 0);
1223     return 0;
1224 }
1225 #endif
1226 
1227 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1228     defined(TARGET_NR_timer_settime64) || \
1229     defined(TARGET_NR_mq_timedsend_time64) || \
1230     defined(TARGET_NR_mq_timedreceive_time64) || \
1231     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1232     defined(TARGET_NR_clock_nanosleep_time64) || \
1233     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1234     defined(TARGET_NR_utimensat) || \
1235     defined(TARGET_NR_utimensat_time64) || \
1236     defined(TARGET_NR_semtimedop_time64) || \
1237     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1238 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1239                                                  abi_ulong target_addr)
1240 {
1241     struct target__kernel_timespec *target_ts;
1242 
1243     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1244         return -TARGET_EFAULT;
1245     }
1246     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1247     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1248     /* in 32bit mode, this drops the padding */
1249     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1250     unlock_user_struct(target_ts, target_addr, 0);
1251     return 0;
1252 }
1253 #endif
1254 
1255 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1256                                                struct timespec *host_ts)
1257 {
1258     struct target_timespec *target_ts;
1259 
1260     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1261         return -TARGET_EFAULT;
1262     }
1263     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1264     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1265     unlock_user_struct(target_ts, target_addr, 1);
1266     return 0;
1267 }
1268 
1269 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1270                                                  struct timespec *host_ts)
1271 {
1272     struct target__kernel_timespec *target_ts;
1273 
1274     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1275         return -TARGET_EFAULT;
1276     }
1277     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1278     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1279     unlock_user_struct(target_ts, target_addr, 1);
1280     return 0;
1281 }
1282 
1283 #if defined(TARGET_NR_gettimeofday)
1284 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1285                                              struct timezone *tz)
1286 {
1287     struct target_timezone *target_tz;
1288 
1289     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1290         return -TARGET_EFAULT;
1291     }
1292 
1293     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1294     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1295 
1296     unlock_user_struct(target_tz, target_tz_addr, 1);
1297 
1298     return 0;
1299 }
1300 #endif
1301 
1302 #if defined(TARGET_NR_settimeofday)
1303 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1304                                                abi_ulong target_tz_addr)
1305 {
1306     struct target_timezone *target_tz;
1307 
1308     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1309         return -TARGET_EFAULT;
1310     }
1311 
1312     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1313     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1314 
1315     unlock_user_struct(target_tz, target_tz_addr, 0);
1316 
1317     return 0;
1318 }
1319 #endif
1320 
1321 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1322 #include <mqueue.h>
1323 
1324 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1325                                               abi_ulong target_mq_attr_addr)
1326 {
1327     struct target_mq_attr *target_mq_attr;
1328 
1329     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1330                           target_mq_attr_addr, 1))
1331         return -TARGET_EFAULT;
1332 
1333     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1334     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1335     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1336     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1337 
1338     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1339 
1340     return 0;
1341 }
1342 
1343 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1344                                             const struct mq_attr *attr)
1345 {
1346     struct target_mq_attr *target_mq_attr;
1347 
1348     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1349                           target_mq_attr_addr, 0))
1350         return -TARGET_EFAULT;
1351 
1352     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1353     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1354     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1355     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1356 
1357     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1358 
1359     return 0;
1360 }
1361 #endif
1362 
1363 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1364 /* do_select() must return target values and target errnos. */
1365 static abi_long do_select(int n,
1366                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1367                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1368 {
1369     fd_set rfds, wfds, efds;
1370     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1371     struct timeval tv;
1372     struct timespec ts, *ts_ptr;
1373     abi_long ret;
1374 
1375     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1376     if (ret) {
1377         return ret;
1378     }
1379     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1380     if (ret) {
1381         return ret;
1382     }
1383     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1384     if (ret) {
1385         return ret;
1386     }
1387 
1388     if (target_tv_addr) {
1389         if (copy_from_user_timeval(&tv, target_tv_addr))
1390             return -TARGET_EFAULT;
1391         ts.tv_sec = tv.tv_sec;
1392         ts.tv_nsec = tv.tv_usec * 1000;
1393         ts_ptr = &ts;
1394     } else {
1395         ts_ptr = NULL;
1396     }
1397 
1398     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1399                                   ts_ptr, NULL));
1400 
1401     if (!is_error(ret)) {
1402         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1403             return -TARGET_EFAULT;
1404         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1405             return -TARGET_EFAULT;
1406         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1407             return -TARGET_EFAULT;
1408 
1409         if (target_tv_addr) {
1410             tv.tv_sec = ts.tv_sec;
1411             tv.tv_usec = ts.tv_nsec / 1000;
1412             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1413                 return -TARGET_EFAULT;
1414             }
1415         }
1416     }
1417 
1418     return ret;
1419 }
1420 
1421 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1422 static abi_long do_old_select(abi_ulong arg1)
1423 {
1424     struct target_sel_arg_struct *sel;
1425     abi_ulong inp, outp, exp, tvp;
1426     long nsel;
1427 
1428     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1429         return -TARGET_EFAULT;
1430     }
1431 
1432     nsel = tswapal(sel->n);
1433     inp = tswapal(sel->inp);
1434     outp = tswapal(sel->outp);
1435     exp = tswapal(sel->exp);
1436     tvp = tswapal(sel->tvp);
1437 
1438     unlock_user_struct(sel, arg1, 0);
1439 
1440     return do_select(nsel, inp, outp, exp, tvp);
1441 }
1442 #endif
1443 #endif
1444 
1445 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1446 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1447                             abi_long arg4, abi_long arg5, abi_long arg6,
1448                             bool time64)
1449 {
1450     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1451     fd_set rfds, wfds, efds;
1452     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1453     struct timespec ts, *ts_ptr;
1454     abi_long ret;
1455 
1456     /*
1457      * The 6th arg is actually two args smashed together,
1458      * so we cannot use the C library.
1459      */
1460     struct {
1461         sigset_t *set;
1462         size_t size;
1463     } sig, *sig_ptr;
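    /*
     * What the guest actually passes in arg6 is a pointer to two abi_ulong
     * values laid out back to back, roughly (field names hypothetical):
     *
     *   struct {
     *       abi_ulong sigset_addr;   // guest pointer to the sigset
     *       abi_ulong sigset_size;   // size the guest claims for it
     *   };
     *
     * which is why the code below reads arg7[0] and arg7[1] and byte-swaps
     * each with tswapal() rather than handing arg6 straight to the host.
     */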
1464 
1465     abi_ulong arg_sigset, arg_sigsize, *arg7;
1466 
1467     n = arg1;
1468     rfd_addr = arg2;
1469     wfd_addr = arg3;
1470     efd_addr = arg4;
1471     ts_addr = arg5;
1472 
1473     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1474     if (ret) {
1475         return ret;
1476     }
1477     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1478     if (ret) {
1479         return ret;
1480     }
1481     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1482     if (ret) {
1483         return ret;
1484     }
1485 
1486     /*
1487      * This takes a timespec, and not a timeval, so we cannot
1488      * use the do_select() helper ...
1489      */
1490     if (ts_addr) {
1491         if (time64) {
1492             if (target_to_host_timespec64(&ts, ts_addr)) {
1493                 return -TARGET_EFAULT;
1494             }
1495         } else {
1496             if (target_to_host_timespec(&ts, ts_addr)) {
1497                 return -TARGET_EFAULT;
1498             }
1499         }
1500         ts_ptr = &ts;
1501     } else {
1502         ts_ptr = NULL;
1503     }
1504 
1505     /* Extract the two packed args for the sigset */
1506     sig_ptr = NULL;
1507     if (arg6) {
1508         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1509         if (!arg7) {
1510             return -TARGET_EFAULT;
1511         }
1512         arg_sigset = tswapal(arg7[0]);
1513         arg_sigsize = tswapal(arg7[1]);
1514         unlock_user(arg7, arg6, 0);
1515 
1516         if (arg_sigset) {
1517             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1518             if (ret != 0) {
1519                 return ret;
1520             }
1521             sig_ptr = &sig;
1522             sig.size = SIGSET_T_SIZE;
1523         }
1524     }
1525 
1526     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1527                                   ts_ptr, sig_ptr));
1528 
1529     if (sig_ptr) {
1530         finish_sigsuspend_mask(ret);
1531     }
1532 
1533     if (!is_error(ret)) {
1534         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1535             return -TARGET_EFAULT;
1536         }
1537         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1538             return -TARGET_EFAULT;
1539         }
1540         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1541             return -TARGET_EFAULT;
1542         }
1543         if (time64) {
1544             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1545                 return -TARGET_EFAULT;
1546             }
1547         } else {
1548             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1549                 return -TARGET_EFAULT;
1550             }
1551         }
1552     }
1553     return ret;
1554 }
1555 #endif
1556 
1557 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1558     defined(TARGET_NR_ppoll_time64)
1559 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1560                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1561 {
1562     struct target_pollfd *target_pfd;
1563     unsigned int nfds = arg2;
1564     struct pollfd *pfd;
1565     unsigned int i;
1566     abi_long ret;
1567 
1568     pfd = NULL;
1569     target_pfd = NULL;
1570     if (nfds) {
1571         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1572             return -TARGET_EINVAL;
1573         }
1574         target_pfd = lock_user(VERIFY_WRITE, arg1,
1575                                sizeof(struct target_pollfd) * nfds, 1);
1576         if (!target_pfd) {
1577             return -TARGET_EFAULT;
1578         }
1579 
1580         pfd = alloca(sizeof(struct pollfd) * nfds);
1581         for (i = 0; i < nfds; i++) {
1582             pfd[i].fd = tswap32(target_pfd[i].fd);
1583             pfd[i].events = tswap16(target_pfd[i].events);
1584         }
1585     }
1586     if (ppoll) {
1587         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1588         sigset_t *set = NULL;
1589 
1590         if (arg3) {
1591             if (time64) {
1592                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1593                     unlock_user(target_pfd, arg1, 0);
1594                     return -TARGET_EFAULT;
1595                 }
1596             } else {
1597                 if (target_to_host_timespec(timeout_ts, arg3)) {
1598                     unlock_user(target_pfd, arg1, 0);
1599                     return -TARGET_EFAULT;
1600                 }
1601             }
1602         } else {
1603             timeout_ts = NULL;
1604         }
1605 
1606         if (arg4) {
1607             ret = process_sigsuspend_mask(&set, arg4, arg5);
1608             if (ret != 0) {
1609                 unlock_user(target_pfd, arg1, 0);
1610                 return ret;
1611             }
1612         }
1613 
1614         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1615                                    set, SIGSET_T_SIZE));
1616 
1617         if (set) {
1618             finish_sigsuspend_mask(ret);
1619         }
1620         if (!is_error(ret) && arg3) {
1621             if (time64) {
1622                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1623                     return -TARGET_EFAULT;
1624                 }
1625             } else {
1626                 if (host_to_target_timespec(arg3, timeout_ts)) {
1627                     return -TARGET_EFAULT;
1628                 }
1629             }
1630         }
1631     } else {
1632           struct timespec ts, *pts;
1633 
1634           if (arg3 >= 0) {
1635               /* Convert ms to secs, ns */
1636               ts.tv_sec = arg3 / 1000;
1637               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
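              /* e.g. arg3 == 1500 yields ts = { .tv_sec = 1, .tv_nsec = 500000000 } */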
1638               pts = &ts;
1639           } else {
1640               /* A negative poll() timeout means "infinite" */
1641               pts = NULL;
1642           }
1643           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1644     }
1645 
1646     if (!is_error(ret)) {
1647         for (i = 0; i < nfds; i++) {
1648             target_pfd[i].revents = tswap16(pfd[i].revents);
1649         }
1650     }
1651     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1652     return ret;
1653 }
1654 #endif
1655 
1656 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1657                         int flags, int is_pipe2)
1658 {
1659     int host_pipe[2];
1660     abi_long ret;
1661     ret = pipe2(host_pipe, flags);
1662 
1663     if (is_error(ret))
1664         return get_errno(ret);
1665 
1666     /* Several targets have special calling conventions for the original
1667        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
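    /* For those targets the two descriptors come back in registers:
       fd[0] as the syscall return value and fd[1] in a second register,
       so nothing is written to guest memory here.  */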
1668     if (!is_pipe2) {
1669 #if defined(TARGET_ALPHA)
1670         cpu_env->ir[IR_A4] = host_pipe[1];
1671         return host_pipe[0];
1672 #elif defined(TARGET_MIPS)
1673         cpu_env->active_tc.gpr[3] = host_pipe[1];
1674         return host_pipe[0];
1675 #elif defined(TARGET_SH4)
1676         cpu_env->gregs[1] = host_pipe[1];
1677         return host_pipe[0];
1678 #elif defined(TARGET_SPARC)
1679         cpu_env->regwptr[1] = host_pipe[1];
1680         return host_pipe[0];
1681 #endif
1682     }
1683 
1684     if (put_user_s32(host_pipe[0], pipedes)
1685         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1686         return -TARGET_EFAULT;
1687     return get_errno(ret);
1688 }
1689 
1690 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1691                                               abi_ulong target_addr,
1692                                               socklen_t len)
1693 {
1694     struct target_ip_mreqn *target_smreqn;
1695 
1696     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1697     if (!target_smreqn)
1698         return -TARGET_EFAULT;
1699     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1700     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
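    /* The guest may pass either a struct ip_mreq (no ifindex) or the larger
       struct ip_mreqn; only the latter carries imr_ifindex, hence the
       length check.  */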
1701     if (len == sizeof(struct target_ip_mreqn))
1702         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1703     unlock_user(target_smreqn, target_addr, 0);
1704 
1705     return 0;
1706 }
1707 
1708 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1709                                                abi_ulong target_addr,
1710                                                socklen_t len)
1711 {
1712     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1713     sa_family_t sa_family;
1714     struct target_sockaddr *target_saddr;
1715 
1716     if (fd_trans_target_to_host_addr(fd)) {
1717         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1718     }
1719 
1720     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1721     if (!target_saddr)
1722         return -TARGET_EFAULT;
1723 
1724     sa_family = tswap16(target_saddr->sa_family);
1725 
1726     /* Oops. The caller might send an incomplete sun_path; sun_path
1727      * must be terminated by \0 (see the manual page), but
1728      * unfortunately it is quite common to specify sockaddr_un
1729      * length as "strlen(x->sun_path)" while it should be
1730      * "strlen(...) + 1". We'll fix that here if needed.
1731      * The Linux kernel has a similar feature.
1732      */
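    /*
     * For example, a guest passing len == offsetof(struct sockaddr_un,
     * sun_path) + strlen(path), with the terminating NUL present in its
     * buffer but not counted, gets len bumped by one below.
     */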
1733 
1734     if (sa_family == AF_UNIX) {
1735         if (len < unix_maxlen && len > 0) {
1736             char *cp = (char*)target_saddr;
1737 
1738             if (cp[len - 1] && !cp[len])
1739                 len++;
1740         }
1741         if (len > unix_maxlen)
1742             len = unix_maxlen;
1743     }
1744 
1745     memcpy(addr, target_saddr, len);
1746     addr->sa_family = sa_family;
1747     if (sa_family == AF_NETLINK) {
1748         struct sockaddr_nl *nladdr;
1749 
1750         nladdr = (struct sockaddr_nl *)addr;
1751         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1752         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1753     } else if (sa_family == AF_PACKET) {
1754         struct target_sockaddr_ll *lladdr;
1755 
1756         lladdr = (struct target_sockaddr_ll *)addr;
1757         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1758         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1759     }
1760     unlock_user(target_saddr, target_addr, 0);
1761 
1762     return 0;
1763 }
1764 
1765 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1766                                                struct sockaddr *addr,
1767                                                socklen_t len)
1768 {
1769     struct target_sockaddr *target_saddr;
1770 
1771     if (len == 0) {
1772         return 0;
1773     }
1774     assert(addr);
1775 
1776     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1777     if (!target_saddr)
1778         return -TARGET_EFAULT;
1779     memcpy(target_saddr, addr, len);
1780     if (len >= offsetof(struct target_sockaddr, sa_family) +
1781         sizeof(target_saddr->sa_family)) {
1782         target_saddr->sa_family = tswap16(addr->sa_family);
1783     }
1784     if (addr->sa_family == AF_NETLINK &&
1785         len >= sizeof(struct target_sockaddr_nl)) {
1786         struct target_sockaddr_nl *target_nl =
1787                (struct target_sockaddr_nl *)target_saddr;
1788         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1789         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1790     } else if (addr->sa_family == AF_PACKET) {
1791         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1792         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1793         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1794     } else if (addr->sa_family == AF_INET6 &&
1795                len >= sizeof(struct target_sockaddr_in6)) {
1796         struct target_sockaddr_in6 *target_in6 =
1797                (struct target_sockaddr_in6 *)target_saddr;
1798         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1799     }
1800     unlock_user(target_saddr, target_addr, len);
1801 
1802     return 0;
1803 }
1804 
1805 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1806                                            struct target_msghdr *target_msgh)
1807 {
1808     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1809     abi_long msg_controllen;
1810     abi_ulong target_cmsg_addr;
1811     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1812     socklen_t space = 0;
1813 
1814     msg_controllen = tswapal(target_msgh->msg_controllen);
1815     if (msg_controllen < sizeof (struct target_cmsghdr))
1816         goto the_end;
1817     target_cmsg_addr = tswapal(target_msgh->msg_control);
1818     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1819     target_cmsg_start = target_cmsg;
1820     if (!target_cmsg)
1821         return -TARGET_EFAULT;
1822 
1823     while (cmsg && target_cmsg) {
1824         void *data = CMSG_DATA(cmsg);
1825         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1826 
1827         int len = tswapal(target_cmsg->cmsg_len)
1828             - sizeof(struct target_cmsghdr);
1829 
1830         space += CMSG_SPACE(len);
1831         if (space > msgh->msg_controllen) {
1832             space -= CMSG_SPACE(len);
1833             /* This is a QEMU bug, since we allocated the payload
1834              * area ourselves (unlike overflow in host-to-target
1835              * conversion, which is just the guest giving us a buffer
1836              * that's too small). It can't happen for the payload types
1837              * we currently support; if it becomes an issue in future
1838              * we would need to improve our allocation strategy to
1839              * something more intelligent than "twice the size of the
1840              * target buffer we're reading from".
1841              */
1842             qemu_log_mask(LOG_UNIMP,
1843                           ("Unsupported ancillary data %d/%d: "
1844                            "unhandled msg size\n"),
1845                           tswap32(target_cmsg->cmsg_level),
1846                           tswap32(target_cmsg->cmsg_type));
1847             break;
1848         }
1849 
1850         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1851             cmsg->cmsg_level = SOL_SOCKET;
1852         } else {
1853             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1854         }
1855         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1856         cmsg->cmsg_len = CMSG_LEN(len);
1857 
1858         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1859             int *fd = (int *)data;
1860             int *target_fd = (int *)target_data;
1861             int i, numfds = len / sizeof(int);
1862 
1863             for (i = 0; i < numfds; i++) {
1864                 __get_user(fd[i], target_fd + i);
1865             }
1866         } else if (cmsg->cmsg_level == SOL_SOCKET
1867                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1868             struct ucred *cred = (struct ucred *)data;
1869             struct target_ucred *target_cred =
1870                 (struct target_ucred *)target_data;
1871 
1872             __get_user(cred->pid, &target_cred->pid);
1873             __get_user(cred->uid, &target_cred->uid);
1874             __get_user(cred->gid, &target_cred->gid);
1875         } else {
1876             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1877                           cmsg->cmsg_level, cmsg->cmsg_type);
1878             memcpy(data, target_data, len);
1879         }
1880 
1881         cmsg = CMSG_NXTHDR(msgh, cmsg);
1882         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1883                                          target_cmsg_start);
1884     }
1885     unlock_user(target_cmsg, target_cmsg_addr, 0);
1886  the_end:
1887     msgh->msg_controllen = space;
1888     return 0;
1889 }
1890 
1891 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1892                                            struct msghdr *msgh)
1893 {
1894     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1895     abi_long msg_controllen;
1896     abi_ulong target_cmsg_addr;
1897     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1898     socklen_t space = 0;
1899 
1900     msg_controllen = tswapal(target_msgh->msg_controllen);
1901     if (msg_controllen < sizeof (struct target_cmsghdr))
1902         goto the_end;
1903     target_cmsg_addr = tswapal(target_msgh->msg_control);
1904     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1905     target_cmsg_start = target_cmsg;
1906     if (!target_cmsg)
1907         return -TARGET_EFAULT;
1908 
1909     while (cmsg && target_cmsg) {
1910         void *data = CMSG_DATA(cmsg);
1911         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1912 
1913         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1914         int tgt_len, tgt_space;
1915 
1916         /* We never copy a half-header but may copy half-data;
1917          * this is Linux's behaviour in put_cmsg(). Note that
1918          * truncation here is a guest problem (which we report
1919          * to the guest via the CTRUNC bit), unlike truncation
1920          * in target_to_host_cmsg, which is a QEMU bug.
1921          */
1922         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1923             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1924             break;
1925         }
1926 
1927         if (cmsg->cmsg_level == SOL_SOCKET) {
1928             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1929         } else {
1930             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1931         }
1932         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1933 
1934         /* Payload types which need a different size of payload on
1935          * the target must adjust tgt_len here.
1936          */
1937         tgt_len = len;
1938         switch (cmsg->cmsg_level) {
1939         case SOL_SOCKET:
1940             switch (cmsg->cmsg_type) {
1941             case SO_TIMESTAMP:
1942                 tgt_len = sizeof(struct target_timeval);
1943                 break;
1944             default:
1945                 break;
1946             }
1947             break;
1948         default:
1949             break;
1950         }
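        /*
         * For example, SO_TIMESTAMP carries a 16-byte struct timeval on a
         * 64-bit host but only an 8-byte target_timeval on a 32-bit guest,
         * so tgt_len shrinks accordingly.
         */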
1951 
1952         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1953             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1954             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1955         }
1956 
1957         /* We must now copy-and-convert len bytes of payload
1958          * into tgt_len bytes of destination space. Bear in mind
1959          * that in both source and destination we may be dealing
1960          * with a truncated value!
1961          */
1962         switch (cmsg->cmsg_level) {
1963         case SOL_SOCKET:
1964             switch (cmsg->cmsg_type) {
1965             case SCM_RIGHTS:
1966             {
1967                 int *fd = (int *)data;
1968                 int *target_fd = (int *)target_data;
1969                 int i, numfds = tgt_len / sizeof(int);
1970 
1971                 for (i = 0; i < numfds; i++) {
1972                     __put_user(fd[i], target_fd + i);
1973                 }
1974                 break;
1975             }
1976             case SO_TIMESTAMP:
1977             {
1978                 struct timeval *tv = (struct timeval *)data;
1979                 struct target_timeval *target_tv =
1980                     (struct target_timeval *)target_data;
1981 
1982                 if (len != sizeof(struct timeval) ||
1983                     tgt_len != sizeof(struct target_timeval)) {
1984                     goto unimplemented;
1985                 }
1986 
1987                 /* copy struct timeval to target */
1988                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1989                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1990                 break;
1991             }
1992             case SCM_CREDENTIALS:
1993             {
1994                 struct ucred *cred = (struct ucred *)data;
1995                 struct target_ucred *target_cred =
1996                     (struct target_ucred *)target_data;
1997 
1998                 __put_user(cred->pid, &target_cred->pid);
1999                 __put_user(cred->uid, &target_cred->uid);
2000                 __put_user(cred->gid, &target_cred->gid);
2001                 break;
2002             }
2003             default:
2004                 goto unimplemented;
2005             }
2006             break;
2007 
2008         case SOL_IP:
2009             switch (cmsg->cmsg_type) {
2010             case IP_TTL:
2011             {
2012                 uint32_t *v = (uint32_t *)data;
2013                 uint32_t *t_int = (uint32_t *)target_data;
2014 
2015                 if (len != sizeof(uint32_t) ||
2016                     tgt_len != sizeof(uint32_t)) {
2017                     goto unimplemented;
2018                 }
2019                 __put_user(*v, t_int);
2020                 break;
2021             }
2022             case IP_RECVERR:
2023             {
2024                 struct errhdr_t {
2025                    struct sock_extended_err ee;
2026                    struct sockaddr_in offender;
2027                 };
2028                 struct errhdr_t *errh = (struct errhdr_t *)data;
2029                 struct errhdr_t *target_errh =
2030                     (struct errhdr_t *)target_data;
2031 
2032                 if (len != sizeof(struct errhdr_t) ||
2033                     tgt_len != sizeof(struct errhdr_t)) {
2034                     goto unimplemented;
2035                 }
2036                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2037                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2038                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2039                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2040                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2041                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2042                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2043                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2044                     (void *) &errh->offender, sizeof(errh->offender));
2045                 break;
2046             }
2047             default:
2048                 goto unimplemented;
2049             }
2050             break;
2051 
2052         case SOL_IPV6:
2053             switch (cmsg->cmsg_type) {
2054             case IPV6_HOPLIMIT:
2055             {
2056                 uint32_t *v = (uint32_t *)data;
2057                 uint32_t *t_int = (uint32_t *)target_data;
2058 
2059                 if (len != sizeof(uint32_t) ||
2060                     tgt_len != sizeof(uint32_t)) {
2061                     goto unimplemented;
2062                 }
2063                 __put_user(*v, t_int);
2064                 break;
2065             }
2066             case IPV6_RECVERR:
2067             {
2068                 struct errhdr6_t {
2069                    struct sock_extended_err ee;
2070                    struct sockaddr_in6 offender;
2071                 };
2072                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2073                 struct errhdr6_t *target_errh =
2074                     (struct errhdr6_t *)target_data;
2075 
2076                 if (len != sizeof(struct errhdr6_t) ||
2077                     tgt_len != sizeof(struct errhdr6_t)) {
2078                     goto unimplemented;
2079                 }
2080                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2081                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2082                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2083                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2084                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2085                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2086                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2087                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2088                     (void *) &errh->offender, sizeof(errh->offender));
2089                 break;
2090             }
2091             default:
2092                 goto unimplemented;
2093             }
2094             break;
2095 
2096         default:
2097         unimplemented:
2098             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2099                           cmsg->cmsg_level, cmsg->cmsg_type);
2100             memcpy(target_data, data, MIN(len, tgt_len));
2101             if (tgt_len > len) {
2102                 memset(target_data + len, 0, tgt_len - len);
2103             }
2104         }
2105 
2106         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2107         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2108         if (msg_controllen < tgt_space) {
2109             tgt_space = msg_controllen;
2110         }
2111         msg_controllen -= tgt_space;
2112         space += tgt_space;
2113         cmsg = CMSG_NXTHDR(msgh, cmsg);
2114         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2115                                          target_cmsg_start);
2116     }
2117     unlock_user(target_cmsg, target_cmsg_addr, space);
2118  the_end:
2119     target_msgh->msg_controllen = tswapal(space);
2120     return 0;
2121 }
2122 
2123 /* do_setsockopt() must return target values and target errnos. */
2124 static abi_long do_setsockopt(int sockfd, int level, int optname,
2125                               abi_ulong optval_addr, socklen_t optlen)
2126 {
2127     abi_long ret;
2128     int val;
2129     struct ip_mreqn *ip_mreq;
2130     struct ip_mreq_source *ip_mreq_source;
2131 
2132     switch(level) {
2133     case SOL_TCP:
2134     case SOL_UDP:
2135         /* TCP and UDP options all take an 'int' value.  */
2136         if (optlen < sizeof(uint32_t))
2137             return -TARGET_EINVAL;
2138 
2139         if (get_user_u32(val, optval_addr))
2140             return -TARGET_EFAULT;
2141         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2142         break;
2143     case SOL_IP:
2144         switch(optname) {
2145         case IP_TOS:
2146         case IP_TTL:
2147         case IP_HDRINCL:
2148         case IP_ROUTER_ALERT:
2149         case IP_RECVOPTS:
2150         case IP_RETOPTS:
2151         case IP_PKTINFO:
2152         case IP_MTU_DISCOVER:
2153         case IP_RECVERR:
2154         case IP_RECVTTL:
2155         case IP_RECVTOS:
2156 #ifdef IP_FREEBIND
2157         case IP_FREEBIND:
2158 #endif
2159         case IP_MULTICAST_TTL:
2160         case IP_MULTICAST_LOOP:
2161             val = 0;
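            /*
             * These options accept either a full 32-bit value or a single
             * byte from the guest; anything shorter leaves val at 0.
             */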
2162             if (optlen >= sizeof(uint32_t)) {
2163                 if (get_user_u32(val, optval_addr))
2164                     return -TARGET_EFAULT;
2165             } else if (optlen >= 1) {
2166                 if (get_user_u8(val, optval_addr))
2167                     return -TARGET_EFAULT;
2168             }
2169             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2170             break;
2171         case IP_ADD_MEMBERSHIP:
2172         case IP_DROP_MEMBERSHIP:
2173             if (optlen < sizeof (struct target_ip_mreq) ||
2174                 optlen > sizeof (struct target_ip_mreqn))
2175                 return -TARGET_EINVAL;
2176 
2177             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2178             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2179             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2180             break;
2181 
2182         case IP_BLOCK_SOURCE:
2183         case IP_UNBLOCK_SOURCE:
2184         case IP_ADD_SOURCE_MEMBERSHIP:
2185         case IP_DROP_SOURCE_MEMBERSHIP:
2186             if (optlen != sizeof (struct target_ip_mreq_source))
2187                 return -TARGET_EINVAL;
2188 
2189             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2190             if (!ip_mreq_source) {
2191                 return -TARGET_EFAULT;
2192             }
2193             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2194             unlock_user(ip_mreq_source, optval_addr, 0);
2195             break;
2196 
2197         default:
2198             goto unimplemented;
2199         }
2200         break;
2201     case SOL_IPV6:
2202         switch (optname) {
2203         case IPV6_MTU_DISCOVER:
2204         case IPV6_MTU:
2205         case IPV6_V6ONLY:
2206         case IPV6_RECVPKTINFO:
2207         case IPV6_UNICAST_HOPS:
2208         case IPV6_MULTICAST_HOPS:
2209         case IPV6_MULTICAST_LOOP:
2210         case IPV6_RECVERR:
2211         case IPV6_RECVHOPLIMIT:
2212         case IPV6_2292HOPLIMIT:
2213         case IPV6_CHECKSUM:
2214         case IPV6_ADDRFORM:
2215         case IPV6_2292PKTINFO:
2216         case IPV6_RECVTCLASS:
2217         case IPV6_RECVRTHDR:
2218         case IPV6_2292RTHDR:
2219         case IPV6_RECVHOPOPTS:
2220         case IPV6_2292HOPOPTS:
2221         case IPV6_RECVDSTOPTS:
2222         case IPV6_2292DSTOPTS:
2223         case IPV6_TCLASS:
2224         case IPV6_ADDR_PREFERENCES:
2225 #ifdef IPV6_RECVPATHMTU
2226         case IPV6_RECVPATHMTU:
2227 #endif
2228 #ifdef IPV6_TRANSPARENT
2229         case IPV6_TRANSPARENT:
2230 #endif
2231 #ifdef IPV6_FREEBIND
2232         case IPV6_FREEBIND:
2233 #endif
2234 #ifdef IPV6_RECVORIGDSTADDR
2235         case IPV6_RECVORIGDSTADDR:
2236 #endif
2237             val = 0;
2238             if (optlen < sizeof(uint32_t)) {
2239                 return -TARGET_EINVAL;
2240             }
2241             if (get_user_u32(val, optval_addr)) {
2242                 return -TARGET_EFAULT;
2243             }
2244             ret = get_errno(setsockopt(sockfd, level, optname,
2245                                        &val, sizeof(val)));
2246             break;
2247         case IPV6_PKTINFO:
2248         {
2249             struct in6_pktinfo pki;
2250 
2251             if (optlen < sizeof(pki)) {
2252                 return -TARGET_EINVAL;
2253             }
2254 
2255             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2256                 return -TARGET_EFAULT;
2257             }
2258 
2259             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2260 
2261             ret = get_errno(setsockopt(sockfd, level, optname,
2262                                        &pki, sizeof(pki)));
2263             break;
2264         }
2265         case IPV6_ADD_MEMBERSHIP:
2266         case IPV6_DROP_MEMBERSHIP:
2267         {
2268             struct ipv6_mreq ipv6mreq;
2269 
2270             if (optlen < sizeof(ipv6mreq)) {
2271                 return -TARGET_EINVAL;
2272             }
2273 
2274             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2275                 return -TARGET_EFAULT;
2276             }
2277 
2278             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2279 
2280             ret = get_errno(setsockopt(sockfd, level, optname,
2281                                        &ipv6mreq, sizeof(ipv6mreq)));
2282             break;
2283         }
2284         default:
2285             goto unimplemented;
2286         }
2287         break;
2288     case SOL_ICMPV6:
2289         switch (optname) {
2290         case ICMPV6_FILTER:
2291         {
2292             struct icmp6_filter icmp6f;
2293 
2294             if (optlen > sizeof(icmp6f)) {
2295                 optlen = sizeof(icmp6f);
2296             }
2297 
2298             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2299                 return -TARGET_EFAULT;
2300             }
2301 
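            /* struct icmp6_filter is eight 32-bit words; convert each one
               from target to host byte order before passing it on.  */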
2302             for (val = 0; val < 8; val++) {
2303                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2304             }
2305 
2306             ret = get_errno(setsockopt(sockfd, level, optname,
2307                                        &icmp6f, optlen));
2308             break;
2309         }
2310         default:
2311             goto unimplemented;
2312         }
2313         break;
2314     case SOL_RAW:
2315         switch (optname) {
2316         case ICMP_FILTER:
2317         case IPV6_CHECKSUM:
2318             /* these take a u32 value */
2319             if (optlen < sizeof(uint32_t)) {
2320                 return -TARGET_EINVAL;
2321             }
2322 
2323             if (get_user_u32(val, optval_addr)) {
2324                 return -TARGET_EFAULT;
2325             }
2326             ret = get_errno(setsockopt(sockfd, level, optname,
2327                                        &val, sizeof(val)));
2328             break;
2329 
2330         default:
2331             goto unimplemented;
2332         }
2333         break;
2334 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2335     case SOL_ALG:
2336         switch (optname) {
2337         case ALG_SET_KEY:
2338         {
2339             char *alg_key = g_malloc(optlen);
2340 
2341             if (!alg_key) {
2342                 return -TARGET_ENOMEM;
2343             }
2344             if (copy_from_user(alg_key, optval_addr, optlen)) {
2345                 g_free(alg_key);
2346                 return -TARGET_EFAULT;
2347             }
2348             ret = get_errno(setsockopt(sockfd, level, optname,
2349                                        alg_key, optlen));
2350             g_free(alg_key);
2351             break;
2352         }
2353         case ALG_SET_AEAD_AUTHSIZE:
2354         {
2355             ret = get_errno(setsockopt(sockfd, level, optname,
2356                                        NULL, optlen));
2357             break;
2358         }
2359         default:
2360             goto unimplemented;
2361         }
2362         break;
2363 #endif
2364     case TARGET_SOL_SOCKET:
2365         switch (optname) {
2366         case TARGET_SO_RCVTIMEO:
2367         {
2368                 struct timeval tv;
2369 
2370                 optname = SO_RCVTIMEO;
2371 
2372 set_timeout:
2373                 if (optlen != sizeof(struct target_timeval)) {
2374                     return -TARGET_EINVAL;
2375                 }
2376 
2377                 if (copy_from_user_timeval(&tv, optval_addr)) {
2378                     return -TARGET_EFAULT;
2379                 }
2380 
2381                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2382                                 &tv, sizeof(tv)));
2383                 return ret;
2384         }
2385         case TARGET_SO_SNDTIMEO:
2386                 optname = SO_SNDTIMEO;
2387                 goto set_timeout;
2388         case TARGET_SO_ATTACH_FILTER:
2389         {
2390                 struct target_sock_fprog *tfprog;
2391                 struct target_sock_filter *tfilter;
2392                 struct sock_fprog fprog;
2393                 struct sock_filter *filter;
2394                 int i;
2395 
2396                 if (optlen != sizeof(*tfprog)) {
2397                     return -TARGET_EINVAL;
2398                 }
2399                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2400                     return -TARGET_EFAULT;
2401                 }
2402                 if (!lock_user_struct(VERIFY_READ, tfilter,
2403                                       tswapal(tfprog->filter), 0)) {
2404                     unlock_user_struct(tfprog, optval_addr, 1);
2405                     return -TARGET_EFAULT;
2406                 }
2407 
2408                 fprog.len = tswap16(tfprog->len);
2409                 filter = g_try_new(struct sock_filter, fprog.len);
2410                 if (filter == NULL) {
2411                     unlock_user_struct(tfilter, tfprog->filter, 1);
2412                     unlock_user_struct(tfprog, optval_addr, 1);
2413                     return -TARGET_ENOMEM;
2414                 }
2415                 for (i = 0; i < fprog.len; i++) {
2416                     filter[i].code = tswap16(tfilter[i].code);
2417                     filter[i].jt = tfilter[i].jt;
2418                     filter[i].jf = tfilter[i].jf;
2419                     filter[i].k = tswap32(tfilter[i].k);
2420                 }
2421                 fprog.filter = filter;
2422 
2423                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2424                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2425                 g_free(filter);
2426 
2427                 unlock_user_struct(tfilter, tfprog->filter, 1);
2428                 unlock_user_struct(tfprog, optval_addr, 1);
2429                 return ret;
2430         }
2431         case TARGET_SO_BINDTODEVICE:
2432         {
2433                 char *dev_ifname, *addr_ifname;
2434 
2435                 if (optlen > IFNAMSIZ - 1) {
2436                     optlen = IFNAMSIZ - 1;
2437                 }
2438                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2439                 if (!dev_ifname) {
2440                     return -TARGET_EFAULT;
2441                 }
2442                 optname = SO_BINDTODEVICE;
2443                 addr_ifname = alloca(IFNAMSIZ);
2444                 memcpy(addr_ifname, dev_ifname, optlen);
2445                 addr_ifname[optlen] = 0;
2446                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2447                                            addr_ifname, optlen));
2448                 unlock_user(dev_ifname, optval_addr, 0);
2449                 return ret;
2450         }
2451         case TARGET_SO_LINGER:
2452         {
2453                 struct linger lg;
2454                 struct target_linger *tlg;
2455 
2456                 if (optlen != sizeof(struct target_linger)) {
2457                     return -TARGET_EINVAL;
2458                 }
2459                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2460                     return -TARGET_EFAULT;
2461                 }
2462                 __get_user(lg.l_onoff, &tlg->l_onoff);
2463                 __get_user(lg.l_linger, &tlg->l_linger);
2464                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2465                                 &lg, sizeof(lg)));
2466                 unlock_user_struct(tlg, optval_addr, 0);
2467                 return ret;
2468         }
2469             /* Options with 'int' argument.  */
2470         case TARGET_SO_DEBUG:
2471 		optname = SO_DEBUG;
2472 		break;
2473         case TARGET_SO_REUSEADDR:
2474 		optname = SO_REUSEADDR;
2475 		break;
2476 #ifdef SO_REUSEPORT
2477         case TARGET_SO_REUSEPORT:
2478                 optname = SO_REUSEPORT;
2479                 break;
2480 #endif
2481         case TARGET_SO_TYPE:
2482 		optname = SO_TYPE;
2483 		break;
2484         case TARGET_SO_ERROR:
2485 		optname = SO_ERROR;
2486 		break;
2487         case TARGET_SO_DONTROUTE:
2488 		optname = SO_DONTROUTE;
2489 		break;
2490         case TARGET_SO_BROADCAST:
2491 		optname = SO_BROADCAST;
2492 		break;
2493         case TARGET_SO_SNDBUF:
2494 		optname = SO_SNDBUF;
2495 		break;
2496         case TARGET_SO_SNDBUFFORCE:
2497                 optname = SO_SNDBUFFORCE;
2498                 break;
2499         case TARGET_SO_RCVBUF:
2500 		optname = SO_RCVBUF;
2501 		break;
2502         case TARGET_SO_RCVBUFFORCE:
2503                 optname = SO_RCVBUFFORCE;
2504                 break;
2505         case TARGET_SO_KEEPALIVE:
2506 		optname = SO_KEEPALIVE;
2507 		break;
2508         case TARGET_SO_OOBINLINE:
2509 		optname = SO_OOBINLINE;
2510 		break;
2511         case TARGET_SO_NO_CHECK:
2512 		optname = SO_NO_CHECK;
2513 		break;
2514         case TARGET_SO_PRIORITY:
2515 		optname = SO_PRIORITY;
2516 		break;
2517 #ifdef SO_BSDCOMPAT
2518         case TARGET_SO_BSDCOMPAT:
2519 		optname = SO_BSDCOMPAT;
2520 		break;
2521 #endif
2522         case TARGET_SO_PASSCRED:
2523 		optname = SO_PASSCRED;
2524 		break;
2525         case TARGET_SO_PASSSEC:
2526                 optname = SO_PASSSEC;
2527                 break;
2528         case TARGET_SO_TIMESTAMP:
2529 		optname = SO_TIMESTAMP;
2530 		break;
2531         case TARGET_SO_RCVLOWAT:
2532 		optname = SO_RCVLOWAT;
2533 		break;
2534         default:
2535             goto unimplemented;
2536         }
2537 	if (optlen < sizeof(uint32_t))
2538             return -TARGET_EINVAL;
2539 
2540 	if (get_user_u32(val, optval_addr))
2541             return -TARGET_EFAULT;
2542 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2543         break;
2544 #ifdef SOL_NETLINK
2545     case SOL_NETLINK:
2546         switch (optname) {
2547         case NETLINK_PKTINFO:
2548         case NETLINK_ADD_MEMBERSHIP:
2549         case NETLINK_DROP_MEMBERSHIP:
2550         case NETLINK_BROADCAST_ERROR:
2551         case NETLINK_NO_ENOBUFS:
2552 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2553         case NETLINK_LISTEN_ALL_NSID:
2554         case NETLINK_CAP_ACK:
2555 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2556 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2557         case NETLINK_EXT_ACK:
2558 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2559 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2560         case NETLINK_GET_STRICT_CHK:
2561 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2562             break;
2563         default:
2564             goto unimplemented;
2565         }
2566         val = 0;
2567         if (optlen < sizeof(uint32_t)) {
2568             return -TARGET_EINVAL;
2569         }
2570         if (get_user_u32(val, optval_addr)) {
2571             return -TARGET_EFAULT;
2572         }
2573         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2574                                    sizeof(val)));
2575         break;
2576 #endif /* SOL_NETLINK */
2577     default:
2578     unimplemented:
2579         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2580                       level, optname);
2581         ret = -TARGET_ENOPROTOOPT;
2582     }
2583     return ret;
2584 }
2585 
2586 /* do_getsockopt() must return target values and target errnos. */
2587 static abi_long do_getsockopt(int sockfd, int level, int optname,
2588                               abi_ulong optval_addr, abi_ulong optlen)
2589 {
2590     abi_long ret;
2591     int len, val;
2592     socklen_t lv;
2593 
2594     switch(level) {
2595     case TARGET_SOL_SOCKET:
2596         level = SOL_SOCKET;
2597         switch (optname) {
2598         /* These don't just return a single integer */
2599         case TARGET_SO_PEERNAME:
2600             goto unimplemented;
2601         case TARGET_SO_RCVTIMEO: {
2602             struct timeval tv;
2603             socklen_t tvlen;
2604 
2605             optname = SO_RCVTIMEO;
2606 
2607 get_timeout:
2608             if (get_user_u32(len, optlen)) {
2609                 return -TARGET_EFAULT;
2610             }
2611             if (len < 0) {
2612                 return -TARGET_EINVAL;
2613             }
2614 
2615             tvlen = sizeof(tv);
2616             ret = get_errno(getsockopt(sockfd, level, optname,
2617                                        &tv, &tvlen));
2618             if (ret < 0) {
2619                 return ret;
2620             }
2621             if (len > sizeof(struct target_timeval)) {
2622                 len = sizeof(struct target_timeval);
2623             }
2624             if (copy_to_user_timeval(optval_addr, &tv)) {
2625                 return -TARGET_EFAULT;
2626             }
2627             if (put_user_u32(len, optlen)) {
2628                 return -TARGET_EFAULT;
2629             }
2630             break;
2631         }
2632         case TARGET_SO_SNDTIMEO:
2633             optname = SO_SNDTIMEO;
2634             goto get_timeout;
2635         case TARGET_SO_PEERCRED: {
2636             struct ucred cr;
2637             socklen_t crlen;
2638             struct target_ucred *tcr;
2639 
2640             if (get_user_u32(len, optlen)) {
2641                 return -TARGET_EFAULT;
2642             }
2643             if (len < 0) {
2644                 return -TARGET_EINVAL;
2645             }
2646 
2647             crlen = sizeof(cr);
2648             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2649                                        &cr, &crlen));
2650             if (ret < 0) {
2651                 return ret;
2652             }
2653             if (len > crlen) {
2654                 len = crlen;
2655             }
2656             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2657                 return -TARGET_EFAULT;
2658             }
2659             __put_user(cr.pid, &tcr->pid);
2660             __put_user(cr.uid, &tcr->uid);
2661             __put_user(cr.gid, &tcr->gid);
2662             unlock_user_struct(tcr, optval_addr, 1);
2663             if (put_user_u32(len, optlen)) {
2664                 return -TARGET_EFAULT;
2665             }
2666             break;
2667         }
2668         case TARGET_SO_PEERSEC: {
2669             char *name;
2670 
2671             if (get_user_u32(len, optlen)) {
2672                 return -TARGET_EFAULT;
2673             }
2674             if (len < 0) {
2675                 return -TARGET_EINVAL;
2676             }
2677             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2678             if (!name) {
2679                 return -TARGET_EFAULT;
2680             }
2681             lv = len;
2682             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2683                                        name, &lv));
2684             if (put_user_u32(lv, optlen)) {
2685                 ret = -TARGET_EFAULT;
2686             }
2687             unlock_user(name, optval_addr, lv);
2688             break;
2689         }
2690         case TARGET_SO_LINGER:
2691         {
2692             struct linger lg;
2693             socklen_t lglen;
2694             struct target_linger *tlg;
2695 
2696             if (get_user_u32(len, optlen)) {
2697                 return -TARGET_EFAULT;
2698             }
2699             if (len < 0) {
2700                 return -TARGET_EINVAL;
2701             }
2702 
2703             lglen = sizeof(lg);
2704             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2705                                        &lg, &lglen));
2706             if (ret < 0) {
2707                 return ret;
2708             }
2709             if (len > lglen) {
2710                 len = lglen;
2711             }
2712             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2713                 return -TARGET_EFAULT;
2714             }
2715             __put_user(lg.l_onoff, &tlg->l_onoff);
2716             __put_user(lg.l_linger, &tlg->l_linger);
2717             unlock_user_struct(tlg, optval_addr, 1);
2718             if (put_user_u32(len, optlen)) {
2719                 return -TARGET_EFAULT;
2720             }
2721             break;
2722         }
2723         /* Options with 'int' argument.  */
2724         case TARGET_SO_DEBUG:
2725             optname = SO_DEBUG;
2726             goto int_case;
2727         case TARGET_SO_REUSEADDR:
2728             optname = SO_REUSEADDR;
2729             goto int_case;
2730 #ifdef SO_REUSEPORT
2731         case TARGET_SO_REUSEPORT:
2732             optname = SO_REUSEPORT;
2733             goto int_case;
2734 #endif
2735         case TARGET_SO_TYPE:
2736             optname = SO_TYPE;
2737             goto int_case;
2738         case TARGET_SO_ERROR:
2739             optname = SO_ERROR;
2740             goto int_case;
2741         case TARGET_SO_DONTROUTE:
2742             optname = SO_DONTROUTE;
2743             goto int_case;
2744         case TARGET_SO_BROADCAST:
2745             optname = SO_BROADCAST;
2746             goto int_case;
2747         case TARGET_SO_SNDBUF:
2748             optname = SO_SNDBUF;
2749             goto int_case;
2750         case TARGET_SO_RCVBUF:
2751             optname = SO_RCVBUF;
2752             goto int_case;
2753         case TARGET_SO_KEEPALIVE:
2754             optname = SO_KEEPALIVE;
2755             goto int_case;
2756         case TARGET_SO_OOBINLINE:
2757             optname = SO_OOBINLINE;
2758             goto int_case;
2759         case TARGET_SO_NO_CHECK:
2760             optname = SO_NO_CHECK;
2761             goto int_case;
2762         case TARGET_SO_PRIORITY:
2763             optname = SO_PRIORITY;
2764             goto int_case;
2765 #ifdef SO_BSDCOMPAT
2766         case TARGET_SO_BSDCOMPAT:
2767             optname = SO_BSDCOMPAT;
2768             goto int_case;
2769 #endif
2770         case TARGET_SO_PASSCRED:
2771             optname = SO_PASSCRED;
2772             goto int_case;
2773         case TARGET_SO_TIMESTAMP:
2774             optname = SO_TIMESTAMP;
2775             goto int_case;
2776         case TARGET_SO_RCVLOWAT:
2777             optname = SO_RCVLOWAT;
2778             goto int_case;
2779         case TARGET_SO_ACCEPTCONN:
2780             optname = SO_ACCEPTCONN;
2781             goto int_case;
2782         case TARGET_SO_PROTOCOL:
2783             optname = SO_PROTOCOL;
2784             goto int_case;
2785         case TARGET_SO_DOMAIN:
2786             optname = SO_DOMAIN;
2787             goto int_case;
2788         default:
2789             goto int_case;
2790         }
2791         break;
2792     case SOL_TCP:
2793     case SOL_UDP:
2794         /* TCP and UDP options all take an 'int' value.  */
2795     int_case:
2796         if (get_user_u32(len, optlen))
2797             return -TARGET_EFAULT;
2798         if (len < 0)
2799             return -TARGET_EINVAL;
2800         lv = sizeof(lv);
2801         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2802         if (ret < 0)
2803             return ret;
2804         if (optname == SO_TYPE) {
2805             val = host_to_target_sock_type(val);
2806         }
2807         if (len > lv)
2808             len = lv;
2809         if (len == 4) {
2810             if (put_user_u32(val, optval_addr))
2811                 return -TARGET_EFAULT;
2812         } else {
2813             if (put_user_u8(val, optval_addr))
2814                 return -TARGET_EFAULT;
2815         }
2816         if (put_user_u32(len, optlen))
2817             return -TARGET_EFAULT;
2818         break;
2819     case SOL_IP:
2820         switch(optname) {
2821         case IP_TOS:
2822         case IP_TTL:
2823         case IP_HDRINCL:
2824         case IP_ROUTER_ALERT:
2825         case IP_RECVOPTS:
2826         case IP_RETOPTS:
2827         case IP_PKTINFO:
2828         case IP_MTU_DISCOVER:
2829         case IP_RECVERR:
2830         case IP_RECVTOS:
2831 #ifdef IP_FREEBIND
2832         case IP_FREEBIND:
2833 #endif
2834         case IP_MULTICAST_TTL:
2835         case IP_MULTICAST_LOOP:
2836             if (get_user_u32(len, optlen))
2837                 return -TARGET_EFAULT;
2838             if (len < 0)
2839                 return -TARGET_EINVAL;
2840             lv = sizeof(lv);
2841             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2842             if (ret < 0)
2843                 return ret;
2844             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2845                 len = 1;
2846                 if (put_user_u32(len, optlen)
2847                     || put_user_u8(val, optval_addr))
2848                     return -TARGET_EFAULT;
2849             } else {
2850                 if (len > sizeof(int))
2851                     len = sizeof(int);
2852                 if (put_user_u32(len, optlen)
2853                     || put_user_u32(val, optval_addr))
2854                     return -TARGET_EFAULT;
2855             }
2856             break;
2857         default:
2858             ret = -TARGET_ENOPROTOOPT;
2859             break;
2860         }
2861         break;
2862     case SOL_IPV6:
2863         switch (optname) {
2864         case IPV6_MTU_DISCOVER:
2865         case IPV6_MTU:
2866         case IPV6_V6ONLY:
2867         case IPV6_RECVPKTINFO:
2868         case IPV6_UNICAST_HOPS:
2869         case IPV6_MULTICAST_HOPS:
2870         case IPV6_MULTICAST_LOOP:
2871         case IPV6_RECVERR:
2872         case IPV6_RECVHOPLIMIT:
2873         case IPV6_2292HOPLIMIT:
2874         case IPV6_CHECKSUM:
2875         case IPV6_ADDRFORM:
2876         case IPV6_2292PKTINFO:
2877         case IPV6_RECVTCLASS:
2878         case IPV6_RECVRTHDR:
2879         case IPV6_2292RTHDR:
2880         case IPV6_RECVHOPOPTS:
2881         case IPV6_2292HOPOPTS:
2882         case IPV6_RECVDSTOPTS:
2883         case IPV6_2292DSTOPTS:
2884         case IPV6_TCLASS:
2885         case IPV6_ADDR_PREFERENCES:
2886 #ifdef IPV6_RECVPATHMTU
2887         case IPV6_RECVPATHMTU:
2888 #endif
2889 #ifdef IPV6_TRANSPARENT
2890         case IPV6_TRANSPARENT:
2891 #endif
2892 #ifdef IPV6_FREEBIND
2893         case IPV6_FREEBIND:
2894 #endif
2895 #ifdef IPV6_RECVORIGDSTADDR
2896         case IPV6_RECVORIGDSTADDR:
2897 #endif
2898             if (get_user_u32(len, optlen))
2899                 return -TARGET_EFAULT;
2900             if (len < 0)
2901                 return -TARGET_EINVAL;
2902             lv = sizeof(lv);
2903             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2904             if (ret < 0)
2905                 return ret;
2906             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2907                 len = 1;
2908                 if (put_user_u32(len, optlen)
2909                     || put_user_u8(val, optval_addr))
2910                     return -TARGET_EFAULT;
2911             } else {
2912                 if (len > sizeof(int))
2913                     len = sizeof(int);
2914                 if (put_user_u32(len, optlen)
2915                     || put_user_u32(val, optval_addr))
2916                     return -TARGET_EFAULT;
2917             }
2918             break;
2919         default:
2920             ret = -TARGET_ENOPROTOOPT;
2921             break;
2922         }
2923         break;
2924 #ifdef SOL_NETLINK
2925     case SOL_NETLINK:
2926         switch (optname) {
2927         case NETLINK_PKTINFO:
2928         case NETLINK_BROADCAST_ERROR:
2929         case NETLINK_NO_ENOBUFS:
2930 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2931         case NETLINK_LISTEN_ALL_NSID:
2932         case NETLINK_CAP_ACK:
2933 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2934 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2935         case NETLINK_EXT_ACK:
2936 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2937 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2938         case NETLINK_GET_STRICT_CHK:
2939 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2940             if (get_user_u32(len, optlen)) {
2941                 return -TARGET_EFAULT;
2942             }
2943             if (len != sizeof(val)) {
2944                 return -TARGET_EINVAL;
2945             }
2946             lv = len;
2947             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2948             if (ret < 0) {
2949                 return ret;
2950             }
2951             if (put_user_u32(lv, optlen)
2952                 || put_user_u32(val, optval_addr)) {
2953                 return -TARGET_EFAULT;
2954             }
2955             break;
2956 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2957         case NETLINK_LIST_MEMBERSHIPS:
2958         {
2959             uint32_t *results;
2960             int i;
2961             if (get_user_u32(len, optlen)) {
2962                 return -TARGET_EFAULT;
2963             }
2964             if (len < 0) {
2965                 return -TARGET_EINVAL;
2966             }
2967             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2968             if (!results && len > 0) {
2969                 return -TARGET_EFAULT;
2970             }
2971             lv = len;
2972             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2973             if (ret < 0) {
2974                 unlock_user(results, optval_addr, 0);
2975                 return ret;
2976             }
2977             /* swap host endianness to target endianness. */
2978             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2979                 results[i] = tswap32(results[i]);
2980             }
2981             if (put_user_u32(lv, optlen)) {
2982                 return -TARGET_EFAULT;
2983             }
2984             unlock_user(results, optval_addr, 0);
2985             break;
2986         }
2987 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2988         default:
2989             goto unimplemented;
2990         }
2991         break;
2992 #endif /* SOL_NETLINK */
2993     default:
2994     unimplemented:
2995         qemu_log_mask(LOG_UNIMP,
2996                       "getsockopt level=%d optname=%d not yet supported\n",
2997                       level, optname);
2998         ret = -TARGET_EOPNOTSUPP;
2999         break;
3000     }
3001     return ret;
3002 }
3003 
3004 /* Convert target low/high pair representing file offset into the host
3005  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3006  * as the kernel doesn't handle them either.
3007  */
3008 static void target_to_host_low_high(abi_ulong tlow,
3009                                     abi_ulong thigh,
3010                                     unsigned long *hlow,
3011                                     unsigned long *hhigh)
3012 {
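    /*
     * The shifts are split in two so that we never shift a value by its
     * full width (undefined behaviour in C) when the guest or host long
     * is 64 bits wide.
     */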
3013     uint64_t off = tlow |
3014         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3015         TARGET_LONG_BITS / 2;
3016 
3017     *hlow = off;
3018     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3019 }
3020 
3021 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3022                                 abi_ulong count, int copy)
3023 {
3024     struct target_iovec *target_vec;
3025     struct iovec *vec;
3026     abi_ulong total_len, max_len;
3027     int i;
3028     int err = 0;
3029     bool bad_address = false;
3030 
3031     if (count == 0) {
3032         errno = 0;
3033         return NULL;
3034     }
3035     if (count > IOV_MAX) {
3036         errno = EINVAL;
3037         return NULL;
3038     }
3039 
3040     vec = g_try_new0(struct iovec, count);
3041     if (vec == NULL) {
3042         errno = ENOMEM;
3043         return NULL;
3044     }
3045 
3046     target_vec = lock_user(VERIFY_READ, target_addr,
3047                            count * sizeof(struct target_iovec), 1);
3048     if (target_vec == NULL) {
3049         err = EFAULT;
3050         goto fail2;
3051     }
3052 
3053     /* ??? If host page size > target page size, this will result in a
3054        value larger than what we can actually support.  */
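    /* Clamp the total to roughly the kernel's MAX_RW_COUNT limit
       (INT_MAX rounded down to a page boundary), here using the
       target's page size.  */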
3055     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3056     total_len = 0;
3057 
3058     for (i = 0; i < count; i++) {
3059         abi_ulong base = tswapal(target_vec[i].iov_base);
3060         abi_long len = tswapal(target_vec[i].iov_len);
3061 
3062         if (len < 0) {
3063             err = EINVAL;
3064             goto fail;
3065         } else if (len == 0) {
3066             /* A zero-length entry is ignored.  */
3067             vec[i].iov_base = 0;
3068         } else {
3069             vec[i].iov_base = lock_user(type, base, len, copy);
3070             /* If the first buffer pointer is bad, this is a fault.  But
3071              * subsequent bad buffers will result in a partial write; this
3072              * is realized by filling the vector with null pointers and
3073              * zero lengths. */
3074             if (!vec[i].iov_base) {
3075                 if (i == 0) {
3076                     err = EFAULT;
3077                     goto fail;
3078                 } else {
3079                     bad_address = true;
3080                 }
3081             }
3082             if (bad_address) {
3083                 len = 0;
3084             }
3085             if (len > max_len - total_len) {
3086                 len = max_len - total_len;
3087             }
3088         }
3089         vec[i].iov_len = len;
3090         total_len += len;
3091     }
3092 
3093     unlock_user(target_vec, target_addr, 0);
3094     return vec;
3095 
3096  fail:
3097     while (--i >= 0) {
3098         if (tswapal(target_vec[i].iov_len) > 0) {
3099             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3100         }
3101     }
3102     unlock_user(target_vec, target_addr, 0);
3103  fail2:
3104     g_free(vec);
3105     errno = err;
3106     return NULL;
3107 }
3108 
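/* Release an iovec obtained from lock_iovec(), copying data back to the
 * guest buffers when copy is non-zero, and free the host array. */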
3109 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3110                          abi_ulong count, int copy)
3111 {
3112     struct target_iovec *target_vec;
3113     int i;
3114 
3115     target_vec = lock_user(VERIFY_READ, target_addr,
3116                            count * sizeof(struct target_iovec), 1);
3117     if (target_vec) {
3118         for (i = 0; i < count; i++) {
3119             abi_ulong base = tswapal(target_vec[i].iov_base);
3120             abi_long len = tswapal(target_vec[i].iov_len);
3121             if (len < 0) {
3122                 break;
3123             }
3124             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3125         }
3126         unlock_user(target_vec, target_addr, 0);
3127     }
3128 
3129     g_free(vec);
3130 }
3131 
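/* Convert a target socket type, including the TARGET_SOCK_CLOEXEC and
 * TARGET_SOCK_NONBLOCK flags, into the host encoding in place; returns
 * -TARGET_EINVAL if a flag cannot be represented on this host. */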
3132 static inline int target_to_host_sock_type(int *type)
3133 {
3134     int host_type = 0;
3135     int target_type = *type;
3136 
3137     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3138     case TARGET_SOCK_DGRAM:
3139         host_type = SOCK_DGRAM;
3140         break;
3141     case TARGET_SOCK_STREAM:
3142         host_type = SOCK_STREAM;
3143         break;
3144     default:
3145         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3146         break;
3147     }
3148     if (target_type & TARGET_SOCK_CLOEXEC) {
3149 #if defined(SOCK_CLOEXEC)
3150         host_type |= SOCK_CLOEXEC;
3151 #else
3152         return -TARGET_EINVAL;
3153 #endif
3154     }
3155     if (target_type & TARGET_SOCK_NONBLOCK) {
3156 #if defined(SOCK_NONBLOCK)
3157         host_type |= SOCK_NONBLOCK;
3158 #elif !defined(O_NONBLOCK)
3159         return -TARGET_EINVAL;
3160 #endif
3161     }
3162     *type = host_type;
3163     return 0;
3164 }
3165 
3166 /* Try to emulate socket type flags after socket creation.  */
3167 static int sock_flags_fixup(int fd, int target_type)
3168 {
3169 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3170     if (target_type & TARGET_SOCK_NONBLOCK) {
3171         int flags = fcntl(fd, F_GETFL);
3172         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3173             close(fd);
3174             return -TARGET_EINVAL;
3175         }
3176     }
3177 #endif
3178     return fd;
3179 }
3180 
3181 /* do_socket() Must return target values and target errnos. */
3182 static abi_long do_socket(int domain, int type, int protocol)
3183 {
3184     int target_type = type;
3185     int ret;
3186 
3187     ret = target_to_host_sock_type(&type);
3188     if (ret) {
3189         return ret;
3190     }
3191 
3192     if (domain == PF_NETLINK && !(
3193 #ifdef CONFIG_RTNETLINK
3194          protocol == NETLINK_ROUTE ||
3195 #endif
3196          protocol == NETLINK_KOBJECT_UEVENT ||
3197          protocol == NETLINK_AUDIT)) {
3198         return -TARGET_EPROTONOSUPPORT;
3199     }
3200 
3201     if (domain == AF_PACKET ||
3202         (domain == AF_INET && type == SOCK_PACKET)) {
3203         protocol = tswap16(protocol);
3204     }
3205 
3206     ret = get_errno(socket(domain, type, protocol));
3207     if (ret >= 0) {
3208         ret = sock_flags_fixup(ret, target_type);
3209         if (type == SOCK_PACKET) {
3210             /* Handle an obsolete case:
3211              * if the socket type is SOCK_PACKET, bind by name.
3212              */
3213             fd_trans_register(ret, &target_packet_trans);
3214         } else if (domain == PF_NETLINK) {
3215             switch (protocol) {
3216 #ifdef CONFIG_RTNETLINK
3217             case NETLINK_ROUTE:
3218                 fd_trans_register(ret, &target_netlink_route_trans);
3219                 break;
3220 #endif
3221             case NETLINK_KOBJECT_UEVENT:
3222                 /* nothing to do: messages are strings */
3223                 break;
3224             case NETLINK_AUDIT:
3225                 fd_trans_register(ret, &target_netlink_audit_trans);
3226                 break;
3227             default:
3228                 g_assert_not_reached();
3229             }
3230         }
3231     }
3232     return ret;
3233 }
3234 
3235 /* do_bind() Must return target values and target errnos. */
3236 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3237                         socklen_t addrlen)
3238 {
3239     void *addr;
3240     abi_long ret;
3241 
3242     if ((int)addrlen < 0) {
3243         return -TARGET_EINVAL;
3244     }
3245 
3246     addr = alloca(addrlen+1);
3247 
3248     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3249     if (ret)
3250         return ret;
3251 
3252     return get_errno(bind(sockfd, addr, addrlen));
3253 }
3254 
3255 /* do_connect() Must return target values and target errnos. */
3256 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3257                            socklen_t addrlen)
3258 {
3259     void *addr;
3260     abi_long ret;
3261 
3262     if ((int)addrlen < 0) {
3263         return -TARGET_EINVAL;
3264     }
3265 
3266     addr = alloca(addrlen+1);
3267 
3268     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3269     if (ret)
3270         return ret;
3271 
3272     return get_errno(safe_connect(sockfd, addr, addrlen));
3273 }
3274 
3275 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3276 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3277                                       int flags, int send)
3278 {
3279     abi_long ret, len;
3280     struct msghdr msg;
3281     abi_ulong count;
3282     struct iovec *vec;
3283     abi_ulong target_vec;
3284 
3285     if (msgp->msg_name) {
3286         msg.msg_namelen = tswap32(msgp->msg_namelen);
3287         msg.msg_name = alloca(msg.msg_namelen+1);
3288         ret = target_to_host_sockaddr(fd, msg.msg_name,
3289                                       tswapal(msgp->msg_name),
3290                                       msg.msg_namelen);
3291         if (ret == -TARGET_EFAULT) {
3292             /* For connected sockets msg_name and msg_namelen must
3293              * be ignored, so returning EFAULT immediately is wrong.
3294              * Instead, pass a bad msg_name to the host kernel, and
3295              * let it decide whether to return EFAULT or not.
3296              */
3297             msg.msg_name = (void *)-1;
3298         } else if (ret) {
3299             goto out2;
3300         }
3301     } else {
3302         msg.msg_name = NULL;
3303         msg.msg_namelen = 0;
3304     }
3305     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3306     msg.msg_control = alloca(msg.msg_controllen);
3307     memset(msg.msg_control, 0, msg.msg_controllen);
3308 
3309     msg.msg_flags = tswap32(msgp->msg_flags);
3310 
3311     count = tswapal(msgp->msg_iovlen);
3312     target_vec = tswapal(msgp->msg_iov);
3313 
3314     if (count > IOV_MAX) {
3315         /* sendmsg/recvmsg return a different errno for this condition than
3316          * readv/writev, so we must catch it here before lock_iovec() does.
3317          */
3318         ret = -TARGET_EMSGSIZE;
3319         goto out2;
3320     }
3321 
3322     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3323                      target_vec, count, send);
3324     if (vec == NULL) {
3325         ret = -host_to_target_errno(errno);
3326         goto out2;
3327     }
3328     msg.msg_iovlen = count;
3329     msg.msg_iov = vec;
3330 
3331     if (send) {
3332         if (fd_trans_target_to_host_data(fd)) {
3333             void *host_msg;
3334 
3335             host_msg = g_malloc(msg.msg_iov->iov_len);
3336             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3337             ret = fd_trans_target_to_host_data(fd)(host_msg,
3338                                                    msg.msg_iov->iov_len);
3339             if (ret >= 0) {
3340                 msg.msg_iov->iov_base = host_msg;
3341                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3342             }
3343             g_free(host_msg);
3344         } else {
3345             ret = target_to_host_cmsg(&msg, msgp);
3346             if (ret == 0) {
3347                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3348             }
3349         }
3350     } else {
3351         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3352         if (!is_error(ret)) {
3353             len = ret;
3354             if (fd_trans_host_to_target_data(fd)) {
3355                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3356                                                MIN(msg.msg_iov->iov_len, len));
3357             }
3358             if (!is_error(ret)) {
3359                 ret = host_to_target_cmsg(msgp, &msg);
3360             }
3361             if (!is_error(ret)) {
3362                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3363                 msgp->msg_flags = tswap32(msg.msg_flags);
3364                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3365                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3366                                     msg.msg_name, msg.msg_namelen);
3367                     if (ret) {
3368                         goto out;
3369                     }
3370                 }
3371 
3372                 ret = len;
3373             }
3374         }
3375     }
3376 
3377 out:
3378     unlock_iovec(vec, target_vec, count, !send);
3379 out2:
3380     return ret;
3381 }
3382 
3383 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3384                                int flags, int send)
3385 {
3386     abi_long ret;
3387     struct target_msghdr *msgp;
3388 
3389     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3390                           msgp,
3391                           target_msg,
3392                           send ? 1 : 0)) {
3393         return -TARGET_EFAULT;
3394     }
3395     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3396     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3397     return ret;
3398 }
3399 
3400 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3401  * so it might not have this *mmsg-specific flag either.
3402  */
3403 #ifndef MSG_WAITFORONE
3404 #define MSG_WAITFORONE 0x10000
3405 #endif
3406 
3407 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3408                                 unsigned int vlen, unsigned int flags,
3409                                 int send)
3410 {
3411     struct target_mmsghdr *mmsgp;
3412     abi_long ret = 0;
3413     int i;
3414 
3415     if (vlen > UIO_MAXIOV) {
3416         vlen = UIO_MAXIOV;
3417     }
3418 
3419     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3420     if (!mmsgp) {
3421         return -TARGET_EFAULT;
3422     }
3423 
3424     for (i = 0; i < vlen; i++) {
3425         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3426         if (is_error(ret)) {
3427             break;
3428         }
3429         mmsgp[i].msg_len = tswap32(ret);
3430         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3431         if (flags & MSG_WAITFORONE) {
3432             flags |= MSG_DONTWAIT;
3433         }
3434     }
3435 
3436     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3437 
3438     /* Return number of datagrams sent if we sent any at all;
3439      * otherwise return the error.
3440      */
3441     if (i) {
3442         return i;
3443     }
3444     return ret;
3445 }
3446 
3447 /* do_accept4() Must return target values and target errnos. */
3448 static abi_long do_accept4(int fd, abi_ulong target_addr,
3449                            abi_ulong target_addrlen_addr, int flags)
3450 {
3451     socklen_t addrlen, ret_addrlen;
3452     void *addr;
3453     abi_long ret;
3454     int host_flags;
3455 
3456     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3457 
3458     if (target_addr == 0) {
3459         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3460     }
3461 
3462     /* Linux returns EFAULT if the addrlen pointer is invalid */
3463     if (get_user_u32(addrlen, target_addrlen_addr))
3464         return -TARGET_EFAULT;
3465 
3466     if ((int)addrlen < 0) {
3467         return -TARGET_EINVAL;
3468     }
3469 
3470     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3471         return -TARGET_EFAULT;
3472     }
3473 
3474     addr = alloca(addrlen);
3475 
3476     ret_addrlen = addrlen;
3477     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3478     if (!is_error(ret)) {
3479         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3480         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3481             ret = -TARGET_EFAULT;
3482         }
3483     }
3484     return ret;
3485 }
3486 
3487 /* do_getpeername() Must return target values and target errnos. */
3488 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3489                                abi_ulong target_addrlen_addr)
3490 {
3491     socklen_t addrlen, ret_addrlen;
3492     void *addr;
3493     abi_long ret;
3494 
3495     if (get_user_u32(addrlen, target_addrlen_addr))
3496         return -TARGET_EFAULT;
3497 
3498     if ((int)addrlen < 0) {
3499         return -TARGET_EINVAL;
3500     }
3501 
3502     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3503         return -TARGET_EFAULT;
3504     }
3505 
3506     addr = alloca(addrlen);
3507 
3508     ret_addrlen = addrlen;
3509     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3510     if (!is_error(ret)) {
3511         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3512         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3513             ret = -TARGET_EFAULT;
3514         }
3515     }
3516     return ret;
3517 }
3518 
3519 /* do_getsockname() Must return target values and target errnos. */
3520 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3521                                abi_ulong target_addrlen_addr)
3522 {
3523     socklen_t addrlen, ret_addrlen;
3524     void *addr;
3525     abi_long ret;
3526 
3527     if (get_user_u32(addrlen, target_addrlen_addr))
3528         return -TARGET_EFAULT;
3529 
3530     if ((int)addrlen < 0) {
3531         return -TARGET_EINVAL;
3532     }
3533 
3534     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3535         return -TARGET_EFAULT;
3536     }
3537 
3538     addr = alloca(addrlen);
3539 
3540     ret_addrlen = addrlen;
3541     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3542     if (!is_error(ret)) {
3543         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3544         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3545             ret = -TARGET_EFAULT;
3546         }
3547     }
3548     return ret;
3549 }
3550 
3551 /* do_socketpair() Must return target values and target errnos. */
3552 static abi_long do_socketpair(int domain, int type, int protocol,
3553                               abi_ulong target_tab_addr)
3554 {
3555     int tab[2];
3556     abi_long ret;
3557 
3558     target_to_host_sock_type(&type);
3559 
3560     ret = get_errno(socketpair(domain, type, protocol, tab));
3561     if (!is_error(ret)) {
3562         if (put_user_s32(tab[0], target_tab_addr)
3563             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3564             ret = -TARGET_EFAULT;
3565     }
3566     return ret;
3567 }
3568 
3569 /* do_sendto() Must return target values and target errnos. */
3570 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3571                           abi_ulong target_addr, socklen_t addrlen)
3572 {
3573     void *addr;
3574     void *host_msg;
3575     void *copy_msg = NULL;
3576     abi_long ret;
3577 
3578     if ((int)addrlen < 0) {
3579         return -TARGET_EINVAL;
3580     }
3581 
3582     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3583     if (!host_msg)
3584         return -TARGET_EFAULT;
3585     if (fd_trans_target_to_host_data(fd)) {
3586         copy_msg = host_msg;
3587         host_msg = g_malloc(len);
3588         memcpy(host_msg, copy_msg, len);
3589         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3590         if (ret < 0) {
3591             goto fail;
3592         }
3593     }
3594     if (target_addr) {
3595         addr = alloca(addrlen+1);
3596         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3597         if (ret) {
3598             goto fail;
3599         }
3600         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3601     } else {
3602         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3603     }
3604 fail:
3605     if (copy_msg) {
3606         g_free(host_msg);
3607         host_msg = copy_msg;
3608     }
3609     unlock_user(host_msg, msg, 0);
3610     return ret;
3611 }
3612 
3613 /* do_recvfrom() Must return target values and target errnos. */
3614 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3615                             abi_ulong target_addr,
3616                             abi_ulong target_addrlen)
3617 {
3618     socklen_t addrlen, ret_addrlen;
3619     void *addr;
3620     void *host_msg;
3621     abi_long ret;
3622 
3623     if (!msg) {
3624         host_msg = NULL;
3625     } else {
3626         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3627         if (!host_msg) {
3628             return -TARGET_EFAULT;
3629         }
3630     }
3631     if (target_addr) {
3632         if (get_user_u32(addrlen, target_addrlen)) {
3633             ret = -TARGET_EFAULT;
3634             goto fail;
3635         }
3636         if ((int)addrlen < 0) {
3637             ret = -TARGET_EINVAL;
3638             goto fail;
3639         }
3640         addr = alloca(addrlen);
3641         ret_addrlen = addrlen;
3642         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3643                                       addr, &ret_addrlen));
3644     } else {
3645         addr = NULL; /* To keep compiler quiet.  */
3646         addrlen = 0; /* To keep compiler quiet.  */
3647         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3648     }
3649     if (!is_error(ret)) {
3650         if (fd_trans_host_to_target_data(fd)) {
3651             abi_long trans;
3652             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3653             if (is_error(trans)) {
3654                 ret = trans;
3655                 goto fail;
3656             }
3657         }
3658         if (target_addr) {
3659             host_to_target_sockaddr(target_addr, addr,
3660                                     MIN(addrlen, ret_addrlen));
3661             if (put_user_u32(ret_addrlen, target_addrlen)) {
3662                 ret = -TARGET_EFAULT;
3663                 goto fail;
3664             }
3665         }
3666         unlock_user(host_msg, msg, len);
3667     } else {
3668 fail:
3669         unlock_user(host_msg, msg, 0);
3670     }
3671     return ret;
3672 }
3673 
3674 #ifdef TARGET_NR_socketcall
3675 /* do_socketcall() must return target values and target errnos. */
3676 static abi_long do_socketcall(int num, abi_ulong vptr)
3677 {
3678     static const unsigned nargs[] = { /* number of arguments per operation */
3679         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3680         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3681         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3682         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3683         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3684         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3685         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3686         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3687         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3688         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3689         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3690         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3691         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3692         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3693         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3694         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3695         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3696         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3697         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3698         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3699     };
3700     abi_long a[6]; /* max 6 args */
3701     unsigned i;
3702 
3703     /* check the range of the first argument num */
3704     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3705     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3706         return -TARGET_EINVAL;
3707     }
3708     /* ensure we have space for args */
3709     if (nargs[num] > ARRAY_SIZE(a)) {
3710         return -TARGET_EINVAL;
3711     }
3712     /* collect the arguments in a[] according to nargs[] */
3713     for (i = 0; i < nargs[num]; ++i) {
3714         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3715             return -TARGET_EFAULT;
3716         }
3717     }
3718     /* now when we have the args, invoke the appropriate underlying function */
3719     switch (num) {
3720     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3721         return do_socket(a[0], a[1], a[2]);
3722     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3723         return do_bind(a[0], a[1], a[2]);
3724     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3725         return do_connect(a[0], a[1], a[2]);
3726     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3727         return get_errno(listen(a[0], a[1]));
3728     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3729         return do_accept4(a[0], a[1], a[2], 0);
3730     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3731         return do_getsockname(a[0], a[1], a[2]);
3732     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3733         return do_getpeername(a[0], a[1], a[2]);
3734     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3735         return do_socketpair(a[0], a[1], a[2], a[3]);
3736     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3737         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3738     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3739         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3740     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3741         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3742     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3743         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3744     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3745         return get_errno(shutdown(a[0], a[1]));
3746     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3747         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3748     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3749         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3750     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3751         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3752     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3753         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3754     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3755         return do_accept4(a[0], a[1], a[2], a[3]);
3756     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3757         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3758     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3759         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3760     default:
3761         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3762         return -TARGET_EINVAL;
3763     }
3764 }
3765 #endif
3766 
3767 #define N_SHM_REGIONS	32
3768 
3769 static struct shm_region {
3770     abi_ulong start;
3771     abi_ulong size;
3772     bool in_use;
3773 } shm_regions[N_SHM_REGIONS];
3774 
3775 #ifndef TARGET_SEMID64_DS
3776 /* asm-generic version of this struct */
3777 struct target_semid64_ds
3778 {
3779   struct target_ipc_perm sem_perm;
3780   abi_ulong sem_otime;
3781 #if TARGET_ABI_BITS == 32
3782   abi_ulong __unused1;
3783 #endif
3784   abi_ulong sem_ctime;
3785 #if TARGET_ABI_BITS == 32
3786   abi_ulong __unused2;
3787 #endif
3788   abi_ulong sem_nsems;
3789   abi_ulong __unused3;
3790   abi_ulong __unused4;
3791 };
3792 #endif
3793 
3794 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3795                                                abi_ulong target_addr)
3796 {
3797     struct target_ipc_perm *target_ip;
3798     struct target_semid64_ds *target_sd;
3799 
3800     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3801         return -TARGET_EFAULT;
3802     target_ip = &(target_sd->sem_perm);
3803     host_ip->__key = tswap32(target_ip->__key);
3804     host_ip->uid = tswap32(target_ip->uid);
3805     host_ip->gid = tswap32(target_ip->gid);
3806     host_ip->cuid = tswap32(target_ip->cuid);
3807     host_ip->cgid = tswap32(target_ip->cgid);
3808 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3809     host_ip->mode = tswap32(target_ip->mode);
3810 #else
3811     host_ip->mode = tswap16(target_ip->mode);
3812 #endif
3813 #if defined(TARGET_PPC)
3814     host_ip->__seq = tswap32(target_ip->__seq);
3815 #else
3816     host_ip->__seq = tswap16(target_ip->__seq);
3817 #endif
3818     unlock_user_struct(target_sd, target_addr, 0);
3819     return 0;
3820 }
3821 
3822 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3823                                                struct ipc_perm *host_ip)
3824 {
3825     struct target_ipc_perm *target_ip;
3826     struct target_semid64_ds *target_sd;
3827 
3828     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3829         return -TARGET_EFAULT;
3830     target_ip = &(target_sd->sem_perm);
3831     target_ip->__key = tswap32(host_ip->__key);
3832     target_ip->uid = tswap32(host_ip->uid);
3833     target_ip->gid = tswap32(host_ip->gid);
3834     target_ip->cuid = tswap32(host_ip->cuid);
3835     target_ip->cgid = tswap32(host_ip->cgid);
3836 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3837     target_ip->mode = tswap32(host_ip->mode);
3838 #else
3839     target_ip->mode = tswap16(host_ip->mode);
3840 #endif
3841 #if defined(TARGET_PPC)
3842     target_ip->__seq = tswap32(host_ip->__seq);
3843 #else
3844     target_ip->__seq = tswap16(host_ip->__seq);
3845 #endif
3846     unlock_user_struct(target_sd, target_addr, 1);
3847     return 0;
3848 }
3849 
3850 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3851                                                abi_ulong target_addr)
3852 {
3853     struct target_semid64_ds *target_sd;
3854 
3855     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3856         return -TARGET_EFAULT;
3857     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3858         return -TARGET_EFAULT;
3859     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3860     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3861     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3862     unlock_user_struct(target_sd, target_addr, 0);
3863     return 0;
3864 }
3865 
3866 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3867                                                struct semid_ds *host_sd)
3868 {
3869     struct target_semid64_ds *target_sd;
3870 
3871     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3872         return -TARGET_EFAULT;
3873     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3874         return -TARGET_EFAULT;
3875     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3876     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3877     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3878     unlock_user_struct(target_sd, target_addr, 1);
3879     return 0;
3880 }
3881 
3882 struct target_seminfo {
3883     int semmap;
3884     int semmni;
3885     int semmns;
3886     int semmnu;
3887     int semmsl;
3888     int semopm;
3889     int semume;
3890     int semusz;
3891     int semvmx;
3892     int semaem;
3893 };
3894 
3895 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3896                                               struct seminfo *host_seminfo)
3897 {
3898     struct target_seminfo *target_seminfo;
3899     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3900         return -TARGET_EFAULT;
3901     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3902     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3903     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3904     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3905     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3906     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3907     __put_user(host_seminfo->semume, &target_seminfo->semume);
3908     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3909     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3910     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3911     unlock_user_struct(target_seminfo, target_addr, 1);
3912     return 0;
3913 }
3914 
3915 union semun {
3916 	int val;
3917 	struct semid_ds *buf;
3918 	unsigned short *array;
3919 	struct seminfo *__buf;
3920 };
3921 
3922 union target_semun {
3923 	int val;
3924 	abi_ulong buf;
3925 	abi_ulong array;
3926 	abi_ulong __buf;
3927 };
3928 
3929 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3930                                                abi_ulong target_addr)
3931 {
3932     int nsems;
3933     unsigned short *array;
3934     union semun semun;
3935     struct semid_ds semid_ds;
3936     int i, ret;
3937 
3938     semun.buf = &semid_ds;
3939 
3940     ret = semctl(semid, 0, IPC_STAT, semun);
3941     if (ret == -1)
3942         return get_errno(ret);
3943 
3944     nsems = semid_ds.sem_nsems;
3945 
3946     *host_array = g_try_new(unsigned short, nsems);
3947     if (!*host_array) {
3948         return -TARGET_ENOMEM;
3949     }
3950     array = lock_user(VERIFY_READ, target_addr,
3951                       nsems*sizeof(unsigned short), 1);
3952     if (!array) {
3953         g_free(*host_array);
3954         return -TARGET_EFAULT;
3955     }
3956 
3957     for(i=0; i<nsems; i++) {
3958         __get_user((*host_array)[i], &array[i]);
3959     }
3960     unlock_user(array, target_addr, 0);
3961 
3962     return 0;
3963 }
3964 
3965 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3966                                                unsigned short **host_array)
3967 {
3968     int nsems;
3969     unsigned short *array;
3970     union semun semun;
3971     struct semid_ds semid_ds;
3972     int i, ret;
3973 
3974     semun.buf = &semid_ds;
3975 
3976     ret = semctl(semid, 0, IPC_STAT, semun);
3977     if (ret == -1)
3978         return get_errno(ret);
3979 
3980     nsems = semid_ds.sem_nsems;
3981 
3982     array = lock_user(VERIFY_WRITE, target_addr,
3983                       nsems*sizeof(unsigned short), 0);
3984     if (!array)
3985         return -TARGET_EFAULT;
3986 
3987     for(i=0; i<nsems; i++) {
3988         __put_user((*host_array)[i], &array[i]);
3989     }
3990     g_free(*host_array);
3991     unlock_user(array, target_addr, 1);
3992 
3993     return 0;
3994 }
3995 
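/* do_semctl() must return target values and target errnos. */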
3996 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3997                                  abi_ulong target_arg)
3998 {
3999     union target_semun target_su = { .buf = target_arg };
4000     union semun arg;
4001     struct semid_ds dsarg;
4002     unsigned short *array = NULL;
4003     struct seminfo seminfo;
4004     abi_long ret = -TARGET_EINVAL;
4005     abi_long err;
4006     cmd &= 0xff;
4007 
4008     switch( cmd ) {
4009 	case GETVAL:
4010 	case SETVAL:
4011             /* In 64-bit cross-endian situations, we will erroneously pick up
4012              * the wrong half of the union for the "val" element.  To rectify
4013              * this, the entire 8-byte structure is byteswapped, followed by
4014              * a swap of the 4-byte val field. In other cases, the data is
4015              * already in proper host byte order. */
4016 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4017 		target_su.buf = tswapal(target_su.buf);
4018 		arg.val = tswap32(target_su.val);
4019 	    } else {
4020 		arg.val = target_su.val;
4021 	    }
4022             ret = get_errno(semctl(semid, semnum, cmd, arg));
4023             break;
4024 	case GETALL:
4025 	case SETALL:
4026             err = target_to_host_semarray(semid, &array, target_su.array);
4027             if (err)
4028                 return err;
4029             arg.array = array;
4030             ret = get_errno(semctl(semid, semnum, cmd, arg));
4031             err = host_to_target_semarray(semid, target_su.array, &array);
4032             if (err)
4033                 return err;
4034             break;
4035 	case IPC_STAT:
4036 	case IPC_SET:
4037 	case SEM_STAT:
4038             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4039             if (err)
4040                 return err;
4041             arg.buf = &dsarg;
4042             ret = get_errno(semctl(semid, semnum, cmd, arg));
4043             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4044             if (err)
4045                 return err;
4046             break;
4047 	case IPC_INFO:
4048 	case SEM_INFO:
4049             arg.__buf = &seminfo;
4050             ret = get_errno(semctl(semid, semnum, cmd, arg));
4051             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4052             if (err)
4053                 return err;
4054             break;
4055 	case IPC_RMID:
4056 	case GETPID:
4057 	case GETNCNT:
4058 	case GETZCNT:
4059             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4060             break;
4061     }
4062 
4063     return ret;
4064 }
4065 
4066 struct target_sembuf {
4067     unsigned short sem_num;
4068     short sem_op;
4069     short sem_flg;
4070 };
4071 
4072 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4073                                              abi_ulong target_addr,
4074                                              unsigned nsops)
4075 {
4076     struct target_sembuf *target_sembuf;
4077     int i;
4078 
4079     target_sembuf = lock_user(VERIFY_READ, target_addr,
4080                               nsops*sizeof(struct target_sembuf), 1);
4081     if (!target_sembuf)
4082         return -TARGET_EFAULT;
4083 
4084     for(i=0; i<nsops; i++) {
4085         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4086         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4087         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4088     }
4089 
4090     unlock_user(target_sembuf, target_addr, 0);
4091 
4092     return 0;
4093 }
4094 
4095 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4096     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4097 
4098 /*
4099  * This macro is required to handle the s390 variant, which passes the
4100  * arguments in a different order than the default.
4101  */
4102 #ifdef __s390x__
4103 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4104   (__nsops), (__timeout), (__sops)
4105 #else
4106 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4107   (__nsops), 0, (__sops), (__timeout)
4108 #endif
4109 
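/* Perform semop()/semtimedop() for the guest; converts the sembuf array and
 * the optional timeout, and must return target values and target errnos. */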
4110 static inline abi_long do_semtimedop(int semid,
4111                                      abi_long ptr,
4112                                      unsigned nsops,
4113                                      abi_long timeout, bool time64)
4114 {
4115     struct sembuf *sops;
4116     struct timespec ts, *pts = NULL;
4117     abi_long ret;
4118 
4119     if (timeout) {
4120         pts = &ts;
4121         if (time64) {
4122             if (target_to_host_timespec64(pts, timeout)) {
4123                 return -TARGET_EFAULT;
4124             }
4125         } else {
4126             if (target_to_host_timespec(pts, timeout)) {
4127                 return -TARGET_EFAULT;
4128             }
4129         }
4130     }
4131 
4132     if (nsops > TARGET_SEMOPM) {
4133         return -TARGET_E2BIG;
4134     }
4135 
4136     sops = g_new(struct sembuf, nsops);
4137 
4138     if (target_to_host_sembuf(sops, ptr, nsops)) {
4139         g_free(sops);
4140         return -TARGET_EFAULT;
4141     }
4142 
4143     ret = -TARGET_ENOSYS;
4144 #ifdef __NR_semtimedop
4145     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4146 #endif
4147 #ifdef __NR_ipc
4148     if (ret == -TARGET_ENOSYS) {
4149         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4150                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4151     }
4152 #endif
4153     g_free(sops);
4154     return ret;
4155 }
4156 #endif
4157 
4158 struct target_msqid_ds
4159 {
4160     struct target_ipc_perm msg_perm;
4161     abi_ulong msg_stime;
4162 #if TARGET_ABI_BITS == 32
4163     abi_ulong __unused1;
4164 #endif
4165     abi_ulong msg_rtime;
4166 #if TARGET_ABI_BITS == 32
4167     abi_ulong __unused2;
4168 #endif
4169     abi_ulong msg_ctime;
4170 #if TARGET_ABI_BITS == 32
4171     abi_ulong __unused3;
4172 #endif
4173     abi_ulong __msg_cbytes;
4174     abi_ulong msg_qnum;
4175     abi_ulong msg_qbytes;
4176     abi_ulong msg_lspid;
4177     abi_ulong msg_lrpid;
4178     abi_ulong __unused4;
4179     abi_ulong __unused5;
4180 };
4181 
4182 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4183                                                abi_ulong target_addr)
4184 {
4185     struct target_msqid_ds *target_md;
4186 
4187     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4188         return -TARGET_EFAULT;
4189     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4190         return -TARGET_EFAULT;
4191     host_md->msg_stime = tswapal(target_md->msg_stime);
4192     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4193     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4194     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4195     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4196     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4197     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4198     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4199     unlock_user_struct(target_md, target_addr, 0);
4200     return 0;
4201 }
4202 
4203 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4204                                                struct msqid_ds *host_md)
4205 {
4206     struct target_msqid_ds *target_md;
4207 
4208     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4209         return -TARGET_EFAULT;
4210     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4211         return -TARGET_EFAULT;
4212     target_md->msg_stime = tswapal(host_md->msg_stime);
4213     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4214     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4215     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4216     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4217     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4218     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4219     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4220     unlock_user_struct(target_md, target_addr, 1);
4221     return 0;
4222 }
4223 
4224 struct target_msginfo {
4225     int msgpool;
4226     int msgmap;
4227     int msgmax;
4228     int msgmnb;
4229     int msgmni;
4230     int msgssz;
4231     int msgtql;
4232     unsigned short int msgseg;
4233 };
4234 
4235 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4236                                               struct msginfo *host_msginfo)
4237 {
4238     struct target_msginfo *target_msginfo;
4239     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4240         return -TARGET_EFAULT;
4241     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4242     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4243     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4244     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4245     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4246     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4247     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4248     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4249     unlock_user_struct(target_msginfo, target_addr, 1);
4250     return 0;
4251 }
4252 
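/* do_msgctl() must return target values and target errnos. */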
4253 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4254 {
4255     struct msqid_ds dsarg;
4256     struct msginfo msginfo;
4257     abi_long ret = -TARGET_EINVAL;
4258 
4259     cmd &= 0xff;
4260 
4261     switch (cmd) {
4262     case IPC_STAT:
4263     case IPC_SET:
4264     case MSG_STAT:
4265         if (target_to_host_msqid_ds(&dsarg,ptr))
4266             return -TARGET_EFAULT;
4267         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4268         if (host_to_target_msqid_ds(ptr,&dsarg))
4269             return -TARGET_EFAULT;
4270         break;
4271     case IPC_RMID:
4272         ret = get_errno(msgctl(msgid, cmd, NULL));
4273         break;
4274     case IPC_INFO:
4275     case MSG_INFO:
4276         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4277         if (host_to_target_msginfo(ptr, &msginfo))
4278             return -TARGET_EFAULT;
4279         break;
4280     }
4281 
4282     return ret;
4283 }
4284 
4285 struct target_msgbuf {
4286     abi_long mtype;
4287     char	mtext[1];
4288 };
4289 
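/* do_msgsnd() must return target values and target errnos. */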
4290 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4291                                  ssize_t msgsz, int msgflg)
4292 {
4293     struct target_msgbuf *target_mb;
4294     struct msgbuf *host_mb;
4295     abi_long ret = 0;
4296 
4297     if (msgsz < 0) {
4298         return -TARGET_EINVAL;
4299     }
4300 
4301     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4302         return -TARGET_EFAULT;
4303     host_mb = g_try_malloc(msgsz + sizeof(long));
4304     if (!host_mb) {
4305         unlock_user_struct(target_mb, msgp, 0);
4306         return -TARGET_ENOMEM;
4307     }
4308     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4309     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4310     ret = -TARGET_ENOSYS;
4311 #ifdef __NR_msgsnd
4312     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4313 #endif
4314 #ifdef __NR_ipc
4315     if (ret == -TARGET_ENOSYS) {
4316 #ifdef __s390x__
4317         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4318                                  host_mb));
4319 #else
4320         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4321                                  host_mb, 0));
4322 #endif
4323     }
4324 #endif
4325     g_free(host_mb);
4326     unlock_user_struct(target_mb, msgp, 0);
4327 
4328     return ret;
4329 }
4330 
4331 #ifdef __NR_ipc
4332 #if defined(__sparc__)
4333 /* For msgrcv, SPARC does not use the kludge on the final 2 arguments.  */
4334 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4335 #elif defined(__s390x__)
4336 /* The s390 sys_ipc variant has only five parameters.  */
4337 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4338     ((long int[]){(long int)__msgp, __msgtyp})
4339 #else
4340 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4341     ((long int[]){(long int)__msgp, __msgtyp}), 0
4342 #endif
4343 #endif
4344 
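/* do_msgrcv() must return target values and target errnos. */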
4345 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4346                                  ssize_t msgsz, abi_long msgtyp,
4347                                  int msgflg)
4348 {
4349     struct target_msgbuf *target_mb;
4350     char *target_mtext;
4351     struct msgbuf *host_mb;
4352     abi_long ret = 0;
4353 
4354     if (msgsz < 0) {
4355         return -TARGET_EINVAL;
4356     }
4357 
4358     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4359         return -TARGET_EFAULT;
4360 
4361     host_mb = g_try_malloc(msgsz + sizeof(long));
4362     if (!host_mb) {
4363         ret = -TARGET_ENOMEM;
4364         goto end;
4365     }
4366     ret = -TARGET_ENOSYS;
4367 #ifdef __NR_msgrcv
4368     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4369 #endif
4370 #ifdef __NR_ipc
4371     if (ret == -TARGET_ENOSYS) {
4372         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4373                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4374     }
4375 #endif
4376 
4377     if (ret > 0) {
4378         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4379         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4380         if (!target_mtext) {
4381             ret = -TARGET_EFAULT;
4382             goto end;
4383         }
4384         memcpy(target_mb->mtext, host_mb->mtext, ret);
4385         unlock_user(target_mtext, target_mtext_addr, ret);
4386     }
4387 
4388     target_mb->mtype = tswapal(host_mb->mtype);
4389 
4390 end:
4391     if (target_mb)
4392         unlock_user_struct(target_mb, msgp, 1);
4393     g_free(host_mb);
4394     return ret;
4395 }
4396 
4397 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4398                                                abi_ulong target_addr)
4399 {
4400     struct target_shmid_ds *target_sd;
4401 
4402     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4403         return -TARGET_EFAULT;
4404     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4405         return -TARGET_EFAULT;
4406     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4407     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4408     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4409     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4410     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4411     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4412     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4413     unlock_user_struct(target_sd, target_addr, 0);
4414     return 0;
4415 }
4416 
4417 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4418                                                struct shmid_ds *host_sd)
4419 {
4420     struct target_shmid_ds *target_sd;
4421 
4422     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4423         return -TARGET_EFAULT;
4424     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4425         return -TARGET_EFAULT;
4426     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4427     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4428     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4429     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4430     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4431     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4432     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4433     unlock_user_struct(target_sd, target_addr, 1);
4434     return 0;
4435 }
4436 
4437 struct  target_shminfo {
4438     abi_ulong shmmax;
4439     abi_ulong shmmin;
4440     abi_ulong shmmni;
4441     abi_ulong shmseg;
4442     abi_ulong shmall;
4443 };
4444 
4445 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4446                                               struct shminfo *host_shminfo)
4447 {
4448     struct target_shminfo *target_shminfo;
4449     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4450         return -TARGET_EFAULT;
4451     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4452     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4453     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4454     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4455     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4456     unlock_user_struct(target_shminfo, target_addr, 1);
4457     return 0;
4458 }
4459 
4460 struct target_shm_info {
4461     int used_ids;
4462     abi_ulong shm_tot;
4463     abi_ulong shm_rss;
4464     abi_ulong shm_swp;
4465     abi_ulong swap_attempts;
4466     abi_ulong swap_successes;
4467 };
4468 
4469 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4470                                                struct shm_info *host_shm_info)
4471 {
4472     struct target_shm_info *target_shm_info;
4473     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4474         return -TARGET_EFAULT;
4475     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4476     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4477     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4478     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4479     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4480     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4481     unlock_user_struct(target_shm_info, target_addr, 1);
4482     return 0;
4483 }
4484 
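/* do_shmctl() must return target values and target errnos. */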
4485 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4486 {
4487     struct shmid_ds dsarg;
4488     struct shminfo shminfo;
4489     struct shm_info shm_info;
4490     abi_long ret = -TARGET_EINVAL;
4491 
4492     cmd &= 0xff;
4493 
4494     switch(cmd) {
4495     case IPC_STAT:
4496     case IPC_SET:
4497     case SHM_STAT:
4498         if (target_to_host_shmid_ds(&dsarg, buf))
4499             return -TARGET_EFAULT;
4500         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4501         if (host_to_target_shmid_ds(buf, &dsarg))
4502             return -TARGET_EFAULT;
4503         break;
4504     case IPC_INFO:
4505         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4506         if (host_to_target_shminfo(buf, &shminfo))
4507             return -TARGET_EFAULT;
4508         break;
4509     case SHM_INFO:
4510         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4511         if (host_to_target_shm_info(buf, &shm_info))
4512             return -TARGET_EFAULT;
4513         break;
4514     case IPC_RMID:
4515     case SHM_LOCK:
4516     case SHM_UNLOCK:
4517         ret = get_errno(shmctl(shmid, cmd, NULL));
4518         break;
4519     }
4520 
4521     return ret;
4522 }
4523 
4524 #ifndef TARGET_FORCE_SHMLBA
4525 /* For most architectures, SHMLBA is the same as the page size;
4526  * some architectures have larger values, in which case they should
4527  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4528  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4529  * and defining its own value for SHMLBA.
4530  *
4531  * The kernel also permits SHMLBA to be set by the architecture to a
4532  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4533  * this means that addresses are rounded to the large size if
4534  * SHM_RND is set but addresses not aligned to that size are not rejected
4535  * as long as they are at least page-aligned. Since the only architecture
4536  * which uses this is ia64, this code doesn't provide for that oddity.
4537  */
4538 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4539 {
4540     return TARGET_PAGE_SIZE;
4541 }
4542 #endif
4543 
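/* Attach a SysV shared memory segment into the guest address space, honouring
 * the target SHMLBA; returns the guest address of the mapping or a target
 * errno. */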
4544 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4545                                  int shmid, abi_ulong shmaddr, int shmflg)
4546 {
4547     CPUState *cpu = env_cpu(cpu_env);
4548     abi_long raddr;
4549     void *host_raddr;
4550     struct shmid_ds shm_info;
4551     int i,ret;
4552     abi_ulong shmlba;
4553 
4554     /* shmat pointers are always untagged */
4555 
4556     /* find out the length of the shared memory segment */
4557     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4558     if (is_error(ret)) {
4559         /* can't get length, bail out */
4560         return ret;
4561     }
4562 
4563     shmlba = target_shmlba(cpu_env);
4564 
4565     if (shmaddr & (shmlba - 1)) {
4566         if (shmflg & SHM_RND) {
4567             shmaddr &= ~(shmlba - 1);
4568         } else {
4569             return -TARGET_EINVAL;
4570         }
4571     }
4572     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4573         return -TARGET_EINVAL;
4574     }
4575 
4576     mmap_lock();
4577 
4578     /*
4579      * We're mapping shared memory, so ensure we generate code for parallel
4580      * execution and flush old translations.  This will work up to the level
4581      * supported by the host -- anything that requires EXCP_ATOMIC will not
4582      * be atomic with respect to an external process.
4583      */
4584     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4585         cpu->tcg_cflags |= CF_PARALLEL;
4586         tb_flush(cpu);
4587     }
4588 
4589     if (shmaddr)
4590         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4591     else {
4592         abi_ulong mmap_start;
4593 
4594         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4595         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4596 
4597         if (mmap_start == -1) {
4598             errno = ENOMEM;
4599             host_raddr = (void *)-1;
4600         } else
4601             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4602                                shmflg | SHM_REMAP);
4603     }
4604 
4605     if (host_raddr == (void *)-1) {
4606         mmap_unlock();
4607         return get_errno((long)host_raddr);
4608     }
4609     raddr=h2g((unsigned long)host_raddr);
4610 
4611     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4612                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4613                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4614 
4615     for (i = 0; i < N_SHM_REGIONS; i++) {
4616         if (!shm_regions[i].in_use) {
4617             shm_regions[i].in_use = true;
4618             shm_regions[i].start = raddr;
4619             shm_regions[i].size = shm_info.shm_segsz;
4620             break;
4621         }
4622     }
4623 
4624     mmap_unlock();
4625     return raddr;
4626 
4627 }
4628 
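/* Detach the segment attached at shmaddr and clear the page flags recorded
 * for it by do_shmat(); returns target errnos. */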
4629 static inline abi_long do_shmdt(abi_ulong shmaddr)
4630 {
4631     int i;
4632     abi_long rv;
4633 
4634     /* shmdt pointers are always untagged */
4635 
4636     mmap_lock();
4637 
4638     for (i = 0; i < N_SHM_REGIONS; ++i) {
4639         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4640             shm_regions[i].in_use = false;
4641             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4642             break;
4643         }
4644     }
4645     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4646 
4647     mmap_unlock();
4648 
4649     return rv;
4650 }
4651 
4652 #ifdef TARGET_NR_ipc
4653 /* ??? This only works with linear mappings.  */
4654 /* do_ipc() must return target values and target errnos. */
4655 static abi_long do_ipc(CPUArchState *cpu_env,
4656                        unsigned int call, abi_long first,
4657                        abi_long second, abi_long third,
4658                        abi_long ptr, abi_long fifth)
4659 {
4660     int version;
4661     abi_long ret = 0;
4662 
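    /*
     * The single sys_ipc entry point multiplexes all SysV IPC calls:
     * the low 16 bits of 'call' select the operation and the high 16
     * bits carry the interface version.
     */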
4663     version = call >> 16;
4664     call &= 0xffff;
4665 
4666     switch (call) {
4667     case IPCOP_semop:
4668         ret = do_semtimedop(first, ptr, second, 0, false);
4669         break;
4670     case IPCOP_semtimedop:
4671     /*
4672      * The s390 sys_ipc variant has only five parameters instead of six
4673      * (as in the generic variant); the only difference is the handling of
4674      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4675      * to a struct timespec while the generic variant uses the fifth one.
4676      */
4677 #if defined(TARGET_S390X)
4678         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4679 #else
4680         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4681 #endif
4682         break;
4683 
4684     case IPCOP_semget:
4685         ret = get_errno(semget(first, second, third));
4686         break;
4687 
4688     case IPCOP_semctl: {
4689         /* The semun argument to semctl is passed by value, so dereference the
4690          * ptr argument. */
4691         abi_ulong atptr;
4692         get_user_ual(atptr, ptr);
4693         ret = do_semctl(first, second, third, atptr);
4694         break;
4695     }
4696 
4697     case IPCOP_msgget:
4698         ret = get_errno(msgget(first, second));
4699         break;
4700 
4701     case IPCOP_msgsnd:
4702         ret = do_msgsnd(first, ptr, second, third);
4703         break;
4704 
4705     case IPCOP_msgctl:
4706         ret = do_msgctl(first, second, ptr);
4707         break;
4708 
4709     case IPCOP_msgrcv:
4710         switch (version) {
4711         case 0:
4712             {
4713                 struct target_ipc_kludge {
4714                     abi_long msgp;
4715                     abi_long msgtyp;
4716                 } *tmp;
4717 
4718                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4719                     ret = -TARGET_EFAULT;
4720                     break;
4721                 }
4722 
4723                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4724 
4725                 unlock_user_struct(tmp, ptr, 0);
4726                 break;
4727             }
4728         default:
4729             ret = do_msgrcv(first, ptr, second, fifth, third);
4730         }
4731         break;
4732 
4733     case IPCOP_shmat:
4734         switch (version) {
4735         default:
4736         {
4737             abi_ulong raddr;
4738             raddr = do_shmat(cpu_env, first, ptr, second);
4739             if (is_error(raddr))
4740                 return get_errno(raddr);
4741             if (put_user_ual(raddr, third))
4742                 return -TARGET_EFAULT;
4743             break;
4744         }
4745         case 1:
4746             ret = -TARGET_EINVAL;
4747             break;
4748         }
4749         break;
4750     case IPCOP_shmdt:
4751         ret = do_shmdt(ptr);
4752         break;
4753 
4754     case IPCOP_shmget:
4755         /* IPC_* flag values are the same on all linux platforms */
4756         ret = get_errno(shmget(first, second, third));
4757         break;
4758 
4759     /* IPC_* and SHM_* command values are the same on all linux platforms */
4760     case IPCOP_shmctl:
4761         ret = do_shmctl(first, second, ptr);
4762         break;
4763     default:
4764         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4765                       call, version);
4766         ret = -TARGET_ENOSYS;
4767         break;
4768     }
4769     return ret;
4770 }
4771 #endif
4772 
4773 /* kernel structure types definitions */
4774 
4775 #define STRUCT(name, ...) STRUCT_ ## name,
4776 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4777 enum {
4778 #include "syscall_types.h"
4779 STRUCT_MAX
4780 };
4781 #undef STRUCT
4782 #undef STRUCT_SPECIAL
4783 
4784 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4785 #define STRUCT_SPECIAL(name)
4786 #include "syscall_types.h"
4787 #undef STRUCT
4788 #undef STRUCT_SPECIAL
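/*
 * syscall_types.h is included twice with different STRUCT definitions: the
 * first pass builds the enum of STRUCT_* identifiers above, and the second
 * pass emits a struct_<name>_def argtype array for each entry, which the
 * thunking code uses to convert structures between target and host layouts.
 */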
4789 
4790 #define MAX_STRUCT_SIZE 4096
4791 
4792 #ifdef CONFIG_FIEMAP
4793 /* So fiemap access checks don't overflow on 32-bit systems.
4794  * This is very slightly smaller than the limit imposed by
4795  * the underlying kernel.
4796  */
4797 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4798                             / sizeof(struct fiemap_extent))
4799 
4800 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4801                                        int fd, int cmd, abi_long arg)
4802 {
4803     /* The parameter for this ioctl is a struct fiemap followed
4804      * by an array of struct fiemap_extent whose size is set
4805      * in fiemap->fm_extent_count. The array is filled in by the
4806      * ioctl.
4807      */
4808     int target_size_in, target_size_out;
4809     struct fiemap *fm;
4810     const argtype *arg_type = ie->arg_type;
4811     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4812     void *argptr, *p;
4813     abi_long ret;
4814     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4815     uint32_t outbufsz;
4816     int free_fm = 0;
4817 
4818     assert(arg_type[0] == TYPE_PTR);
4819     assert(ie->access == IOC_RW);
4820     arg_type++;
4821     target_size_in = thunk_type_size(arg_type, 0);
4822     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4823     if (!argptr) {
4824         return -TARGET_EFAULT;
4825     }
4826     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4827     unlock_user(argptr, arg, 0);
4828     fm = (struct fiemap *)buf_temp;
4829     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4830         return -TARGET_EINVAL;
4831     }
4832 
4833     outbufsz = sizeof (*fm) +
4834         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4835 
4836     if (outbufsz > MAX_STRUCT_SIZE) {
4837         /* We can't fit all the extents into the fixed size buffer.
4838          * Allocate one that is large enough and use it instead.
4839          */
4840         fm = g_try_malloc(outbufsz);
4841         if (!fm) {
4842             return -TARGET_ENOMEM;
4843         }
4844         memcpy(fm, buf_temp, sizeof(struct fiemap));
4845         free_fm = 1;
4846     }
4847     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4848     if (!is_error(ret)) {
4849         target_size_out = target_size_in;
4850         /* An extent_count of 0 means we were only counting the extents
4851          * so there are no structs to copy
4852          */
4853         if (fm->fm_extent_count != 0) {
4854             target_size_out += fm->fm_mapped_extents * extent_size;
4855         }
4856         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4857         if (!argptr) {
4858             ret = -TARGET_EFAULT;
4859         } else {
4860             /* Convert the struct fiemap */
4861             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4862             if (fm->fm_extent_count != 0) {
4863                 p = argptr + target_size_in;
4864                 /* ...and then all the struct fiemap_extents */
4865                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4866                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4867                                   THUNK_TARGET);
4868                     p += extent_size;
4869                 }
4870             }
4871             unlock_user(argptr, arg, target_size_out);
4872         }
4873     }
4874     if (free_fm) {
4875         g_free(fm);
4876     }
4877     return ret;
4878 }
4879 #endif
4880 
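/*
 * SIOCGIFCONF: struct ifconf carries a length and a pointer to an array of
 * struct ifreq.  The target and host ifreq sizes differ, so the length is
 * rescaled to host units before the ioctl and back to target units (and the
 * array converted entry by entry) afterwards.
 */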
4881 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4882                                 int fd, int cmd, abi_long arg)
4883 {
4884     const argtype *arg_type = ie->arg_type;
4885     int target_size;
4886     void *argptr;
4887     int ret;
4888     struct ifconf *host_ifconf;
4889     uint32_t outbufsz;
4890     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4891     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4892     int target_ifreq_size;
4893     int nb_ifreq;
4894     int free_buf = 0;
4895     int i;
4896     int target_ifc_len;
4897     abi_long target_ifc_buf;
4898     int host_ifc_len;
4899     char *host_ifc_buf;
4900 
4901     assert(arg_type[0] == TYPE_PTR);
4902     assert(ie->access == IOC_RW);
4903 
4904     arg_type++;
4905     target_size = thunk_type_size(arg_type, 0);
4906 
4907     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4908     if (!argptr)
4909         return -TARGET_EFAULT;
4910     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4911     unlock_user(argptr, arg, 0);
4912 
4913     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4914     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4915     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4916 
4917     if (target_ifc_buf != 0) {
4918         target_ifc_len = host_ifconf->ifc_len;
4919         nb_ifreq = target_ifc_len / target_ifreq_size;
4920         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4921 
4922         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4923         if (outbufsz > MAX_STRUCT_SIZE) {
4924             /*
4925              * We can't fit all the ifreq entries into the fixed size buffer.
4926              * Allocate one that is large enough and use it instead.
4927              */
4928             host_ifconf = g_try_malloc(outbufsz);
4929             if (!host_ifconf) {
4930                 return -TARGET_ENOMEM;
4931             }
4932             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4933             free_buf = 1;
4934         }
4935         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4936 
4937         host_ifconf->ifc_len = host_ifc_len;
4938     } else {
4939         host_ifc_buf = NULL;
4940     }
4941     host_ifconf->ifc_buf = host_ifc_buf;
4942 
4943     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4944     if (!is_error(ret)) {
4945         /* convert host ifc_len to target ifc_len */
4946 
4947         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4948         target_ifc_len = nb_ifreq * target_ifreq_size;
4949         host_ifconf->ifc_len = target_ifc_len;
4950 
4951         /* restore target ifc_buf */
4952 
4953         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4954 
4955         /* copy struct ifconf to target user */
4956 
4957         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4958         if (!argptr)
4959             return -TARGET_EFAULT;
4960         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4961         unlock_user(argptr, arg, target_size);
4962 
4963         if (target_ifc_buf != 0) {
4964             /* copy ifreq[] to target user */
4965             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4966             for (i = 0; i < nb_ifreq ; i++) {
4967                 thunk_convert(argptr + i * target_ifreq_size,
4968                               host_ifc_buf + i * sizeof(struct ifreq),
4969                               ifreq_arg_type, THUNK_TARGET);
4970             }
4971             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4972         }
4973     }
4974 
4975     if (free_buf) {
4976         g_free(host_ifconf);
4977     }
4978 
4979     return ret;
4980 }
4981 
4982 #if defined(CONFIG_USBFS)
4983 #if HOST_LONG_BITS > 64
4984 #error USBDEVFS thunks do not support >64 bit hosts yet.
4985 #endif
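/*
 * Book-keeping for asynchronous USB requests: each guest URB gets a
 * live_urb wrapper that records the guest URB address and guest buffer
 * address next to the host URB handed to the kernel, so that REAPURB can
 * map the returned host pointer back to guest state (via offsetof below).
 */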
4986 struct live_urb {
4987     uint64_t target_urb_adr;
4988     uint64_t target_buf_adr;
4989     char *target_buf_ptr;
4990     struct usbdevfs_urb host_urb;
4991 };
4992 
4993 static GHashTable *usbdevfs_urb_hashtable(void)
4994 {
4995     static GHashTable *urb_hashtable;
4996 
4997     if (!urb_hashtable) {
4998         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4999     }
5000     return urb_hashtable;
5001 }
5002 
5003 static void urb_hashtable_insert(struct live_urb *urb)
5004 {
5005     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5006     g_hash_table_insert(urb_hashtable, urb, urb);
5007 }
5008 
5009 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5010 {
5011     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5012     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5013 }
5014 
5015 static void urb_hashtable_remove(struct live_urb *urb)
5016 {
5017     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5018     g_hash_table_remove(urb_hashtable, urb);
5019 }
5020 
5021 static abi_long
5022 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5023                           int fd, int cmd, abi_long arg)
5024 {
5025     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5026     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5027     struct live_urb *lurb;
5028     void *argptr;
5029     uint64_t hurb;
5030     int target_size;
5031     uintptr_t target_urb_adr;
5032     abi_long ret;
5033 
5034     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5035 
5036     memset(buf_temp, 0, sizeof(uint64_t));
5037     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5038     if (is_error(ret)) {
5039         return ret;
5040     }
5041 
5042     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5043     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5044     if (!lurb->target_urb_adr) {
5045         return -TARGET_EFAULT;
5046     }
5047     urb_hashtable_remove(lurb);
5048     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5049         lurb->host_urb.buffer_length);
5050     lurb->target_buf_ptr = NULL;
5051 
5052     /* restore the guest buffer pointer */
5053     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5054 
5055     /* update the guest urb struct */
5056     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5057     if (!argptr) {
5058         g_free(lurb);
5059         return -TARGET_EFAULT;
5060     }
5061     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5062     unlock_user(argptr, lurb->target_urb_adr, target_size);
5063 
5064     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5065     /* write back the urb handle */
5066     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5067     if (!argptr) {
5068         g_free(lurb);
5069         return -TARGET_EFAULT;
5070     }
5071 
5072     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5073     target_urb_adr = lurb->target_urb_adr;
5074     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5075     unlock_user(argptr, arg, target_size);
5076 
5077     g_free(lurb);
5078     return ret;
5079 }
5080 
5081 static abi_long
5082 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5083                              uint8_t *buf_temp __attribute__((unused)),
5084                              int fd, int cmd, abi_long arg)
5085 {
5086     struct live_urb *lurb;
5087 
5088     /* map target address back to host URB with metadata. */
5089     lurb = urb_hashtable_lookup(arg);
5090     if (!lurb) {
5091         return -TARGET_EFAULT;
5092     }
5093     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5094 }
5095 
5096 static abi_long
5097 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5098                             int fd, int cmd, abi_long arg)
5099 {
5100     const argtype *arg_type = ie->arg_type;
5101     int target_size;
5102     abi_long ret;
5103     void *argptr;
5104     int rw_dir;
5105     struct live_urb *lurb;
5106 
5107     /*
5108      * Each submitted URB needs to map to a unique ID for the
5109      * kernel, and that unique ID needs to be a pointer to
5110      * host memory.  Hence, we need to malloc for each URB.
5111      * Isochronous transfers have a variable length struct.
5112      */
5113     arg_type++;
5114     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5115 
5116     /* construct host copy of urb and metadata */
5117     lurb = g_try_new0(struct live_urb, 1);
5118     if (!lurb) {
5119         return -TARGET_ENOMEM;
5120     }
5121 
5122     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5123     if (!argptr) {
5124         g_free(lurb);
5125         return -TARGET_EFAULT;
5126     }
5127     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5128     unlock_user(argptr, arg, 0);
5129 
5130     lurb->target_urb_adr = arg;
5131     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5132 
5133     /* buffer space used depends on endpoint type so lock the entire buffer */
5134     /* control type urbs should check the buffer contents for true direction */
5135     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5136     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5137         lurb->host_urb.buffer_length, 1);
5138     if (lurb->target_buf_ptr == NULL) {
5139         g_free(lurb);
5140         return -TARGET_EFAULT;
5141     }
5142 
5143     /* update buffer pointer in host copy */
5144     lurb->host_urb.buffer = lurb->target_buf_ptr;
5145 
5146     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5147     if (is_error(ret)) {
5148         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5149         g_free(lurb);
5150     } else {
5151         urb_hashtable_insert(lurb);
5152     }
5153 
5154     return ret;
5155 }
5156 #endif /* CONFIG_USBFS */
5157 
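/*
 * Device-mapper ioctls: struct dm_ioctl is followed by a variable-sized
 * payload described by data_start/data_size, whose layout depends on the
 * command, so each DM_* command is converted explicitly in both directions.
 */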
5158 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5159                             int cmd, abi_long arg)
5160 {
5161     void *argptr;
5162     struct dm_ioctl *host_dm;
5163     abi_long guest_data;
5164     uint32_t guest_data_size;
5165     int target_size;
5166     const argtype *arg_type = ie->arg_type;
5167     abi_long ret;
5168     void *big_buf = NULL;
5169     char *host_data;
5170 
5171     arg_type++;
5172     target_size = thunk_type_size(arg_type, 0);
5173     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5174     if (!argptr) {
5175         ret = -TARGET_EFAULT;
5176         goto out;
5177     }
5178     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5179     unlock_user(argptr, arg, 0);
5180 
5181     /* buf_temp is too small, so fetch things into a bigger buffer */
5182     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5183     memcpy(big_buf, buf_temp, target_size);
5184     buf_temp = big_buf;
5185     host_dm = big_buf;
5186 
5187     guest_data = arg + host_dm->data_start;
5188     if ((guest_data - arg) < 0) {
5189         ret = -TARGET_EINVAL;
5190         goto out;
5191     }
5192     guest_data_size = host_dm->data_size - host_dm->data_start;
5193     host_data = (char*)host_dm + host_dm->data_start;
5194 
5195     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5196     if (!argptr) {
5197         ret = -TARGET_EFAULT;
5198         goto out;
5199     }
5200 
5201     switch (ie->host_cmd) {
5202     case DM_REMOVE_ALL:
5203     case DM_LIST_DEVICES:
5204     case DM_DEV_CREATE:
5205     case DM_DEV_REMOVE:
5206     case DM_DEV_SUSPEND:
5207     case DM_DEV_STATUS:
5208     case DM_DEV_WAIT:
5209     case DM_TABLE_STATUS:
5210     case DM_TABLE_CLEAR:
5211     case DM_TABLE_DEPS:
5212     case DM_LIST_VERSIONS:
5213         /* no input data */
5214         break;
5215     case DM_DEV_RENAME:
5216     case DM_DEV_SET_GEOMETRY:
5217         /* data contains only strings */
5218         memcpy(host_data, argptr, guest_data_size);
5219         break;
5220     case DM_TARGET_MSG:
5221         memcpy(host_data, argptr, guest_data_size);
5222         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5223         break;
5224     case DM_TABLE_LOAD:
5225     {
5226         void *gspec = argptr;
5227         void *cur_data = host_data;
5228         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5229         int spec_size = thunk_type_size(arg_type, 0);
5230         int i;
5231 
5232         for (i = 0; i < host_dm->target_count; i++) {
5233             struct dm_target_spec *spec = cur_data;
5234             uint32_t next;
5235             int slen;
5236 
5237             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5238             slen = strlen((char*)gspec + spec_size) + 1;
5239             next = spec->next;
5240             spec->next = sizeof(*spec) + slen;
5241             strcpy((char*)&spec[1], gspec + spec_size);
5242             gspec += next;
5243             cur_data += spec->next;
5244         }
5245         break;
5246     }
5247     default:
5248         ret = -TARGET_EINVAL;
5249         unlock_user(argptr, guest_data, 0);
5250         goto out;
5251     }
5252     unlock_user(argptr, guest_data, 0);
5253 
5254     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5255     if (!is_error(ret)) {
5256         guest_data = arg + host_dm->data_start;
5257         guest_data_size = host_dm->data_size - host_dm->data_start;
5258         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5259         switch (ie->host_cmd) {
5260         case DM_REMOVE_ALL:
5261         case DM_DEV_CREATE:
5262         case DM_DEV_REMOVE:
5263         case DM_DEV_RENAME:
5264         case DM_DEV_SUSPEND:
5265         case DM_DEV_STATUS:
5266         case DM_TABLE_LOAD:
5267         case DM_TABLE_CLEAR:
5268         case DM_TARGET_MSG:
5269         case DM_DEV_SET_GEOMETRY:
5270             /* no return data */
5271             break;
5272         case DM_LIST_DEVICES:
5273         {
5274             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5275             uint32_t remaining_data = guest_data_size;
5276             void *cur_data = argptr;
5277             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5278             int nl_size = 12; /* can't use thunk_size due to alignment */
5279 
5280             while (1) {
5281                 uint32_t next = nl->next;
5282                 if (next) {
5283                     nl->next = nl_size + (strlen(nl->name) + 1);
5284                 }
5285                 if (remaining_data < nl->next) {
5286                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5287                     break;
5288                 }
5289                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5290                 strcpy(cur_data + nl_size, nl->name);
5291                 cur_data += nl->next;
5292                 remaining_data -= nl->next;
5293                 if (!next) {
5294                     break;
5295                 }
5296                 nl = (void*)nl + next;
5297             }
5298             break;
5299         }
5300         case DM_DEV_WAIT:
5301         case DM_TABLE_STATUS:
5302         {
5303             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5304             void *cur_data = argptr;
5305             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5306             int spec_size = thunk_type_size(arg_type, 0);
5307             int i;
5308 
5309             for (i = 0; i < host_dm->target_count; i++) {
5310                 uint32_t next = spec->next;
5311                 int slen = strlen((char*)&spec[1]) + 1;
5312                 spec->next = (cur_data - argptr) + spec_size + slen;
5313                 if (guest_data_size < spec->next) {
5314                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5315                     break;
5316                 }
5317                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5318                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5319                 cur_data = argptr + spec->next;
5320                 spec = (void*)host_dm + host_dm->data_start + next;
5321             }
5322             break;
5323         }
5324         case DM_TABLE_DEPS:
5325         {
5326             void *hdata = (void*)host_dm + host_dm->data_start;
5327             int count = *(uint32_t*)hdata;
5328             uint64_t *hdev = hdata + 8;
5329             uint64_t *gdev = argptr + 8;
5330             int i;
5331 
5332             *(uint32_t*)argptr = tswap32(count);
5333             for (i = 0; i < count; i++) {
5334                 *gdev = tswap64(*hdev);
5335                 gdev++;
5336                 hdev++;
5337             }
5338             break;
5339         }
5340         case DM_LIST_VERSIONS:
5341         {
5342             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5343             uint32_t remaining_data = guest_data_size;
5344             void *cur_data = argptr;
5345             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5346             int vers_size = thunk_type_size(arg_type, 0);
5347 
5348             while (1) {
5349                 uint32_t next = vers->next;
5350                 if (next) {
5351                     vers->next = vers_size + (strlen(vers->name) + 1);
5352                 }
5353                 if (remaining_data < vers->next) {
5354                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5355                     break;
5356                 }
5357                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5358                 strcpy(cur_data + vers_size, vers->name);
5359                 cur_data += vers->next;
5360                 remaining_data -= vers->next;
5361                 if (!next) {
5362                     break;
5363                 }
5364                 vers = (void*)vers + next;
5365             }
5366             break;
5367         }
5368         default:
5369             unlock_user(argptr, guest_data, 0);
5370             ret = -TARGET_EINVAL;
5371             goto out;
5372         }
5373         unlock_user(argptr, guest_data, guest_data_size);
5374 
5375         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5376         if (!argptr) {
5377             ret = -TARGET_EFAULT;
5378             goto out;
5379         }
5380         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5381         unlock_user(argptr, arg, target_size);
5382     }
5383 out:
5384     g_free(big_buf);
5385     return ret;
5386 }
5387 
5388 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5389                                int cmd, abi_long arg)
5390 {
5391     void *argptr;
5392     int target_size;
5393     const argtype *arg_type = ie->arg_type;
5394     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5395     abi_long ret;
5396 
5397     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5398     struct blkpg_partition host_part;
5399 
5400     /* Read and convert blkpg */
5401     arg_type++;
5402     target_size = thunk_type_size(arg_type, 0);
5403     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5404     if (!argptr) {
5405         ret = -TARGET_EFAULT;
5406         goto out;
5407     }
5408     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5409     unlock_user(argptr, arg, 0);
5410 
5411     switch (host_blkpg->op) {
5412     case BLKPG_ADD_PARTITION:
5413     case BLKPG_DEL_PARTITION:
5414         /* payload is struct blkpg_partition */
5415         break;
5416     default:
5417         /* Unknown opcode */
5418         ret = -TARGET_EINVAL;
5419         goto out;
5420     }
5421 
5422     /* Read and convert blkpg->data */
5423     arg = (abi_long)(uintptr_t)host_blkpg->data;
5424     target_size = thunk_type_size(part_arg_type, 0);
5425     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5426     if (!argptr) {
5427         ret = -TARGET_EFAULT;
5428         goto out;
5429     }
5430     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5431     unlock_user(argptr, arg, 0);
5432 
5433     /* Swizzle the data pointer to our local copy and call! */
5434     host_blkpg->data = &host_part;
5435     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5436 
5437 out:
5438     return ret;
5439 }
5440 
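/*
 * SIOCADDRT/SIOCDELRT: struct rtentry contains rt_dev, a pointer to a
 * device name string, which the generic thunk cannot follow.  The struct
 * is therefore converted field by field so the string can be locked and
 * passed through separately.
 */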
5441 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5442                                 int fd, int cmd, abi_long arg)
5443 {
5444     const argtype *arg_type = ie->arg_type;
5445     const StructEntry *se;
5446     const argtype *field_types;
5447     const int *dst_offsets, *src_offsets;
5448     int target_size;
5449     void *argptr;
5450     abi_ulong *target_rt_dev_ptr = NULL;
5451     unsigned long *host_rt_dev_ptr = NULL;
5452     abi_long ret;
5453     int i;
5454 
5455     assert(ie->access == IOC_W);
5456     assert(*arg_type == TYPE_PTR);
5457     arg_type++;
5458     assert(*arg_type == TYPE_STRUCT);
5459     target_size = thunk_type_size(arg_type, 0);
5460     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5461     if (!argptr) {
5462         return -TARGET_EFAULT;
5463     }
5464     arg_type++;
5465     assert(*arg_type == (int)STRUCT_rtentry);
5466     se = struct_entries + *arg_type++;
5467     assert(se->convert[0] == NULL);
5468     /* convert struct here to be able to catch rt_dev string */
5469     field_types = se->field_types;
5470     dst_offsets = se->field_offsets[THUNK_HOST];
5471     src_offsets = se->field_offsets[THUNK_TARGET];
5472     for (i = 0; i < se->nb_fields; i++) {
5473         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5474             assert(*field_types == TYPE_PTRVOID);
5475             target_rt_dev_ptr = argptr + src_offsets[i];
5476             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5477             if (*target_rt_dev_ptr != 0) {
5478                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5479                                                   tswapal(*target_rt_dev_ptr));
5480                 if (!*host_rt_dev_ptr) {
5481                     unlock_user(argptr, arg, 0);
5482                     return -TARGET_EFAULT;
5483                 }
5484             } else {
5485                 *host_rt_dev_ptr = 0;
5486             }
5487             field_types++;
5488             continue;
5489         }
5490         field_types = thunk_convert(buf_temp + dst_offsets[i],
5491                                     argptr + src_offsets[i],
5492                                     field_types, THUNK_HOST);
5493     }
5494     unlock_user(argptr, arg, 0);
5495 
5496     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5497 
5498     assert(host_rt_dev_ptr != NULL);
5499     assert(target_rt_dev_ptr != NULL);
5500     if (*host_rt_dev_ptr != 0) {
5501         unlock_user((void *)*host_rt_dev_ptr,
5502                     *target_rt_dev_ptr, 0);
5503     }
5504     return ret;
5505 }
5506 
5507 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5508                                      int fd, int cmd, abi_long arg)
5509 {
5510     int sig = target_to_host_signal(arg);
5511     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5512 }
5513 
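/*
 * SIOCGSTAMP/SIOCGSTAMPNS come in _OLD and _NEW flavours: the host ioctl is
 * always issued with the native structure, and the result is copied out in
 * either the 32-bit ("old") or 64-bit time layout the guest asked for.
 */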
5514 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5515                                     int fd, int cmd, abi_long arg)
5516 {
5517     struct timeval tv;
5518     abi_long ret;
5519 
5520     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5521     if (is_error(ret)) {
5522         return ret;
5523     }
5524 
5525     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5526         if (copy_to_user_timeval(arg, &tv)) {
5527             return -TARGET_EFAULT;
5528         }
5529     } else {
5530         if (copy_to_user_timeval64(arg, &tv)) {
5531             return -TARGET_EFAULT;
5532         }
5533     }
5534 
5535     return ret;
5536 }
5537 
5538 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5539                                       int fd, int cmd, abi_long arg)
5540 {
5541     struct timespec ts;
5542     abi_long ret;
5543 
5544     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5545     if (is_error(ret)) {
5546         return ret;
5547     }
5548 
5549     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5550         if (host_to_target_timespec(arg, &ts)) {
5551             return -TARGET_EFAULT;
5552         }
5553     } else {
5554         if (host_to_target_timespec64(arg, &ts)) {
5555             return -TARGET_EFAULT;
5556         }
5557     }
5558 
5559     return ret;
5560 }
5561 
5562 #ifdef TIOCGPTPEER
5563 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5564                                      int fd, int cmd, abi_long arg)
5565 {
5566     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5567     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5568 }
5569 #endif
5570 
5571 #ifdef HAVE_DRM_H
5572 
5573 static void unlock_drm_version(struct drm_version *host_ver,
5574                                struct target_drm_version *target_ver,
5575                                bool copy)
5576 {
5577     unlock_user(host_ver->name, target_ver->name,
5578                                 copy ? host_ver->name_len : 0);
5579     unlock_user(host_ver->date, target_ver->date,
5580                                 copy ? host_ver->date_len : 0);
5581     unlock_user(host_ver->desc, target_ver->desc,
5582                                 copy ? host_ver->desc_len : 0);
5583 }
5584 
5585 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5586                                           struct target_drm_version *target_ver)
5587 {
5588     memset(host_ver, 0, sizeof(*host_ver));
5589 
5590     __get_user(host_ver->name_len, &target_ver->name_len);
5591     if (host_ver->name_len) {
5592         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5593                                    target_ver->name_len, 0);
5594         if (!host_ver->name) {
5595             return -EFAULT;
5596         }
5597     }
5598 
5599     __get_user(host_ver->date_len, &target_ver->date_len);
5600     if (host_ver->date_len) {
5601         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5602                                    target_ver->date_len, 0);
5603         if (!host_ver->date) {
5604             goto err;
5605         }
5606     }
5607 
5608     __get_user(host_ver->desc_len, &target_ver->desc_len);
5609     if (host_ver->desc_len) {
5610         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5611                                    target_ver->desc_len, 0);
5612         if (!host_ver->desc) {
5613             goto err;
5614         }
5615     }
5616 
5617     return 0;
5618 err:
5619     unlock_drm_version(host_ver, target_ver, false);
5620     return -EFAULT;
5621 }
5622 
5623 static inline void host_to_target_drmversion(
5624                                           struct target_drm_version *target_ver,
5625                                           struct drm_version *host_ver)
5626 {
5627     __put_user(host_ver->version_major, &target_ver->version_major);
5628     __put_user(host_ver->version_minor, &target_ver->version_minor);
5629     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5630     __put_user(host_ver->name_len, &target_ver->name_len);
5631     __put_user(host_ver->date_len, &target_ver->date_len);
5632     __put_user(host_ver->desc_len, &target_ver->desc_len);
5633     unlock_drm_version(host_ver, target_ver, true);
5634 }
5635 
5636 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5637                              int fd, int cmd, abi_long arg)
5638 {
5639     struct drm_version *ver;
5640     struct target_drm_version *target_ver;
5641     abi_long ret;
5642 
5643     switch (ie->host_cmd) {
5644     case DRM_IOCTL_VERSION:
5645         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5646             return -TARGET_EFAULT;
5647         }
5648         ver = (struct drm_version *)buf_temp;
5649         ret = target_to_host_drmversion(ver, target_ver);
5650         if (!is_error(ret)) {
5651             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5652             if (is_error(ret)) {
5653                 unlock_drm_version(ver, target_ver, false);
5654             } else {
5655                 host_to_target_drmversion(target_ver, ver);
5656             }
5657         }
5658         unlock_user_struct(target_ver, arg, 0);
5659         return ret;
5660     }
5661     return -TARGET_ENOSYS;
5662 }
5663 
5664 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5665                                            struct drm_i915_getparam *gparam,
5666                                            int fd, abi_long arg)
5667 {
5668     abi_long ret;
5669     int value;
5670     struct target_drm_i915_getparam *target_gparam;
5671 
5672     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5673         return -TARGET_EFAULT;
5674     }
5675 
5676     __get_user(gparam->param, &target_gparam->param);
5677     gparam->value = &value;
5678     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5679     put_user_s32(value, target_gparam->value);
5680 
5681     unlock_user_struct(target_gparam, arg, 0);
5682     return ret;
5683 }
5684 
5685 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5686                                   int fd, int cmd, abi_long arg)
5687 {
5688     switch (ie->host_cmd) {
5689     case DRM_IOCTL_I915_GETPARAM:
5690         return do_ioctl_drm_i915_getparam(ie,
5691                                           (struct drm_i915_getparam *)buf_temp,
5692                                           fd, arg);
5693     default:
5694         return -TARGET_ENOSYS;
5695     }
5696 }
5697 
5698 #endif
5699 
5700 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5701                                         int fd, int cmd, abi_long arg)
5702 {
5703     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5704     struct tun_filter *target_filter;
5705     char *target_addr;
5706 
5707     assert(ie->access == IOC_W);
5708 
5709     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5710     if (!target_filter) {
5711         return -TARGET_EFAULT;
5712     }
5713     filter->flags = tswap16(target_filter->flags);
5714     filter->count = tswap16(target_filter->count);
5715     unlock_user(target_filter, arg, 0);
5716 
5717     if (filter->count) {
5718         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5719             MAX_STRUCT_SIZE) {
5720             return -TARGET_EFAULT;
5721         }
5722 
5723         target_addr = lock_user(VERIFY_READ,
5724                                 arg + offsetof(struct tun_filter, addr),
5725                                 filter->count * ETH_ALEN, 1);
5726         if (!target_addr) {
5727             return -TARGET_EFAULT;
5728         }
5729         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5730         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5731     }
5732 
5733     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5734 }
5735 
5736 IOCTLEntry ioctl_entries[] = {
5737 #define IOCTL(cmd, access, ...) \
5738     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5739 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5740     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5741 #define IOCTL_IGNORE(cmd) \
5742     { TARGET_ ## cmd, 0, #cmd },
5743 #include "ioctls.h"
5744     { 0, 0, },
5745 };
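/*
 * ioctl_entries[] is generated from ioctls.h and terminated by an entry with
 * target_cmd == 0.  Plain conversions are described by an argtype list,
 * while commands needing custom marshalling supply a do_ioctl callback via
 * IOCTL_SPECIAL.  As a purely illustrative (hypothetical) sketch, a new
 * read-only ioctl with a struct payload could be listed as
 *   IOCTL(FOO_GET_INFO, IOC_R, MK_PTR(MK_STRUCT(STRUCT_foo_info)))
 * where FOO_GET_INFO and STRUCT_foo_info are made-up names.
 */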
5746 
5747 /* ??? Implement proper locking for ioctls.  */
5748 /* do_ioctl() Must return target values and target errnos. */
5749 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5750 {
5751     const IOCTLEntry *ie;
5752     const argtype *arg_type;
5753     abi_long ret;
5754     uint8_t buf_temp[MAX_STRUCT_SIZE];
5755     int target_size;
5756     void *argptr;
5757 
5758     ie = ioctl_entries;
5759     for(;;) {
5760         if (ie->target_cmd == 0) {
5761             qemu_log_mask(
5762                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5763             return -TARGET_ENOSYS;
5764         }
5765         if (ie->target_cmd == cmd)
5766             break;
5767         ie++;
5768     }
5769     arg_type = ie->arg_type;
5770     if (ie->do_ioctl) {
5771         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5772     } else if (!ie->host_cmd) {
5773         /* Some architectures define BSD ioctls in their headers
5774            that are not implemented in Linux.  */
5775         return -TARGET_ENOSYS;
5776     }
5777 
5778     switch(arg_type[0]) {
5779     case TYPE_NULL:
5780         /* no argument */
5781         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5782         break;
5783     case TYPE_PTRVOID:
5784     case TYPE_INT:
5785     case TYPE_LONG:
5786     case TYPE_ULONG:
5787         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5788         break;
5789     case TYPE_PTR:
5790         arg_type++;
5791         target_size = thunk_type_size(arg_type, 0);
5792         switch(ie->access) {
5793         case IOC_R:
5794             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5795             if (!is_error(ret)) {
5796                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5797                 if (!argptr)
5798                     return -TARGET_EFAULT;
5799                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5800                 unlock_user(argptr, arg, target_size);
5801             }
5802             break;
5803         case IOC_W:
5804             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5805             if (!argptr)
5806                 return -TARGET_EFAULT;
5807             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5808             unlock_user(argptr, arg, 0);
5809             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5810             break;
5811         default:
5812         case IOC_RW:
5813             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5814             if (!argptr)
5815                 return -TARGET_EFAULT;
5816             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5817             unlock_user(argptr, arg, 0);
5818             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5819             if (!is_error(ret)) {
5820                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5821                 if (!argptr)
5822                     return -TARGET_EFAULT;
5823                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5824                 unlock_user(argptr, arg, target_size);
5825             }
5826             break;
5827         }
5828         break;
5829     default:
5830         qemu_log_mask(LOG_UNIMP,
5831                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5832                       (long)cmd, arg_type[0]);
5833         ret = -TARGET_ENOSYS;
5834         break;
5835     }
5836     return ret;
5837 }
5838 
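/*
 * Termios flag translation: each bitmask_transtbl entry maps a (mask, bits)
 * pair in the target flag word to the corresponding (mask, bits) pair in the
 * host flag word; target_to_host_bitmask()/host_to_target_bitmask() walk
 * these tables in both directions.
 */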
5839 static const bitmask_transtbl iflag_tbl[] = {
5840         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5841         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5842         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5843         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5844         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5845         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5846         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5847         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5848         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5849         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5850         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5851         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5852         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5853         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5854         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5855         { 0, 0, 0, 0 }
5856 };
5857 
5858 static const bitmask_transtbl oflag_tbl[] = {
5859 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5860 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5861 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5862 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5863 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5864 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5865 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5866 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5867 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5868 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5869 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5870 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5871 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5872 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5873 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5874 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5875 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5876 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5877 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5878 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5879 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5880 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5881 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5882 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5883 	{ 0, 0, 0, 0 }
5884 };
5885 
5886 static const bitmask_transtbl cflag_tbl[] = {
5887 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5888 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5889 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5890 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5891 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5892 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5893 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5894 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5895 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5896 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5897 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5898 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5899 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5900 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5901 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5902 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5903 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5904 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5905 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5906 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5907 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5908 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5909 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5910 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5911 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5912 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5913 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5914 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5915 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5916 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5917 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5918 	{ 0, 0, 0, 0 }
5919 };
5920 
5921 static const bitmask_transtbl lflag_tbl[] = {
5922   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5923   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5924   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5925   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5926   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5927   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5928   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5929   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5930   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5931   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5932   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5933   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5934   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5935   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5936   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5937   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5938   { 0, 0, 0, 0 }
5939 };
5940 
5941 static void target_to_host_termios (void *dst, const void *src)
5942 {
5943     struct host_termios *host = dst;
5944     const struct target_termios *target = src;
5945 
5946     host->c_iflag =
5947         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5948     host->c_oflag =
5949         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5950     host->c_cflag =
5951         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5952     host->c_lflag =
5953         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5954     host->c_line = target->c_line;
5955 
5956     memset(host->c_cc, 0, sizeof(host->c_cc));
5957     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5958     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5959     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5960     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5961     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5962     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5963     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5964     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5965     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5966     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5967     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5968     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5969     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5970     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5971     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5972     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5973     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5974 }
5975 
5976 static void host_to_target_termios (void *dst, const void *src)
5977 {
5978     struct target_termios *target = dst;
5979     const struct host_termios *host = src;
5980 
5981     target->c_iflag =
5982         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5983     target->c_oflag =
5984         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5985     target->c_cflag =
5986         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5987     target->c_lflag =
5988         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5989     target->c_line = host->c_line;
5990 
5991     memset(target->c_cc, 0, sizeof(target->c_cc));
5992     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5993     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5994     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5995     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5996     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5997     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5998     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5999     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6000     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6001     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6002     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6003     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6004     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6005     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6006     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6007     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6008     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6009 }
6010 
6011 static const StructEntry struct_termios_def = {
6012     .convert = { host_to_target_termios, target_to_host_termios },
6013     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6014     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6015     .print = print_termios,
6016 };
6017 
6018 static const bitmask_transtbl mmap_flags_tbl[] = {
6019     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6020     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6021     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6022     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6023       MAP_ANONYMOUS, MAP_ANONYMOUS },
6024     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6025       MAP_GROWSDOWN, MAP_GROWSDOWN },
6026     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6027       MAP_DENYWRITE, MAP_DENYWRITE },
6028     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6029       MAP_EXECUTABLE, MAP_EXECUTABLE },
6030     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6031     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6032       MAP_NORESERVE, MAP_NORESERVE },
6033     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6034     /* MAP_STACK had been ignored by the kernel for quite some time.
6035        Recognize it for the target insofar as we do not want to pass
6036        it through to the host.  */
6037     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6038     { 0, 0, 0, 0 }
6039 };
6040 
6041 /*
6042  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6043  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6044  */
6045 #if defined(TARGET_I386)
6046 
6047 /* NOTE: there is really one LDT for all the threads */
6048 static uint8_t *ldt_table;
6049 
6050 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6051 {
6052     int size;
6053     void *p;
6054 
6055     if (!ldt_table)
6056         return 0;
6057     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6058     if (size > bytecount)
6059         size = bytecount;
6060     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6061     if (!p)
6062         return -TARGET_EFAULT;
6063     /* ??? Should this be byteswapped?  */
6064     memcpy(p, ldt_table, size);
6065     unlock_user(p, ptr, size);
6066     return size;
6067 }
6068 
6069 /* XXX: add locking support */
6070 static abi_long write_ldt(CPUX86State *env,
6071                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6072 {
6073     struct target_modify_ldt_ldt_s ldt_info;
6074     struct target_modify_ldt_ldt_s *target_ldt_info;
6075     int seg_32bit, contents, read_exec_only, limit_in_pages;
6076     int seg_not_present, useable, lm;
6077     uint32_t *lp, entry_1, entry_2;
6078 
6079     if (bytecount != sizeof(ldt_info))
6080         return -TARGET_EINVAL;
6081     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6082         return -TARGET_EFAULT;
6083     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6084     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6085     ldt_info.limit = tswap32(target_ldt_info->limit);
6086     ldt_info.flags = tswap32(target_ldt_info->flags);
6087     unlock_user_struct(target_ldt_info, ptr, 0);
6088 
6089     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6090         return -TARGET_EINVAL;
6091     seg_32bit = ldt_info.flags & 1;
6092     contents = (ldt_info.flags >> 1) & 3;
6093     read_exec_only = (ldt_info.flags >> 3) & 1;
6094     limit_in_pages = (ldt_info.flags >> 4) & 1;
6095     seg_not_present = (ldt_info.flags >> 5) & 1;
6096     useable = (ldt_info.flags >> 6) & 1;
6097 #ifdef TARGET_ABI32
6098     lm = 0;
6099 #else
6100     lm = (ldt_info.flags >> 7) & 1;
6101 #endif
6102     if (contents == 3) {
6103         if (oldmode)
6104             return -TARGET_EINVAL;
6105         if (seg_not_present == 0)
6106             return -TARGET_EINVAL;
6107     }
6108     /* allocate the LDT */
6109     if (!ldt_table) {
6110         env->ldt.base = target_mmap(0,
6111                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6112                                     PROT_READ|PROT_WRITE,
6113                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6114         if (env->ldt.base == -1)
6115             return -TARGET_ENOMEM;
6116         memset(g2h_untagged(env->ldt.base), 0,
6117                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6118         env->ldt.limit = 0xffff;
6119         ldt_table = g2h_untagged(env->ldt.base);
6120     }
6121 
6122     /* NOTE: same code as Linux kernel */
6123     /* Allow LDTs to be cleared by the user. */
6124     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6125         if (oldmode ||
6126             (contents == 0		&&
6127              read_exec_only == 1	&&
6128              seg_32bit == 0		&&
6129              limit_in_pages == 0	&&
6130              seg_not_present == 1	&&
6131              useable == 0 )) {
6132             entry_1 = 0;
6133             entry_2 = 0;
6134             goto install;
6135         }
6136     }
6137 
6138     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6139         (ldt_info.limit & 0x0ffff);
6140     entry_2 = (ldt_info.base_addr & 0xff000000) |
6141         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6142         (ldt_info.limit & 0xf0000) |
6143         ((read_exec_only ^ 1) << 9) |
6144         (contents << 10) |
6145         ((seg_not_present ^ 1) << 15) |
6146         (seg_32bit << 22) |
6147         (limit_in_pages << 23) |
6148         (lm << 21) |
6149         0x7000;
6150     if (!oldmode)
6151         entry_2 |= (useable << 20);
6152 
6153     /* Install the new entry ...  */
6154 install:
6155     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6156     lp[0] = tswap32(entry_1);
6157     lp[1] = tswap32(entry_2);
6158     return 0;
6159 }
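/*
 * Editorial note: entry_1/entry_2 above use the same packing the Linux
 * kernel applies to segment descriptors (see the "same code as Linux kernel"
 * comment).  entry_1 holds base[15:0] in its high half and limit[15:0] in
 * its low half; entry_2 holds base[31:24], base[23:16], limit[19:16], and
 * the access bits (type, readable/writable, present, AVL, L, D/B, G).  The
 * constant 0x7000 sets the S bit and DPL=3, i.e. a user code/data segment.
 */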
6160 
6161 /* specific and weird i386 syscalls */
6162 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6163                               unsigned long bytecount)
6164 {
6165     abi_long ret;
6166 
6167     switch (func) {
6168     case 0:
6169         ret = read_ldt(ptr, bytecount);
6170         break;
6171     case 1:
6172         ret = write_ldt(env, ptr, bytecount, 1);
6173         break;
6174     case 0x11:
6175         ret = write_ldt(env, ptr, bytecount, 0);
6176         break;
6177     default:
6178         ret = -TARGET_ENOSYS;
6179         break;
6180     }
6181     return ret;
6182 }
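/*
 * Editorial usage sketch (guest-side, hypothetical values): the func codes
 * handled above follow the Linux modify_ldt(2) ABI, e.g.
 *
 *   struct user_desc d = { .entry_number = 0, .base_addr = tls_base,
 *                          .limit = 0xfffff, .seg_32bit = 1,
 *                          .limit_in_pages = 1, .useable = 1 };
 *   syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));   // write, new format
 *   syscall(SYS_modify_ldt, 0, buf, sizeof(buf));   // read the LDT back
 */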
6183 
6184 #if defined(TARGET_ABI32)
6185 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6186 {
6187     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6188     struct target_modify_ldt_ldt_s ldt_info;
6189     struct target_modify_ldt_ldt_s *target_ldt_info;
6190     int seg_32bit, contents, read_exec_only, limit_in_pages;
6191     int seg_not_present, useable, lm;
6192     uint32_t *lp, entry_1, entry_2;
6193     int i;
6194 
6195     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6196     if (!target_ldt_info)
6197         return -TARGET_EFAULT;
6198     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6199     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6200     ldt_info.limit = tswap32(target_ldt_info->limit);
6201     ldt_info.flags = tswap32(target_ldt_info->flags);
6202     if (ldt_info.entry_number == -1) {
6203         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6204             if (gdt_table[i] == 0) {
6205                 ldt_info.entry_number = i;
6206                 target_ldt_info->entry_number = tswap32(i);
6207                 break;
6208             }
6209         }
6210     }
6211     unlock_user_struct(target_ldt_info, ptr, 1);
6212 
6213     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6214         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6215         return -TARGET_EINVAL;
6216     seg_32bit = ldt_info.flags & 1;
6217     contents = (ldt_info.flags >> 1) & 3;
6218     read_exec_only = (ldt_info.flags >> 3) & 1;
6219     limit_in_pages = (ldt_info.flags >> 4) & 1;
6220     seg_not_present = (ldt_info.flags >> 5) & 1;
6221     useable = (ldt_info.flags >> 6) & 1;
6222 #ifdef TARGET_ABI32
6223     lm = 0;
6224 #else
6225     lm = (ldt_info.flags >> 7) & 1;
6226 #endif
6227 
6228     if (contents == 3) {
6229         if (seg_not_present == 0)
6230             return -TARGET_EINVAL;
6231     }
6232 
6233     /* NOTE: same code as Linux kernel */
6234     /* Allow LDTs to be cleared by the user. */
6235     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6236         if ((contents == 0             &&
6237              read_exec_only == 1       &&
6238              seg_32bit == 0            &&
6239              limit_in_pages == 0       &&
6240              seg_not_present == 1      &&
6241              useable == 0 )) {
6242             entry_1 = 0;
6243             entry_2 = 0;
6244             goto install;
6245         }
6246     }
6247 
6248     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6249         (ldt_info.limit & 0x0ffff);
6250     entry_2 = (ldt_info.base_addr & 0xff000000) |
6251         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6252         (ldt_info.limit & 0xf0000) |
6253         ((read_exec_only ^ 1) << 9) |
6254         (contents << 10) |
6255         ((seg_not_present ^ 1) << 15) |
6256         (seg_32bit << 22) |
6257         (limit_in_pages << 23) |
6258         (useable << 20) |
6259         (lm << 21) |
6260         0x7000;
6261 
6262     /* Install the new entry ...  */
6263 install:
6264     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6265     lp[0] = tswap32(entry_1);
6266     lp[1] = tswap32(entry_2);
6267     return 0;
6268 }
6269 
6270 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6271 {
6272     struct target_modify_ldt_ldt_s *target_ldt_info;
6273     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6274     uint32_t base_addr, limit, flags;
6275     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6276     int seg_not_present, useable, lm;
6277     uint32_t *lp, entry_1, entry_2;
6278 
6279     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6280     if (!target_ldt_info)
6281         return -TARGET_EFAULT;
6282     idx = tswap32(target_ldt_info->entry_number);
6283     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6284         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6285         unlock_user_struct(target_ldt_info, ptr, 1);
6286         return -TARGET_EINVAL;
6287     }
6288     lp = (uint32_t *)(gdt_table + idx);
6289     entry_1 = tswap32(lp[0]);
6290     entry_2 = tswap32(lp[1]);
6291 
6292     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6293     contents = (entry_2 >> 10) & 3;
6294     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6295     seg_32bit = (entry_2 >> 22) & 1;
6296     limit_in_pages = (entry_2 >> 23) & 1;
6297     useable = (entry_2 >> 20) & 1;
6298 #ifdef TARGET_ABI32
6299     lm = 0;
6300 #else
6301     lm = (entry_2 >> 21) & 1;
6302 #endif
6303     flags = (seg_32bit << 0) | (contents << 1) |
6304         (read_exec_only << 3) | (limit_in_pages << 4) |
6305         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6306     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6307     base_addr = (entry_1 >> 16) |
6308         (entry_2 & 0xff000000) |
6309         ((entry_2 & 0xff) << 16);
6310     target_ldt_info->base_addr = tswapal(base_addr);
6311     target_ldt_info->limit = tswap32(limit);
6312     target_ldt_info->flags = tswap32(flags);
6313     unlock_user_struct(target_ldt_info, ptr, 1);
6314     return 0;
6315 }
6316 
6317 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6318 {
6319     return -TARGET_ENOSYS;
6320 }
6321 #else
6322 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6323 {
6324     abi_long ret = 0;
6325     abi_ulong val;
6326     int idx;
6327 
6328     switch(code) {
6329     case TARGET_ARCH_SET_GS:
6330     case TARGET_ARCH_SET_FS:
6331         if (code == TARGET_ARCH_SET_GS)
6332             idx = R_GS;
6333         else
6334             idx = R_FS;
6335         cpu_x86_load_seg(env, idx, 0);
6336         env->segs[idx].base = addr;
6337         break;
6338     case TARGET_ARCH_GET_GS:
6339     case TARGET_ARCH_GET_FS:
6340         if (code == TARGET_ARCH_GET_GS)
6341             idx = R_GS;
6342         else
6343             idx = R_FS;
6344         val = env->segs[idx].base;
6345         if (put_user(val, addr, abi_ulong))
6346             ret = -TARGET_EFAULT;
6347         break;
6348     default:
6349         ret = -TARGET_EINVAL;
6350         break;
6351     }
6352     return ret;
6353 }
6354 #endif /* defined(TARGET_ABI32) */
6355 #endif /* defined(TARGET_I386) */
6356 
6357 /*
6358  * These constants are generic.  Supply any that are missing from the host.
6359  */
6360 #ifndef PR_SET_NAME
6361 # define PR_SET_NAME    15
6362 # define PR_GET_NAME    16
6363 #endif
6364 #ifndef PR_SET_FP_MODE
6365 # define PR_SET_FP_MODE 45
6366 # define PR_GET_FP_MODE 46
6367 # define PR_FP_MODE_FR   (1 << 0)
6368 # define PR_FP_MODE_FRE  (1 << 1)
6369 #endif
6370 #ifndef PR_SVE_SET_VL
6371 # define PR_SVE_SET_VL  50
6372 # define PR_SVE_GET_VL  51
6373 # define PR_SVE_VL_LEN_MASK  0xffff
6374 # define PR_SVE_VL_INHERIT   (1 << 17)
6375 #endif
6376 #ifndef PR_PAC_RESET_KEYS
6377 # define PR_PAC_RESET_KEYS  54
6378 # define PR_PAC_APIAKEY   (1 << 0)
6379 # define PR_PAC_APIBKEY   (1 << 1)
6380 # define PR_PAC_APDAKEY   (1 << 2)
6381 # define PR_PAC_APDBKEY   (1 << 3)
6382 # define PR_PAC_APGAKEY   (1 << 4)
6383 #endif
6384 #ifndef PR_SET_TAGGED_ADDR_CTRL
6385 # define PR_SET_TAGGED_ADDR_CTRL 55
6386 # define PR_GET_TAGGED_ADDR_CTRL 56
6387 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6388 #endif
6389 #ifndef PR_MTE_TCF_SHIFT
6390 # define PR_MTE_TCF_SHIFT       1
6391 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6392 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6393 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6394 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6395 # define PR_MTE_TAG_SHIFT       3
6396 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6397 #endif
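/*
 * Editorial example (standard Linux prctl ABI usage, not QEMU-specific): an
 * AArch64 guest typically combines these bits as
 *   prctl(PR_SET_TAGGED_ADDR_CTRL,
 *         PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *         (0xfffe << PR_MTE_TAG_SHIFT), 0, 0, 0);
 * i.e. enable tagged addresses, synchronous MTE checking, and the set of
 * tags the CPU may generate randomly.
 */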
6398 #ifndef PR_SET_IO_FLUSHER
6399 # define PR_SET_IO_FLUSHER 57
6400 # define PR_GET_IO_FLUSHER 58
6401 #endif
6402 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6403 # define PR_SET_SYSCALL_USER_DISPATCH 59
6404 #endif
6405 #ifndef PR_SME_SET_VL
6406 # define PR_SME_SET_VL  63
6407 # define PR_SME_GET_VL  64
6408 # define PR_SME_VL_LEN_MASK  0xffff
6409 # define PR_SME_VL_INHERIT   (1 << 17)
6410 #endif
6411 
6412 #include "target_prctl.h"
6413 
6414 static abi_long do_prctl_inval0(CPUArchState *env)
6415 {
6416     return -TARGET_EINVAL;
6417 }
6418 
6419 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6420 {
6421     return -TARGET_EINVAL;
6422 }
6423 
6424 #ifndef do_prctl_get_fp_mode
6425 #define do_prctl_get_fp_mode do_prctl_inval0
6426 #endif
6427 #ifndef do_prctl_set_fp_mode
6428 #define do_prctl_set_fp_mode do_prctl_inval1
6429 #endif
6430 #ifndef do_prctl_sve_get_vl
6431 #define do_prctl_sve_get_vl do_prctl_inval0
6432 #endif
6433 #ifndef do_prctl_sve_set_vl
6434 #define do_prctl_sve_set_vl do_prctl_inval1
6435 #endif
6436 #ifndef do_prctl_reset_keys
6437 #define do_prctl_reset_keys do_prctl_inval1
6438 #endif
6439 #ifndef do_prctl_set_tagged_addr_ctrl
6440 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6441 #endif
6442 #ifndef do_prctl_get_tagged_addr_ctrl
6443 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6444 #endif
6445 #ifndef do_prctl_get_unalign
6446 #define do_prctl_get_unalign do_prctl_inval1
6447 #endif
6448 #ifndef do_prctl_set_unalign
6449 #define do_prctl_set_unalign do_prctl_inval1
6450 #endif
6451 #ifndef do_prctl_sme_get_vl
6452 #define do_prctl_sme_get_vl do_prctl_inval0
6453 #endif
6454 #ifndef do_prctl_sme_set_vl
6455 #define do_prctl_sme_set_vl do_prctl_inval1
6456 #endif
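/*
 * Editorial note: each target may provide its own do_prctl_* helpers via
 * "target_prctl.h" above; any helper a target does not define falls back to
 * the do_prctl_inval0/do_prctl_inval1 stubs, so the corresponding prctl()
 * option simply returns -TARGET_EINVAL for that target.
 */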
6457 
6458 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6459                          abi_long arg3, abi_long arg4, abi_long arg5)
6460 {
6461     abi_long ret;
6462 
6463     switch (option) {
6464     case PR_GET_PDEATHSIG:
6465         {
6466             int deathsig;
6467             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6468                                   arg3, arg4, arg5));
6469             if (!is_error(ret) &&
6470                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6471                 return -TARGET_EFAULT;
6472             }
6473             return ret;
6474         }
6475     case PR_SET_PDEATHSIG:
6476         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6477                                arg3, arg4, arg5));
6478     case PR_GET_NAME:
6479         {
6480             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6481             if (!name) {
6482                 return -TARGET_EFAULT;
6483             }
6484             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6485                                   arg3, arg4, arg5));
6486             unlock_user(name, arg2, 16);
6487             return ret;
6488         }
6489     case PR_SET_NAME:
6490         {
6491             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6492             if (!name) {
6493                 return -TARGET_EFAULT;
6494             }
6495             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6496                                   arg3, arg4, arg5));
6497             unlock_user(name, arg2, 0);
6498             return ret;
6499         }
6500     case PR_GET_FP_MODE:
6501         return do_prctl_get_fp_mode(env);
6502     case PR_SET_FP_MODE:
6503         return do_prctl_set_fp_mode(env, arg2);
6504     case PR_SVE_GET_VL:
6505         return do_prctl_sve_get_vl(env);
6506     case PR_SVE_SET_VL:
6507         return do_prctl_sve_set_vl(env, arg2);
6508     case PR_SME_GET_VL:
6509         return do_prctl_sme_get_vl(env);
6510     case PR_SME_SET_VL:
6511         return do_prctl_sme_set_vl(env, arg2);
6512     case PR_PAC_RESET_KEYS:
6513         if (arg3 || arg4 || arg5) {
6514             return -TARGET_EINVAL;
6515         }
6516         return do_prctl_reset_keys(env, arg2);
6517     case PR_SET_TAGGED_ADDR_CTRL:
6518         if (arg3 || arg4 || arg5) {
6519             return -TARGET_EINVAL;
6520         }
6521         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6522     case PR_GET_TAGGED_ADDR_CTRL:
6523         if (arg2 || arg3 || arg4 || arg5) {
6524             return -TARGET_EINVAL;
6525         }
6526         return do_prctl_get_tagged_addr_ctrl(env);
6527 
6528     case PR_GET_UNALIGN:
6529         return do_prctl_get_unalign(env, arg2);
6530     case PR_SET_UNALIGN:
6531         return do_prctl_set_unalign(env, arg2);
6532 
6533     case PR_CAP_AMBIENT:
6534     case PR_CAPBSET_READ:
6535     case PR_CAPBSET_DROP:
6536     case PR_GET_DUMPABLE:
6537     case PR_SET_DUMPABLE:
6538     case PR_GET_KEEPCAPS:
6539     case PR_SET_KEEPCAPS:
6540     case PR_GET_SECUREBITS:
6541     case PR_SET_SECUREBITS:
6542     case PR_GET_TIMING:
6543     case PR_SET_TIMING:
6544     case PR_GET_TIMERSLACK:
6545     case PR_SET_TIMERSLACK:
6546     case PR_MCE_KILL:
6547     case PR_MCE_KILL_GET:
6548     case PR_GET_NO_NEW_PRIVS:
6549     case PR_SET_NO_NEW_PRIVS:
6550     case PR_GET_IO_FLUSHER:
6551     case PR_SET_IO_FLUSHER:
6552         /* These options have no pointer arguments, so we can pass them straight on. */
6553         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6554 
6555     case PR_GET_CHILD_SUBREAPER:
6556     case PR_SET_CHILD_SUBREAPER:
6557     case PR_GET_SPECULATION_CTRL:
6558     case PR_SET_SPECULATION_CTRL:
6559     case PR_GET_TID_ADDRESS:
6560         /* TODO */
6561         return -TARGET_EINVAL;
6562 
6563     case PR_GET_FPEXC:
6564     case PR_SET_FPEXC:
6565         /* Was used for SPE on PowerPC. */
6566         return -TARGET_EINVAL;
6567 
6568     case PR_GET_ENDIAN:
6569     case PR_SET_ENDIAN:
6570     case PR_GET_FPEMU:
6571     case PR_SET_FPEMU:
6572     case PR_SET_MM:
6573     case PR_GET_SECCOMP:
6574     case PR_SET_SECCOMP:
6575     case PR_SET_SYSCALL_USER_DISPATCH:
6576     case PR_GET_THP_DISABLE:
6577     case PR_SET_THP_DISABLE:
6578     case PR_GET_TSC:
6579     case PR_SET_TSC:
6580         /* Disabled here to prevent the target from disabling things we need. */
6581         return -TARGET_EINVAL;
6582 
6583     default:
6584         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6585                       option);
6586         return -TARGET_EINVAL;
6587     }
6588 }
6589 
6590 #define NEW_STACK_SIZE 0x40000
6591 
6592 
6593 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6594 typedef struct {
6595     CPUArchState *env;
6596     pthread_mutex_t mutex;
6597     pthread_cond_t cond;
6598     pthread_t thread;
6599     uint32_t tid;
6600     abi_ulong child_tidptr;
6601     abi_ulong parent_tidptr;
6602     sigset_t sigmask;
6603 } new_thread_info;
6604 
6605 static void *clone_func(void *arg)
6606 {
6607     new_thread_info *info = arg;
6608     CPUArchState *env;
6609     CPUState *cpu;
6610     TaskState *ts;
6611 
6612     rcu_register_thread();
6613     tcg_register_thread();
6614     env = info->env;
6615     cpu = env_cpu(env);
6616     thread_cpu = cpu;
6617     ts = (TaskState *)cpu->opaque;
6618     info->tid = sys_gettid();
6619     task_settid(ts);
6620     if (info->child_tidptr)
6621         put_user_u32(info->tid, info->child_tidptr);
6622     if (info->parent_tidptr)
6623         put_user_u32(info->tid, info->parent_tidptr);
6624     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6625     /* Enable signals.  */
6626     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6627     /* Signal to the parent that we're ready.  */
6628     pthread_mutex_lock(&info->mutex);
6629     pthread_cond_broadcast(&info->cond);
6630     pthread_mutex_unlock(&info->mutex);
6631     /* Wait until the parent has finished initializing the tls state.  */
6632     pthread_mutex_lock(&clone_lock);
6633     pthread_mutex_unlock(&clone_lock);
6634     cpu_loop(env);
6635     /* never exits */
6636     return NULL;
6637 }
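/*
 * Editorial summary of the handshake between do_fork() and clone_func():
 * the parent holds clone_lock and info.mutex while it creates the thread;
 * the child fills in info->tid, broadcasts info.cond, then blocks on
 * clone_lock until the parent has finished setting up the TLS state.  Only
 * the parent releases clone_lock, after which the child enters cpu_loop()
 * and never returns.
 */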
6638 
6639 /* do_fork() must return host values and target errnos (unlike most
6640    other do_*() functions). */
6641 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6642                    abi_ulong parent_tidptr, target_ulong newtls,
6643                    abi_ulong child_tidptr)
6644 {
6645     CPUState *cpu = env_cpu(env);
6646     int ret;
6647     TaskState *ts;
6648     CPUState *new_cpu;
6649     CPUArchState *new_env;
6650     sigset_t sigmask;
6651 
6652     flags &= ~CLONE_IGNORED_FLAGS;
6653 
6654     /* Emulate vfork() with fork() */
6655     if (flags & CLONE_VFORK)
6656         flags &= ~(CLONE_VFORK | CLONE_VM);
6657 
6658     if (flags & CLONE_VM) {
6659         TaskState *parent_ts = (TaskState *)cpu->opaque;
6660         new_thread_info info;
6661         pthread_attr_t attr;
6662 
6663         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6664             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6665             return -TARGET_EINVAL;
6666         }
6667 
6668         ts = g_new0(TaskState, 1);
6669         init_task_state(ts);
6670 
6671         /* Grab a mutex so that thread setup appears atomic.  */
6672         pthread_mutex_lock(&clone_lock);
6673 
6674         /*
6675          * If this is our first additional thread, we need to ensure we
6676          * generate code for parallel execution and flush old translations.
6677          * Do this now so that the copy gets CF_PARALLEL too.
6678          */
6679         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6680             cpu->tcg_cflags |= CF_PARALLEL;
6681             tb_flush(cpu);
6682         }
6683 
6684         /* we create a new CPU instance. */
6685         new_env = cpu_copy(env);
6686         /* Init regs that differ from the parent.  */
6687         cpu_clone_regs_child(new_env, newsp, flags);
6688         cpu_clone_regs_parent(env, flags);
6689         new_cpu = env_cpu(new_env);
6690         new_cpu->opaque = ts;
6691         ts->bprm = parent_ts->bprm;
6692         ts->info = parent_ts->info;
6693         ts->signal_mask = parent_ts->signal_mask;
6694 
6695         if (flags & CLONE_CHILD_CLEARTID) {
6696             ts->child_tidptr = child_tidptr;
6697         }
6698 
6699         if (flags & CLONE_SETTLS) {
6700             cpu_set_tls (new_env, newtls);
6701         }
6702 
6703         memset(&info, 0, sizeof(info));
6704         pthread_mutex_init(&info.mutex, NULL);
6705         pthread_mutex_lock(&info.mutex);
6706         pthread_cond_init(&info.cond, NULL);
6707         info.env = new_env;
6708         if (flags & CLONE_CHILD_SETTID) {
6709             info.child_tidptr = child_tidptr;
6710         }
6711         if (flags & CLONE_PARENT_SETTID) {
6712             info.parent_tidptr = parent_tidptr;
6713         }
6714 
6715         ret = pthread_attr_init(&attr);
6716         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6717         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6718         /* It is not safe to deliver signals until the child has finished
6719            initializing, so temporarily block all signals.  */
6720         sigfillset(&sigmask);
6721         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6722         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6723 
6724         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6725         /* TODO: Free new CPU state if thread creation failed.  */
6726 
6727         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6728         pthread_attr_destroy(&attr);
6729         if (ret == 0) {
6730             /* Wait for the child to initialize.  */
6731             pthread_cond_wait(&info.cond, &info.mutex);
6732             ret = info.tid;
6733         } else {
6734             ret = -1;
6735         }
6736         pthread_mutex_unlock(&info.mutex);
6737         pthread_cond_destroy(&info.cond);
6738         pthread_mutex_destroy(&info.mutex);
6739         pthread_mutex_unlock(&clone_lock);
6740     } else {
6741         /* without CLONE_VM, we consider it a fork */
6742         if (flags & CLONE_INVALID_FORK_FLAGS) {
6743             return -TARGET_EINVAL;
6744         }
6745 
6746         /* We can't support custom termination signals */
6747         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6748             return -TARGET_EINVAL;
6749         }
6750 
6751         if (block_signals()) {
6752             return -QEMU_ERESTARTSYS;
6753         }
6754 
6755         fork_start();
6756         ret = fork();
6757         if (ret == 0) {
6758             /* Child Process.  */
6759             cpu_clone_regs_child(env, newsp, flags);
6760             fork_end(1);
6761             /* There is a race condition here.  The parent process could
6762                theoretically read the TID in the child process before the
6763                child tid is set.  Closing it would require either ptrace
6764                (not implemented) or having *_tidptr point at a shared memory
6765                mapping.  We can't repeat the spinlock hack used above because
6766                the child process gets its own copy of the lock.  */
6767             if (flags & CLONE_CHILD_SETTID)
6768                 put_user_u32(sys_gettid(), child_tidptr);
6769             if (flags & CLONE_PARENT_SETTID)
6770                 put_user_u32(sys_gettid(), parent_tidptr);
6771             ts = (TaskState *)cpu->opaque;
6772             if (flags & CLONE_SETTLS)
6773                 cpu_set_tls (env, newtls);
6774             if (flags & CLONE_CHILD_CLEARTID)
6775                 ts->child_tidptr = child_tidptr;
6776         } else {
6777             cpu_clone_regs_parent(env, flags);
6778             fork_end(0);
6779         }
6780     }
6781     return ret;
6782 }
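/*
 * Editorial note: the two branches above mirror clone() semantics as far as
 * user-mode emulation can: CLONE_VM requests become a host pthread sharing
 * the address space (with a private NEW_STACK_SIZE host stack), while
 * everything else, including emulated vfork(), is degraded to a plain host
 * fork() with SIGCHLD as the only supported termination signal.
 */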
6783 
6784 /* Warning: does not handle Linux-specific flags. */
6785 static int target_to_host_fcntl_cmd(int cmd)
6786 {
6787     int ret;
6788 
6789     switch(cmd) {
6790     case TARGET_F_DUPFD:
6791     case TARGET_F_GETFD:
6792     case TARGET_F_SETFD:
6793     case TARGET_F_GETFL:
6794     case TARGET_F_SETFL:
6795     case TARGET_F_OFD_GETLK:
6796     case TARGET_F_OFD_SETLK:
6797     case TARGET_F_OFD_SETLKW:
6798         ret = cmd;
6799         break;
6800     case TARGET_F_GETLK:
6801         ret = F_GETLK64;
6802         break;
6803     case TARGET_F_SETLK:
6804         ret = F_SETLK64;
6805         break;
6806     case TARGET_F_SETLKW:
6807         ret = F_SETLKW64;
6808         break;
6809     case TARGET_F_GETOWN:
6810         ret = F_GETOWN;
6811         break;
6812     case TARGET_F_SETOWN:
6813         ret = F_SETOWN;
6814         break;
6815     case TARGET_F_GETSIG:
6816         ret = F_GETSIG;
6817         break;
6818     case TARGET_F_SETSIG:
6819         ret = F_SETSIG;
6820         break;
6821 #if TARGET_ABI_BITS == 32
6822     case TARGET_F_GETLK64:
6823         ret = F_GETLK64;
6824         break;
6825     case TARGET_F_SETLK64:
6826         ret = F_SETLK64;
6827         break;
6828     case TARGET_F_SETLKW64:
6829         ret = F_SETLKW64;
6830         break;
6831 #endif
6832     case TARGET_F_SETLEASE:
6833         ret = F_SETLEASE;
6834         break;
6835     case TARGET_F_GETLEASE:
6836         ret = F_GETLEASE;
6837         break;
6838 #ifdef F_DUPFD_CLOEXEC
6839     case TARGET_F_DUPFD_CLOEXEC:
6840         ret = F_DUPFD_CLOEXEC;
6841         break;
6842 #endif
6843     case TARGET_F_NOTIFY:
6844         ret = F_NOTIFY;
6845         break;
6846 #ifdef F_GETOWN_EX
6847     case TARGET_F_GETOWN_EX:
6848         ret = F_GETOWN_EX;
6849         break;
6850 #endif
6851 #ifdef F_SETOWN_EX
6852     case TARGET_F_SETOWN_EX:
6853         ret = F_SETOWN_EX;
6854         break;
6855 #endif
6856 #ifdef F_SETPIPE_SZ
6857     case TARGET_F_SETPIPE_SZ:
6858         ret = F_SETPIPE_SZ;
6859         break;
6860     case TARGET_F_GETPIPE_SZ:
6861         ret = F_GETPIPE_SZ;
6862         break;
6863 #endif
6864 #ifdef F_ADD_SEALS
6865     case TARGET_F_ADD_SEALS:
6866         ret = F_ADD_SEALS;
6867         break;
6868     case TARGET_F_GET_SEALS:
6869         ret = F_GET_SEALS;
6870         break;
6871 #endif
6872     default:
6873         ret = -TARGET_EINVAL;
6874         break;
6875     }
6876 
6877 #if defined(__powerpc64__)
6878     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
6879      * the kernel does not support. The glibc fcntl wrapper adjusts them
6880      * to 5, 6 and 7 before making the syscall(). Since we make the
6881      * syscall directly, adjust to what the kernel supports.
6882      */
6883     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6884         ret -= F_GETLK64 - 5;
6885     }
6886 #endif
6887 
6888     return ret;
6889 }
6890 
6891 #define FLOCK_TRANSTBL \
6892     switch (type) { \
6893     TRANSTBL_CONVERT(F_RDLCK); \
6894     TRANSTBL_CONVERT(F_WRLCK); \
6895     TRANSTBL_CONVERT(F_UNLCK); \
6896     }
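/*
 * Editorial note: FLOCK_TRANSTBL is a small X-macro.  Each user below defines
 * TRANSTBL_CONVERT() to expand to a "case" in the desired direction, so the
 * same list of lock types produces both the target-to-host and the
 * host-to-target switch without duplicating the table.
 */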
6897 
6898 static int target_to_host_flock(int type)
6899 {
6900 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6901     FLOCK_TRANSTBL
6902 #undef  TRANSTBL_CONVERT
6903     return -TARGET_EINVAL;
6904 }
6905 
6906 static int host_to_target_flock(int type)
6907 {
6908 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6909     FLOCK_TRANSTBL
6910 #undef  TRANSTBL_CONVERT
6911     /* If we don't know how to convert the value coming
6912      * from the host, we copy it to the target field as-is.
6913      */
6914     return type;
6915 }
6916 
6917 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6918                                             abi_ulong target_flock_addr)
6919 {
6920     struct target_flock *target_fl;
6921     int l_type;
6922 
6923     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6924         return -TARGET_EFAULT;
6925     }
6926 
6927     __get_user(l_type, &target_fl->l_type);
6928     l_type = target_to_host_flock(l_type);
6929     if (l_type < 0) {
6930         return l_type;
6931     }
6932     fl->l_type = l_type;
6933     __get_user(fl->l_whence, &target_fl->l_whence);
6934     __get_user(fl->l_start, &target_fl->l_start);
6935     __get_user(fl->l_len, &target_fl->l_len);
6936     __get_user(fl->l_pid, &target_fl->l_pid);
6937     unlock_user_struct(target_fl, target_flock_addr, 0);
6938     return 0;
6939 }
6940 
6941 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6942                                           const struct flock64 *fl)
6943 {
6944     struct target_flock *target_fl;
6945     short l_type;
6946 
6947     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6948         return -TARGET_EFAULT;
6949     }
6950 
6951     l_type = host_to_target_flock(fl->l_type);
6952     __put_user(l_type, &target_fl->l_type);
6953     __put_user(fl->l_whence, &target_fl->l_whence);
6954     __put_user(fl->l_start, &target_fl->l_start);
6955     __put_user(fl->l_len, &target_fl->l_len);
6956     __put_user(fl->l_pid, &target_fl->l_pid);
6957     unlock_user_struct(target_fl, target_flock_addr, 1);
6958     return 0;
6959 }
6960 
6961 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6962 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6963 
6964 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6965 struct target_oabi_flock64 {
6966     abi_short l_type;
6967     abi_short l_whence;
6968     abi_llong l_start;
6969     abi_llong l_len;
6970     abi_int   l_pid;
6971 } QEMU_PACKED;
6972 
6973 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6974                                                    abi_ulong target_flock_addr)
6975 {
6976     struct target_oabi_flock64 *target_fl;
6977     int l_type;
6978 
6979     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6980         return -TARGET_EFAULT;
6981     }
6982 
6983     __get_user(l_type, &target_fl->l_type);
6984     l_type = target_to_host_flock(l_type);
6985     if (l_type < 0) {
6986         return l_type;
6987     }
6988     fl->l_type = l_type;
6989     __get_user(fl->l_whence, &target_fl->l_whence);
6990     __get_user(fl->l_start, &target_fl->l_start);
6991     __get_user(fl->l_len, &target_fl->l_len);
6992     __get_user(fl->l_pid, &target_fl->l_pid);
6993     unlock_user_struct(target_fl, target_flock_addr, 0);
6994     return 0;
6995 }
6996 
6997 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6998                                                  const struct flock64 *fl)
6999 {
7000     struct target_oabi_flock64 *target_fl;
7001     short l_type;
7002 
7003     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7004         return -TARGET_EFAULT;
7005     }
7006 
7007     l_type = host_to_target_flock(fl->l_type);
7008     __put_user(l_type, &target_fl->l_type);
7009     __put_user(fl->l_whence, &target_fl->l_whence);
7010     __put_user(fl->l_start, &target_fl->l_start);
7011     __put_user(fl->l_len, &target_fl->l_len);
7012     __put_user(fl->l_pid, &target_fl->l_pid);
7013     unlock_user_struct(target_fl, target_flock_addr, 1);
7014     return 0;
7015 }
7016 #endif
7017 
7018 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7019                                               abi_ulong target_flock_addr)
7020 {
7021     struct target_flock64 *target_fl;
7022     int l_type;
7023 
7024     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7025         return -TARGET_EFAULT;
7026     }
7027 
7028     __get_user(l_type, &target_fl->l_type);
7029     l_type = target_to_host_flock(l_type);
7030     if (l_type < 0) {
7031         return l_type;
7032     }
7033     fl->l_type = l_type;
7034     __get_user(fl->l_whence, &target_fl->l_whence);
7035     __get_user(fl->l_start, &target_fl->l_start);
7036     __get_user(fl->l_len, &target_fl->l_len);
7037     __get_user(fl->l_pid, &target_fl->l_pid);
7038     unlock_user_struct(target_fl, target_flock_addr, 0);
7039     return 0;
7040 }
7041 
7042 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7043                                             const struct flock64 *fl)
7044 {
7045     struct target_flock64 *target_fl;
7046     short l_type;
7047 
7048     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7049         return -TARGET_EFAULT;
7050     }
7051 
7052     l_type = host_to_target_flock(fl->l_type);
7053     __put_user(l_type, &target_fl->l_type);
7054     __put_user(fl->l_whence, &target_fl->l_whence);
7055     __put_user(fl->l_start, &target_fl->l_start);
7056     __put_user(fl->l_len, &target_fl->l_len);
7057     __put_user(fl->l_pid, &target_fl->l_pid);
7058     unlock_user_struct(target_fl, target_flock_addr, 1);
7059     return 0;
7060 }
7061 
7062 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7063 {
7064     struct flock64 fl64;
7065 #ifdef F_GETOWN_EX
7066     struct f_owner_ex fox;
7067     struct target_f_owner_ex *target_fox;
7068 #endif
7069     abi_long ret;
7070     int host_cmd = target_to_host_fcntl_cmd(cmd);
7071 
7072     if (host_cmd == -TARGET_EINVAL)
7073         return host_cmd;
7074 
7075     switch(cmd) {
7076     case TARGET_F_GETLK:
7077         ret = copy_from_user_flock(&fl64, arg);
7078         if (ret) {
7079             return ret;
7080         }
7081         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7082         if (ret == 0) {
7083             ret = copy_to_user_flock(arg, &fl64);
7084         }
7085         break;
7086 
7087     case TARGET_F_SETLK:
7088     case TARGET_F_SETLKW:
7089         ret = copy_from_user_flock(&fl64, arg);
7090         if (ret) {
7091             return ret;
7092         }
7093         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7094         break;
7095 
7096     case TARGET_F_GETLK64:
7097     case TARGET_F_OFD_GETLK:
7098         ret = copy_from_user_flock64(&fl64, arg);
7099         if (ret) {
7100             return ret;
7101         }
7102         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7103         if (ret == 0) {
7104             ret = copy_to_user_flock64(arg, &fl64);
7105         }
7106         break;
7107     case TARGET_F_SETLK64:
7108     case TARGET_F_SETLKW64:
7109     case TARGET_F_OFD_SETLK:
7110     case TARGET_F_OFD_SETLKW:
7111         ret = copy_from_user_flock64(&fl64, arg);
7112         if (ret) {
7113             return ret;
7114         }
7115         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7116         break;
7117 
7118     case TARGET_F_GETFL:
7119         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7120         if (ret >= 0) {
7121             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7122         }
7123         break;
7124 
7125     case TARGET_F_SETFL:
7126         ret = get_errno(safe_fcntl(fd, host_cmd,
7127                                    target_to_host_bitmask(arg,
7128                                                           fcntl_flags_tbl)));
7129         break;
7130 
7131 #ifdef F_GETOWN_EX
7132     case TARGET_F_GETOWN_EX:
7133         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7134         if (ret >= 0) {
7135             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7136                 return -TARGET_EFAULT;
7137             target_fox->type = tswap32(fox.type);
7138             target_fox->pid = tswap32(fox.pid);
7139             unlock_user_struct(target_fox, arg, 1);
7140         }
7141         break;
7142 #endif
7143 
7144 #ifdef F_SETOWN_EX
7145     case TARGET_F_SETOWN_EX:
7146         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7147             return -TARGET_EFAULT;
7148         fox.type = tswap32(target_fox->type);
7149         fox.pid = tswap32(target_fox->pid);
7150         unlock_user_struct(target_fox, arg, 0);
7151         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7152         break;
7153 #endif
7154 
7155     case TARGET_F_SETSIG:
7156         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7157         break;
7158 
7159     case TARGET_F_GETSIG:
7160         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7161         break;
7162 
7163     case TARGET_F_SETOWN:
7164     case TARGET_F_GETOWN:
7165     case TARGET_F_SETLEASE:
7166     case TARGET_F_GETLEASE:
7167     case TARGET_F_SETPIPE_SZ:
7168     case TARGET_F_GETPIPE_SZ:
7169     case TARGET_F_ADD_SEALS:
7170     case TARGET_F_GET_SEALS:
7171         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7172         break;
7173 
7174     default:
7175         ret = get_errno(safe_fcntl(fd, cmd, arg));
7176         break;
7177     }
7178     return ret;
7179 }
7180 
7181 #ifdef USE_UID16
7182 
7183 static inline int high2lowuid(int uid)
7184 {
7185     if (uid > 65535)
7186         return 65534;
7187     else
7188         return uid;
7189 }
7190 
7191 static inline int high2lowgid(int gid)
7192 {
7193     if (gid > 65535)
7194         return 65534;
7195     else
7196         return gid;
7197 }
7198 
7199 static inline int low2highuid(int uid)
7200 {
7201     if ((int16_t)uid == -1)
7202         return -1;
7203     else
7204         return uid;
7205 }
7206 
7207 static inline int low2highgid(int gid)
7208 {
7209     if ((int16_t)gid == -1)
7210         return -1;
7211     else
7212         return gid;
7213 }
7214 static inline int tswapid(int id)
7215 {
7216     return tswap16(id);
7217 }
7218 
7219 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7220 
7221 #else /* !USE_UID16 */
7222 static inline int high2lowuid(int uid)
7223 {
7224     return uid;
7225 }
7226 static inline int high2lowgid(int gid)
7227 {
7228     return gid;
7229 }
7230 static inline int low2highuid(int uid)
7231 {
7232     return uid;
7233 }
7234 static inline int low2highgid(int gid)
7235 {
7236     return gid;
7237 }
7238 static inline int tswapid(int id)
7239 {
7240     return tswap32(id);
7241 }
7242 
7243 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7244 
7245 #endif /* USE_UID16 */
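/*
 * Editorial example: with USE_UID16 the guest ABI only has 16-bit IDs, so a
 * host uid such as 100000 is reported back to the guest as the overflow id
 * 65534, while -1 (the "unchanged" sentinel used by setresuid() and friends)
 * is preserved by low2highuid()/low2highgid() when converting the other way.
 */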
7246 
7247 /* We must do direct syscalls for setting UID/GID, because we want to
7248  * implement the Linux system call semantics of "change only for this thread",
7249  * not the libc/POSIX semantics of "change for all threads in process".
7250  * (See http://ewontfix.com/17/ for more details.)
7251  * We use the 32-bit version of the syscalls if present; if it is not
7252  * then either the host architecture supports 32-bit UIDs natively with
7253  * the standard syscall, or the 16-bit UID is the best we can do.
7254  */
7255 #ifdef __NR_setuid32
7256 #define __NR_sys_setuid __NR_setuid32
7257 #else
7258 #define __NR_sys_setuid __NR_setuid
7259 #endif
7260 #ifdef __NR_setgid32
7261 #define __NR_sys_setgid __NR_setgid32
7262 #else
7263 #define __NR_sys_setgid __NR_setgid
7264 #endif
7265 #ifdef __NR_setresuid32
7266 #define __NR_sys_setresuid __NR_setresuid32
7267 #else
7268 #define __NR_sys_setresuid __NR_setresuid
7269 #endif
7270 #ifdef __NR_setresgid32
7271 #define __NR_sys_setresgid __NR_setresgid32
7272 #else
7273 #define __NR_sys_setresgid __NR_setresgid
7274 #endif
7275 
7276 _syscall1(int, sys_setuid, uid_t, uid)
7277 _syscall1(int, sys_setgid, gid_t, gid)
7278 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7279 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
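/*
 * Editorial sketch: the wrappers above mean that, e.g., TARGET_NR_setuid is
 * forwarded as
 *   ret = get_errno(sys_setuid(low2highuid(arg1)));
 * i.e. a raw per-thread kernel syscall, rather than glibc's setuid(), which
 * would broadcast the change to every thread in the emulator process.
 */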
7280 
7281 void syscall_init(void)
7282 {
7283     IOCTLEntry *ie;
7284     const argtype *arg_type;
7285     int size;
7286 
7287     thunk_init(STRUCT_MAX);
7288 
7289 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7290 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7291 #include "syscall_types.h"
7292 #undef STRUCT
7293 #undef STRUCT_SPECIAL
7294 
7295     /* We patch the ioctl size if necessary. We rely on the fact that
7296        no ioctl has all bits set to '1' in the size field. */
7297     ie = ioctl_entries;
7298     while (ie->target_cmd != 0) {
7299         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7300             TARGET_IOC_SIZEMASK) {
7301             arg_type = ie->arg_type;
7302             if (arg_type[0] != TYPE_PTR) {
7303                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7304                         ie->target_cmd);
7305                 exit(1);
7306             }
7307             arg_type++;
7308             size = thunk_type_size(arg_type, 0);
7309             ie->target_cmd = (ie->target_cmd &
7310                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7311                 (size << TARGET_IOC_SIZESHIFT);
7312         }
7313 
7314         /* automatic consistency check if same arch */
7315 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7316     (defined(__x86_64__) && defined(TARGET_X86_64))
7317         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7318             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7319                     ie->name, ie->target_cmd, ie->host_cmd);
7320         }
7321 #endif
7322         ie++;
7323     }
7324 }
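/*
 * Editorial note on the size patching above: entries in the ioctl table whose
 * argument size depends on the target are registered with TARGET_IOC_SIZEMASK
 * (all size bits set) as a placeholder; syscall_init() replaces that
 * placeholder with the size the thunk layer computes for the pointed-to
 * structure, so target_cmd then matches the value the guest actually issues.
 */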
7325 
7326 #ifdef TARGET_NR_truncate64
7327 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7328                                          abi_long arg2,
7329                                          abi_long arg3,
7330                                          abi_long arg4)
7331 {
7332     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7333         arg2 = arg3;
7334         arg3 = arg4;
7335     }
7336     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7337 }
7338 #endif
7339 
7340 #ifdef TARGET_NR_ftruncate64
7341 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7342                                           abi_long arg2,
7343                                           abi_long arg3,
7344                                           abi_long arg4)
7345 {
7346     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7347         arg2 = arg3;
7348         arg3 = arg4;
7349     }
7350     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7351 }
7352 #endif
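/*
 * Editorial note: regpairs_aligned() is true for 32-bit ABIs that require a
 * 64-bit syscall argument to start in an even register pair; the kernel ABI
 * then inserts a padding argument, which is why the helpers above shift
 * arg3/arg4 down before reassembling the offset with target_offset64().
 */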
7353 
7354 #if defined(TARGET_NR_timer_settime) || \
7355     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7356 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7357                                                  abi_ulong target_addr)
7358 {
7359     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7360                                 offsetof(struct target_itimerspec,
7361                                          it_interval)) ||
7362         target_to_host_timespec(&host_its->it_value, target_addr +
7363                                 offsetof(struct target_itimerspec,
7364                                          it_value))) {
7365         return -TARGET_EFAULT;
7366     }
7367 
7368     return 0;
7369 }
7370 #endif
7371 
7372 #if defined(TARGET_NR_timer_settime64) || \
7373     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7374 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7375                                                    abi_ulong target_addr)
7376 {
7377     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7378                                   offsetof(struct target__kernel_itimerspec,
7379                                            it_interval)) ||
7380         target_to_host_timespec64(&host_its->it_value, target_addr +
7381                                   offsetof(struct target__kernel_itimerspec,
7382                                            it_value))) {
7383         return -TARGET_EFAULT;
7384     }
7385 
7386     return 0;
7387 }
7388 #endif
7389 
7390 #if ((defined(TARGET_NR_timerfd_gettime) || \
7391       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7392       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7393 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7394                                                  struct itimerspec *host_its)
7395 {
7396     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7397                                                        it_interval),
7398                                 &host_its->it_interval) ||
7399         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7400                                                        it_value),
7401                                 &host_its->it_value)) {
7402         return -TARGET_EFAULT;
7403     }
7404     return 0;
7405 }
7406 #endif
7407 
7408 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7409       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7410       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7411 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7412                                                    struct itimerspec *host_its)
7413 {
7414     if (host_to_target_timespec64(target_addr +
7415                                   offsetof(struct target__kernel_itimerspec,
7416                                            it_interval),
7417                                   &host_its->it_interval) ||
7418         host_to_target_timespec64(target_addr +
7419                                   offsetof(struct target__kernel_itimerspec,
7420                                            it_value),
7421                                   &host_its->it_value)) {
7422         return -TARGET_EFAULT;
7423     }
7424     return 0;
7425 }
7426 #endif
7427 
7428 #if defined(TARGET_NR_adjtimex) || \
7429     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7430 static inline abi_long target_to_host_timex(struct timex *host_tx,
7431                                             abi_long target_addr)
7432 {
7433     struct target_timex *target_tx;
7434 
7435     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7436         return -TARGET_EFAULT;
7437     }
7438 
7439     __get_user(host_tx->modes, &target_tx->modes);
7440     __get_user(host_tx->offset, &target_tx->offset);
7441     __get_user(host_tx->freq, &target_tx->freq);
7442     __get_user(host_tx->maxerror, &target_tx->maxerror);
7443     __get_user(host_tx->esterror, &target_tx->esterror);
7444     __get_user(host_tx->status, &target_tx->status);
7445     __get_user(host_tx->constant, &target_tx->constant);
7446     __get_user(host_tx->precision, &target_tx->precision);
7447     __get_user(host_tx->tolerance, &target_tx->tolerance);
7448     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7449     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7450     __get_user(host_tx->tick, &target_tx->tick);
7451     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7452     __get_user(host_tx->jitter, &target_tx->jitter);
7453     __get_user(host_tx->shift, &target_tx->shift);
7454     __get_user(host_tx->stabil, &target_tx->stabil);
7455     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7456     __get_user(host_tx->calcnt, &target_tx->calcnt);
7457     __get_user(host_tx->errcnt, &target_tx->errcnt);
7458     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7459     __get_user(host_tx->tai, &target_tx->tai);
7460 
7461     unlock_user_struct(target_tx, target_addr, 0);
7462     return 0;
7463 }
7464 
7465 static inline abi_long host_to_target_timex(abi_long target_addr,
7466                                             struct timex *host_tx)
7467 {
7468     struct target_timex *target_tx;
7469 
7470     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7471         return -TARGET_EFAULT;
7472     }
7473 
7474     __put_user(host_tx->modes, &target_tx->modes);
7475     __put_user(host_tx->offset, &target_tx->offset);
7476     __put_user(host_tx->freq, &target_tx->freq);
7477     __put_user(host_tx->maxerror, &target_tx->maxerror);
7478     __put_user(host_tx->esterror, &target_tx->esterror);
7479     __put_user(host_tx->status, &target_tx->status);
7480     __put_user(host_tx->constant, &target_tx->constant);
7481     __put_user(host_tx->precision, &target_tx->precision);
7482     __put_user(host_tx->tolerance, &target_tx->tolerance);
7483     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7484     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7485     __put_user(host_tx->tick, &target_tx->tick);
7486     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7487     __put_user(host_tx->jitter, &target_tx->jitter);
7488     __put_user(host_tx->shift, &target_tx->shift);
7489     __put_user(host_tx->stabil, &target_tx->stabil);
7490     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7491     __put_user(host_tx->calcnt, &target_tx->calcnt);
7492     __put_user(host_tx->errcnt, &target_tx->errcnt);
7493     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7494     __put_user(host_tx->tai, &target_tx->tai);
7495 
7496     unlock_user_struct(target_tx, target_addr, 1);
7497     return 0;
7498 }
7499 #endif
7500 
7501 
7502 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7503 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7504                                               abi_long target_addr)
7505 {
7506     struct target__kernel_timex *target_tx;
7507 
7508     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7509                                  offsetof(struct target__kernel_timex,
7510                                           time))) {
7511         return -TARGET_EFAULT;
7512     }
7513 
7514     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7515         return -TARGET_EFAULT;
7516     }
7517 
7518     __get_user(host_tx->modes, &target_tx->modes);
7519     __get_user(host_tx->offset, &target_tx->offset);
7520     __get_user(host_tx->freq, &target_tx->freq);
7521     __get_user(host_tx->maxerror, &target_tx->maxerror);
7522     __get_user(host_tx->esterror, &target_tx->esterror);
7523     __get_user(host_tx->status, &target_tx->status);
7524     __get_user(host_tx->constant, &target_tx->constant);
7525     __get_user(host_tx->precision, &target_tx->precision);
7526     __get_user(host_tx->tolerance, &target_tx->tolerance);
7527     __get_user(host_tx->tick, &target_tx->tick);
7528     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7529     __get_user(host_tx->jitter, &target_tx->jitter);
7530     __get_user(host_tx->shift, &target_tx->shift);
7531     __get_user(host_tx->stabil, &target_tx->stabil);
7532     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7533     __get_user(host_tx->calcnt, &target_tx->calcnt);
7534     __get_user(host_tx->errcnt, &target_tx->errcnt);
7535     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7536     __get_user(host_tx->tai, &target_tx->tai);
7537 
7538     unlock_user_struct(target_tx, target_addr, 0);
7539     return 0;
7540 }
7541 
7542 static inline abi_long host_to_target_timex64(abi_long target_addr,
7543                                               struct timex *host_tx)
7544 {
7545     struct target__kernel_timex *target_tx;
7546 
7547     if (copy_to_user_timeval64(target_addr +
7548                               offsetof(struct target__kernel_timex, time),
7549                               &host_tx->time)) {
7550         return -TARGET_EFAULT;
7551     }
7552 
7553     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7554         return -TARGET_EFAULT;
7555     }
7556 
7557     __put_user(host_tx->modes, &target_tx->modes);
7558     __put_user(host_tx->offset, &target_tx->offset);
7559     __put_user(host_tx->freq, &target_tx->freq);
7560     __put_user(host_tx->maxerror, &target_tx->maxerror);
7561     __put_user(host_tx->esterror, &target_tx->esterror);
7562     __put_user(host_tx->status, &target_tx->status);
7563     __put_user(host_tx->constant, &target_tx->constant);
7564     __put_user(host_tx->precision, &target_tx->precision);
7565     __put_user(host_tx->tolerance, &target_tx->tolerance);
7566     __put_user(host_tx->tick, &target_tx->tick);
7567     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7568     __put_user(host_tx->jitter, &target_tx->jitter);
7569     __put_user(host_tx->shift, &target_tx->shift);
7570     __put_user(host_tx->stabil, &target_tx->stabil);
7571     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7572     __put_user(host_tx->calcnt, &target_tx->calcnt);
7573     __put_user(host_tx->errcnt, &target_tx->errcnt);
7574     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7575     __put_user(host_tx->tai, &target_tx->tai);
7576 
7577     unlock_user_struct(target_tx, target_addr, 1);
7578     return 0;
7579 }
7580 #endif
7581 
7582 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7583 #define sigev_notify_thread_id _sigev_un._tid
7584 #endif
7585 
7586 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7587                                                abi_ulong target_addr)
7588 {
7589     struct target_sigevent *target_sevp;
7590 
7591     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7592         return -TARGET_EFAULT;
7593     }
7594 
7595     /* This union is awkward on 64 bit systems because it has a 32 bit
7596      * integer and a pointer in it; we follow the conversion approach
7597      * used for handling sigval types in signal.c so the guest should get
7598      * the correct value back even if we did a 64 bit byteswap and it's
7599      * using the 32 bit integer.
7600      */
7601     host_sevp->sigev_value.sival_ptr =
7602         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7603     host_sevp->sigev_signo =
7604         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7605     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7606     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7607 
7608     unlock_user_struct(target_sevp, target_addr, 1);
7609     return 0;
7610 }
7611 
7612 #if defined(TARGET_NR_mlockall)
7613 static inline int target_to_host_mlockall_arg(int arg)
7614 {
7615     int result = 0;
7616 
7617     if (arg & TARGET_MCL_CURRENT) {
7618         result |= MCL_CURRENT;
7619     }
7620     if (arg & TARGET_MCL_FUTURE) {
7621         result |= MCL_FUTURE;
7622     }
7623 #ifdef MCL_ONFAULT
7624     if (arg & TARGET_MCL_ONFAULT) {
7625         result |= MCL_ONFAULT;
7626     }
7627 #endif
7628 
7629     return result;
7630 }
7631 #endif
7632 
7633 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7634      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7635      defined(TARGET_NR_newfstatat))
7636 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7637                                              abi_ulong target_addr,
7638                                              struct stat *host_st)
7639 {
7640 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7641     if (cpu_env->eabi) {
7642         struct target_eabi_stat64 *target_st;
7643 
7644         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7645             return -TARGET_EFAULT;
7646         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7647         __put_user(host_st->st_dev, &target_st->st_dev);
7648         __put_user(host_st->st_ino, &target_st->st_ino);
7649 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7650         __put_user(host_st->st_ino, &target_st->__st_ino);
7651 #endif
7652         __put_user(host_st->st_mode, &target_st->st_mode);
7653         __put_user(host_st->st_nlink, &target_st->st_nlink);
7654         __put_user(host_st->st_uid, &target_st->st_uid);
7655         __put_user(host_st->st_gid, &target_st->st_gid);
7656         __put_user(host_st->st_rdev, &target_st->st_rdev);
7657         __put_user(host_st->st_size, &target_st->st_size);
7658         __put_user(host_st->st_blksize, &target_st->st_blksize);
7659         __put_user(host_st->st_blocks, &target_st->st_blocks);
7660         __put_user(host_st->st_atime, &target_st->target_st_atime);
7661         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7662         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7663 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7664         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7665         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7666         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7667 #endif
7668         unlock_user_struct(target_st, target_addr, 1);
7669     } else
7670 #endif
7671     {
7672 #if defined(TARGET_HAS_STRUCT_STAT64)
7673         struct target_stat64 *target_st;
7674 #else
7675         struct target_stat *target_st;
7676 #endif
7677 
7678         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7679             return -TARGET_EFAULT;
7680         memset(target_st, 0, sizeof(*target_st));
7681         __put_user(host_st->st_dev, &target_st->st_dev);
7682         __put_user(host_st->st_ino, &target_st->st_ino);
7683 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7684         __put_user(host_st->st_ino, &target_st->__st_ino);
7685 #endif
7686         __put_user(host_st->st_mode, &target_st->st_mode);
7687         __put_user(host_st->st_nlink, &target_st->st_nlink);
7688         __put_user(host_st->st_uid, &target_st->st_uid);
7689         __put_user(host_st->st_gid, &target_st->st_gid);
7690         __put_user(host_st->st_rdev, &target_st->st_rdev);
7691         /* XXX: better use of kernel struct */
7692         __put_user(host_st->st_size, &target_st->st_size);
7693         __put_user(host_st->st_blksize, &target_st->st_blksize);
7694         __put_user(host_st->st_blocks, &target_st->st_blocks);
7695         __put_user(host_st->st_atime, &target_st->target_st_atime);
7696         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7697         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7698 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7699         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7700         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7701         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7702 #endif
7703         unlock_user_struct(target_st, target_addr, 1);
7704     }
7705 
7706     return 0;
7707 }
7708 #endif
7709 
7710 #if defined(TARGET_NR_statx) && defined(__NR_statx)
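/* Copy statx results out to the guest's struct statx at target_addr. */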
7711 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7712                                             abi_ulong target_addr)
7713 {
7714     struct target_statx *target_stx;
7715 
7716     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7717         return -TARGET_EFAULT;
7718     }
7719     memset(target_stx, 0, sizeof(*target_stx));
7720 
7721     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7722     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7723     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7724     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7725     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7726     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7727     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7728     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7729     __put_user(host_stx->stx_size, &target_stx->stx_size);
7730     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7731     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7732     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7733     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7734     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7735     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7736     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7737     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7738     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7739     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7740     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7741     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7742     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7743     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7744 
7745     unlock_user_struct(target_stx, target_addr, 1);
7746 
7747     return 0;
7748 }
7749 #endif
7750 
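/* Raw futex syscall.  32-bit hosts use the _time64 variant when the host
   timespec has a 64-bit tv_sec; 64-bit hosts always use plain futex. */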
7751 static int do_sys_futex(int *uaddr, int op, int val,
7752                          const struct timespec *timeout, int *uaddr2,
7753                          int val3)
7754 {
7755 #if HOST_LONG_BITS == 64
7756 #if defined(__NR_futex)
7757     /* 64-bit hosts always have a 64-bit time_t and define no _time64 variant. */
7758     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7759 
7760 #endif
7761 #else /* HOST_LONG_BITS == 64 */
7762 #if defined(__NR_futex_time64)
7763     if (sizeof(timeout->tv_sec) == 8) {
7764         /* _time64 function on 32bit arch */
7765         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7766     }
7767 #endif
7768 #if defined(__NR_futex)
7769     /* old function on 32bit arch */
7770     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7771 #endif
7772 #endif /* HOST_LONG_BITS == 64 */
7773     g_assert_not_reached();
7774 }
7775 
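/* Like do_sys_futex, but issued through the safe_syscall wrappers and
   returning -TARGET_<errno> (or -TARGET_ENOSYS if no futex call exists). */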
7776 static int do_safe_futex(int *uaddr, int op, int val,
7777                          const struct timespec *timeout, int *uaddr2,
7778                          int val3)
7779 {
7780 #if HOST_LONG_BITS == 64
7781 #if defined(__NR_futex)
7782     /* always a 64-bit time_t, it doesn't define _time64 version  */
7783     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7784 #endif
7785 #else /* HOST_LONG_BITS == 64 */
7786 #if defined(__NR_futex_time64)
7787     if (sizeof(timeout->tv_sec) == 8) {
7788         /* _time64 function on 32bit arch */
7789         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7790                                            val3));
7791     }
7792 #endif
7793 #if defined(__NR_futex)
7794     /* old function on 32bit arch */
7795     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7796 #endif
7797 #endif /* HOST_LONG_BITS == 64 */
7798     return -TARGET_ENOSYS;
7799 }
7800 
7801 /* ??? Using host futex calls even when target atomic operations
7802    are not really atomic probably breaks things.  However, implementing
7803    futexes locally would make futexes shared between multiple processes
7804    tricky.  They are probably useless in that case anyway, since guest
7805    atomic operations won't work either.  */
7806 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7807 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7808                     int op, int val, target_ulong timeout,
7809                     target_ulong uaddr2, int val3)
7810 {
7811     struct timespec ts, *pts = NULL;
7812     void *haddr2 = NULL;
7813     int base_op;
7814 
7815     /* We assume FUTEX_* constants are the same on both host and target. */
7816 #ifdef FUTEX_CMD_MASK
7817     base_op = op & FUTEX_CMD_MASK;
7818 #else
7819     base_op = op;
7820 #endif
7821     switch (base_op) {
7822     case FUTEX_WAIT:
7823     case FUTEX_WAIT_BITSET:
7824         val = tswap32(val);
7825         break;
7826     case FUTEX_WAIT_REQUEUE_PI:
7827         val = tswap32(val);
7828         haddr2 = g2h(cpu, uaddr2);
7829         break;
7830     case FUTEX_LOCK_PI:
7831     case FUTEX_LOCK_PI2:
7832         break;
7833     case FUTEX_WAKE:
7834     case FUTEX_WAKE_BITSET:
7835     case FUTEX_TRYLOCK_PI:
7836     case FUTEX_UNLOCK_PI:
7837         timeout = 0;
7838         break;
7839     case FUTEX_FD:
7840         val = target_to_host_signal(val);
7841         timeout = 0;
7842         break;
7843     case FUTEX_CMP_REQUEUE:
7844     case FUTEX_CMP_REQUEUE_PI:
7845         val3 = tswap32(val3);
7846         /* fall through */
7847     case FUTEX_REQUEUE:
7848     case FUTEX_WAKE_OP:
7849         /*
7850          * For these, the 4th argument is not TIMEOUT, but VAL2.
7851          * But the prototype of do_safe_futex takes a pointer, so
7852          * insert casts to satisfy the compiler.  We do not need
7853          * to tswap VAL2 since it's not compared to guest memory.
7854          */
7855         pts = (struct timespec *)(uintptr_t)timeout;
7856         timeout = 0;
7857         haddr2 = g2h(cpu, uaddr2);
7858         break;
7859     default:
7860         return -TARGET_ENOSYS;
7861     }
7862     if (timeout) {
7863         pts = &ts;
7864         if (time64
7865             ? target_to_host_timespec64(pts, timeout)
7866             : target_to_host_timespec(pts, timeout)) {
7867             return -TARGET_EFAULT;
7868         }
7869     }
7870     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7871 }
7872 #endif
7873 
7874 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7875 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7876                                      abi_long handle, abi_long mount_id,
7877                                      abi_long flags)
7878 {
7879     struct file_handle *target_fh;
7880     struct file_handle *fh;
7881     int mid = 0;
7882     abi_long ret;
7883     char *name;
7884     unsigned int size, total_size;
7885 
7886     if (get_user_s32(size, handle)) {
7887         return -TARGET_EFAULT;
7888     }
7889 
7890     name = lock_user_string(pathname);
7891     if (!name) {
7892         return -TARGET_EFAULT;
7893     }
7894 
7895     total_size = sizeof(struct file_handle) + size;
7896     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7897     if (!target_fh) {
7898         unlock_user(name, pathname, 0);
7899         return -TARGET_EFAULT;
7900     }
7901 
7902     fh = g_malloc0(total_size);
7903     fh->handle_bytes = size;
7904 
7905     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7906     unlock_user(name, pathname, 0);
7907 
7908     /* man name_to_handle_at(2):
7909      * Other than the use of the handle_bytes field, the caller should treat
7910      * the file_handle structure as an opaque data type
7911      */
7912 
7913     memcpy(target_fh, fh, total_size);
7914     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7915     target_fh->handle_type = tswap32(fh->handle_type);
7916     g_free(fh);
7917     unlock_user(target_fh, handle, total_size);
7918 
7919     if (put_user_s32(mid, mount_id)) {
7920         return -TARGET_EFAULT;
7921     }
7922 
7923     return ret;
7924 
7925 }
7926 #endif
7927 
7928 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7929 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7930                                      abi_long flags)
7931 {
7932     struct file_handle *target_fh;
7933     struct file_handle *fh;
7934     unsigned int size, total_size;
7935     abi_long ret;
7936 
7937     if (get_user_s32(size, handle)) {
7938         return -TARGET_EFAULT;
7939     }
7940 
7941     total_size = sizeof(struct file_handle) + size;
7942     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7943     if (!target_fh) {
7944         return -TARGET_EFAULT;
7945     }
7946 
7947     fh = g_memdup(target_fh, total_size);
7948     fh->handle_bytes = size;
7949     fh->handle_type = tswap32(target_fh->handle_type);
7950 
7951     ret = get_errno(open_by_handle_at(mount_fd, fh,
7952                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7953 
7954     g_free(fh);
7955 
7956     unlock_user(target_fh, handle, total_size);
7957 
7958     return ret;
7959 }
7960 #endif
7961 
7962 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7963 
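/* Implement signalfd()/signalfd4(): convert the guest sigset and flags to
   host values, then register the new fd so the data read from it is
   translated for the guest. */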
7964 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7965 {
7966     int host_flags;
7967     target_sigset_t *target_mask;
7968     sigset_t host_mask;
7969     abi_long ret;
7970 
7971     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7972         return -TARGET_EINVAL;
7973     }
7974     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7975         return -TARGET_EFAULT;
7976     }
7977 
7978     target_to_host_sigset(&host_mask, target_mask);
7979 
7980     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7981 
7982     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7983     if (ret >= 0) {
7984         fd_trans_register(ret, &target_signalfd_trans);
7985     }
7986 
7987     unlock_user_struct(target_mask, mask, 0);
7988 
7989     return ret;
7990 }
7991 #endif
7992 
7993 /* Map host to target signal numbers for the wait family of syscalls.
7994    Assume all other status bits are the same.  */
7995 int host_to_target_waitstatus(int status)
7996 {
7997     if (WIFSIGNALED(status)) {
7998         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7999     }
8000     if (WIFSTOPPED(status)) {
8001         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8002                | (status & 0xff);
8003     }
8004     return status;
8005 }
8006 
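/* Synthesize /proc/self/cmdline from the argv recorded at exec time. */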
8007 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8008 {
8009     CPUState *cpu = env_cpu(cpu_env);
8010     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8011     int i;
8012 
8013     for (i = 0; i < bprm->argc; i++) {
8014         size_t len = strlen(bprm->argv[i]) + 1;
8015 
8016         if (write(fd, bprm->argv[i], len) != len) {
8017             return -1;
8018         }
8019     }
8020 
8021     return 0;
8022 }
8023 
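/* Synthesize /proc/self/maps: walk the host's own mappings and print the
   ones visible in the guest address space, using guest addresses and the
   guest's page protection flags. */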
8024 static int open_self_maps(CPUArchState *cpu_env, int fd)
8025 {
8026     CPUState *cpu = env_cpu(cpu_env);
8027     TaskState *ts = cpu->opaque;
8028     GSList *map_info = read_self_maps();
8029     GSList *s;
8030     int count;
8031 
8032     for (s = map_info; s; s = g_slist_next(s)) {
8033         MapInfo *e = (MapInfo *) s->data;
8034 
8035         if (h2g_valid(e->start)) {
8036             unsigned long min = e->start;
8037             unsigned long max = e->end;
8038             int flags = page_get_flags(h2g(min));
8039             const char *path;
8040 
8041             max = h2g_valid(max - 1) ?
8042                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8043 
8044             if (page_check_range(h2g(min), max - min, flags) == -1) {
8045                 continue;
8046             }
8047 
8048 #ifdef TARGET_HPPA
8049             if (h2g(max) == ts->info->stack_limit) {
8050 #else
8051             if (h2g(min) == ts->info->stack_limit) {
8052 #endif
8053                 path = "[stack]";
8054             } else {
8055                 path = e->path;
8056             }
8057 
8058             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8059                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8060                             h2g(min), h2g(max - 1) + 1,
8061                             (flags & PAGE_READ) ? 'r' : '-',
8062                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8063                             (flags & PAGE_EXEC) ? 'x' : '-',
8064                             e->is_priv ? 'p' : 's',
8065                             (uint64_t) e->offset, e->dev, e->inode);
8066             if (path) {
8067                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8068             } else {
8069                 dprintf(fd, "\n");
8070             }
8071         }
8072     }
8073 
8074     free_self_maps(map_info);
8075 
8076 #ifdef TARGET_VSYSCALL_PAGE
8077     /*
8078      * We only support execution from the vsyscall page.
8079      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8080      */
8081     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8082                     " --xp 00000000 00:00 0",
8083                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8084     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8085 #endif
8086 
8087     return 0;
8088 }
8089 
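/* Synthesize /proc/self/stat; only pid, comm, ppid, starttime and the
   start of the stack carry real values, everything else reads as zero. */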
8090 static int open_self_stat(CPUArchState *cpu_env, int fd)
8091 {
8092     CPUState *cpu = env_cpu(cpu_env);
8093     TaskState *ts = cpu->opaque;
8094     g_autoptr(GString) buf = g_string_new(NULL);
8095     int i;
8096 
8097     for (i = 0; i < 44; i++) {
8098         if (i == 0) {
8099             /* pid */
8100             g_string_printf(buf, FMT_pid " ", getpid());
8101         } else if (i == 1) {
8102             /* app name */
8103             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8104             bin = bin ? bin + 1 : ts->bprm->argv[0];
8105             g_string_printf(buf, "(%.15s) ", bin);
8106         } else if (i == 3) {
8107             /* ppid */
8108             g_string_printf(buf, FMT_pid " ", getppid());
8109         } else if (i == 21) {
8110             /* starttime */
8111             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8112         } else if (i == 27) {
8113             /* stack bottom */
8114             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8115         } else {
8116             /* for the rest, there is MasterCard */
8117             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8118         }
8119 
8120         if (write(fd, buf->str, buf->len) != buf->len) {
8121             return -1;
8122         }
8123     }
8124 
8125     return 0;
8126 }
8127 
8128 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8129 {
8130     CPUState *cpu = env_cpu(cpu_env);
8131     TaskState *ts = cpu->opaque;
8132     abi_ulong auxv = ts->info->saved_auxv;
8133     abi_ulong len = ts->info->auxv_len;
8134     char *ptr;
8135 
8136     /*
8137      * Auxiliary vector is stored in target process stack.
8138      * read in whole auxv vector and copy it to file
8139      */
8140     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8141     if (ptr != NULL) {
8142         while (len > 0) {
8143             ssize_t r;
8144             r = write(fd, ptr, len);
8145             if (r <= 0) {
8146                 break;
8147             }
8148             len -= r;
8149             ptr += r;
8150         }
8151         lseek(fd, 0, SEEK_SET);
8152         unlock_user(ptr, auxv, len);
8153     }
8154 
8155     return 0;
8156 }
8157 
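/* Return 1 if filename names /proc/self/<entry> or /proc/<our pid>/<entry>. */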
8158 static int is_proc_myself(const char *filename, const char *entry)
8159 {
8160     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8161         filename += strlen("/proc/");
8162         if (!strncmp(filename, "self/", strlen("self/"))) {
8163             filename += strlen("self/");
8164         } else if (*filename >= '1' && *filename <= '9') {
8165             char myself[80];
8166             snprintf(myself, sizeof(myself), "%d/", getpid());
8167             if (!strncmp(filename, myself, strlen(myself))) {
8168                 filename += strlen(myself);
8169             } else {
8170                 return 0;
8171             }
8172         } else {
8173             return 0;
8174         }
8175         if (!strcmp(filename, entry)) {
8176             return 1;
8177         }
8178     }
8179     return 0;
8180 }
8181 
8182 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8183                       const char *fmt, int code)
8184 {
8185     if (logfile) {
8186         CPUState *cs = env_cpu(env);
8187 
8188         fprintf(logfile, fmt, code);
8189         fprintf(logfile, "Failing executable: %s\n", exec_path);
8190         cpu_dump_state(cs, logfile, 0);
8191         open_self_maps(env, fileno(logfile));
8192     }
8193 }
8194 
8195 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8196 {
8197     /* dump to console */
8198     excp_dump_file(stderr, env, fmt, code);
8199 
8200     /* dump to log file */
8201     if (qemu_log_separate()) {
8202         FILE *logfile = qemu_log_trylock();
8203 
8204         excp_dump_file(logfile, env, fmt, code);
8205         qemu_log_unlock(logfile);
8206     }
8207 }
8208 
8209 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8210     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8211 static int is_proc(const char *filename, const char *entry)
8212 {
8213     return strcmp(filename, entry) == 0;
8214 }
8215 #endif
8216 
8217 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8218 static int open_net_route(CPUArchState *cpu_env, int fd)
8219 {
8220     FILE *fp;
8221     char *line = NULL;
8222     size_t len = 0;
8223     ssize_t read;
8224 
8225     fp = fopen("/proc/net/route", "r");
8226     if (fp == NULL) {
8227         return -1;
8228     }
8229 
8230     /* read header */
8231 
8232     read = getline(&line, &len, fp);
8233     dprintf(fd, "%s", line);
8234 
8235     /* read routes */
8236 
8237     while ((read = getline(&line, &len, fp)) != -1) {
8238         char iface[16];
8239         uint32_t dest, gw, mask;
8240         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8241         int fields;
8242 
8243         fields = sscanf(line,
8244                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8245                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8246                         &mask, &mtu, &window, &irtt);
8247         if (fields != 11) {
8248             continue;
8249         }
8250         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8251                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8252                 metric, tswap32(mask), mtu, window, irtt);
8253     }
8254 
8255     free(line);
8256     fclose(fp);
8257 
8258     return 0;
8259 }
8260 #endif
8261 
8262 #if defined(TARGET_SPARC)
8263 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8264 {
8265     dprintf(fd, "type\t\t: sun4u\n");
8266     return 0;
8267 }
8268 #endif
8269 
8270 #if defined(TARGET_HPPA)
8271 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8272 {
8273     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8274     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8275     dprintf(fd, "capabilities\t: os32\n");
8276     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8277     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8278     return 0;
8279 }
8280 #endif
8281 
8282 #if defined(TARGET_M68K)
8283 static int open_hardware(CPUArchState *cpu_env, int fd)
8284 {
8285     dprintf(fd, "Model:\t\tqemu-m68k\n");
8286     return 0;
8287 }
8288 #endif
8289 
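/* openat(2) helper: paths matching the fake_open table below describe
   /proc files whose contents must reflect the guest rather than QEMU, so
   they are synthesized into a memfd (or temporary file); everything else
   is passed to the host. */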
8290 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8291 {
8292     struct fake_open {
8293         const char *filename;
8294         int (*fill)(CPUArchState *cpu_env, int fd);
8295         int (*cmp)(const char *s1, const char *s2);
8296     };
8297     const struct fake_open *fake_open;
8298     static const struct fake_open fakes[] = {
8299         { "maps", open_self_maps, is_proc_myself },
8300         { "stat", open_self_stat, is_proc_myself },
8301         { "auxv", open_self_auxv, is_proc_myself },
8302         { "cmdline", open_self_cmdline, is_proc_myself },
8303 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8304         { "/proc/net/route", open_net_route, is_proc },
8305 #endif
8306 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8307         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8308 #endif
8309 #if defined(TARGET_M68K)
8310         { "/proc/hardware", open_hardware, is_proc },
8311 #endif
8312         { NULL, NULL, NULL }
8313     };
8314 
8315     if (is_proc_myself(pathname, "exe")) {
8316         return safe_openat(dirfd, exec_path, flags, mode);
8317     }
8318 
8319     for (fake_open = fakes; fake_open->filename; fake_open++) {
8320         if (fake_open->cmp(pathname, fake_open->filename)) {
8321             break;
8322         }
8323     }
8324 
8325     if (fake_open->filename) {
8326         const char *tmpdir;
8327         char filename[PATH_MAX];
8328         int fd, r;
8329 
8330         fd = memfd_create("qemu-open", 0);
8331         if (fd < 0) {
8332             if (errno != ENOSYS) {
8333                 return fd;
8334             }
8335             /* create temporary file to map stat to */
8336             tmpdir = getenv("TMPDIR");
8337             if (!tmpdir)
8338                 tmpdir = "/tmp";
8339             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8340             fd = mkstemp(filename);
8341             if (fd < 0) {
8342                 return fd;
8343             }
8344             unlink(filename);
8345         }
8346 
8347         if ((r = fake_open->fill(cpu_env, fd))) {
8348             int e = errno;
8349             close(fd);
8350             errno = e;
8351             return r;
8352         }
8353         lseek(fd, 0, SEEK_SET);
8354 
8355         return fd;
8356     }
8357 
8358     return safe_openat(dirfd, path(pathname), flags, mode);
8359 }
8360 
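/* execve()/execveat() helper: count and lock the guest argv/envp strings,
   build host argument arrays, and run the exec through the safe_syscall
   wrapper. */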
8361 static int do_execveat(CPUArchState *cpu_env, int dirfd,
8362                        abi_long pathname, abi_long guest_argp,
8363                        abi_long guest_envp, int flags)
8364 {
8365     int ret;
8366     char **argp, **envp;
8367     int argc, envc;
8368     abi_ulong gp;
8369     abi_ulong addr;
8370     char **q;
8371     void *p;
8372 
8373     argc = 0;
8374 
8375     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8376         if (get_user_ual(addr, gp)) {
8377             return -TARGET_EFAULT;
8378         }
8379         if (!addr) {
8380             break;
8381         }
8382         argc++;
8383     }
8384     envc = 0;
8385     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8386         if (get_user_ual(addr, gp)) {
8387             return -TARGET_EFAULT;
8388         }
8389         if (!addr) {
8390             break;
8391         }
8392         envc++;
8393     }
8394 
8395     argp = g_new0(char *, argc + 1);
8396     envp = g_new0(char *, envc + 1);
8397 
8398     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8399         if (get_user_ual(addr, gp)) {
8400             goto execve_efault;
8401         }
8402         if (!addr) {
8403             break;
8404         }
8405         *q = lock_user_string(addr);
8406         if (!*q) {
8407             goto execve_efault;
8408         }
8409     }
8410     *q = NULL;
8411 
8412     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8413         if (get_user_ual(addr, gp)) {
8414             goto execve_efault;
8415         }
8416         if (!addr) {
8417             break;
8418         }
8419         *q = lock_user_string(addr);
8420         if (!*q) {
8421             goto execve_efault;
8422         }
8423     }
8424     *q = NULL;
8425 
8426     /*
8427      * Although execve() is not an interruptible syscall, it is
8428      * a special case where we must use the safe_syscall wrapper:
8429      * if we allow a signal to happen before we make the host
8430      * syscall then we will 'lose' it, because at the point of
8431      * execve the process leaves QEMU's control. So we use the
8432      * safe syscall wrapper to ensure that we either take the
8433      * signal as a guest signal, or else it does not happen
8434      * before the execve completes and makes it the other
8435      * program's problem.
8436      */
8437     p = lock_user_string(pathname);
8438     if (!p) {
8439         goto execve_efault;
8440     }
8441 
8442     if (is_proc_myself(p, "exe")) {
8443         ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
8444     } else {
8445         ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
8446     }
8447 
8448     unlock_user(p, pathname, 0);
8449 
8450     goto execve_end;
8451 
8452 execve_efault:
8453     ret = -TARGET_EFAULT;
8454 
8455 execve_end:
8456     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8457         if (get_user_ual(addr, gp) || !addr) {
8458             break;
8459         }
8460         unlock_user(*q, addr, 0);
8461     }
8462     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8463         if (get_user_ual(addr, gp) || !addr) {
8464             break;
8465         }
8466         unlock_user(*q, addr, 0);
8467     }
8468 
8469     g_free(argp);
8470     g_free(envp);
8471     return ret;
8472 }
8473 
8474 #define TIMER_MAGIC 0x0caf0000
8475 #define TIMER_MAGIC_MASK 0xffff0000
8476 
8477 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
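/* e.g. an ID of (TIMER_MAGIC | 3) refers to g_posix_timers[3], provided
   that slot is within range. */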
8478 static target_timer_t get_timer_id(abi_long arg)
8479 {
8480     target_timer_t timerid = arg;
8481 
8482     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8483         return -TARGET_EINVAL;
8484     }
8485 
8486     timerid &= 0xffff;
8487 
8488     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8489         return -TARGET_EINVAL;
8490     }
8491 
8492     return timerid;
8493 }
8494 
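/* Copy a CPU affinity mask from guest memory into a host mask, translating
   between target abi_ulong words and host unsigned long words bit by bit. */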
8495 static int target_to_host_cpu_mask(unsigned long *host_mask,
8496                                    size_t host_size,
8497                                    abi_ulong target_addr,
8498                                    size_t target_size)
8499 {
8500     unsigned target_bits = sizeof(abi_ulong) * 8;
8501     unsigned host_bits = sizeof(*host_mask) * 8;
8502     abi_ulong *target_mask;
8503     unsigned i, j;
8504 
8505     assert(host_size >= target_size);
8506 
8507     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8508     if (!target_mask) {
8509         return -TARGET_EFAULT;
8510     }
8511     memset(host_mask, 0, host_size);
8512 
8513     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8514         unsigned bit = i * target_bits;
8515         abi_ulong val;
8516 
8517         __get_user(val, &target_mask[i]);
8518         for (j = 0; j < target_bits; j++, bit++) {
8519             if (val & (1UL << j)) {
8520                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8521             }
8522         }
8523     }
8524 
8525     unlock_user(target_mask, target_addr, 0);
8526     return 0;
8527 }
8528 
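/* The reverse of target_to_host_cpu_mask: copy a host CPU affinity mask
   out to guest memory. */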
8529 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8530                                    size_t host_size,
8531                                    abi_ulong target_addr,
8532                                    size_t target_size)
8533 {
8534     unsigned target_bits = sizeof(abi_ulong) * 8;
8535     unsigned host_bits = sizeof(*host_mask) * 8;
8536     abi_ulong *target_mask;
8537     unsigned i, j;
8538 
8539     assert(host_size >= target_size);
8540 
8541     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8542     if (!target_mask) {
8543         return -TARGET_EFAULT;
8544     }
8545 
8546     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8547         unsigned bit = i * target_bits;
8548         abi_ulong val = 0;
8549 
8550         for (j = 0; j < target_bits; j++, bit++) {
8551             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8552                 val |= 1UL << j;
8553             }
8554         }
8555         __put_user(val, &target_mask[i]);
8556     }
8557 
8558     unlock_user(target_mask, target_addr, target_size);
8559     return 0;
8560 }
8561 
8562 #ifdef TARGET_NR_getdents
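/* Emulate getdents: read entries with the host getdents/getdents64 and
   repack them into the target's struct dirent layout in guest memory. */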
8563 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8564 {
8565     g_autofree void *hdirp = NULL;
8566     void *tdirp;
8567     int hlen, hoff, toff;
8568     int hreclen, treclen;
8569     off64_t prev_diroff = 0;
8570 
8571     hdirp = g_try_malloc(count);
8572     if (!hdirp) {
8573         return -TARGET_ENOMEM;
8574     }
8575 
8576 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8577     hlen = sys_getdents(dirfd, hdirp, count);
8578 #else
8579     hlen = sys_getdents64(dirfd, hdirp, count);
8580 #endif
8581 
8582     hlen = get_errno(hlen);
8583     if (is_error(hlen)) {
8584         return hlen;
8585     }
8586 
8587     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8588     if (!tdirp) {
8589         return -TARGET_EFAULT;
8590     }
8591 
8592     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8593 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8594         struct linux_dirent *hde = hdirp + hoff;
8595 #else
8596         struct linux_dirent64 *hde = hdirp + hoff;
8597 #endif
8598         struct target_dirent *tde = tdirp + toff;
8599         int namelen;
8600         uint8_t type;
8601 
8602         namelen = strlen(hde->d_name);
8603         hreclen = hde->d_reclen;
8604         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8605         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8606 
8607         if (toff + treclen > count) {
8608             /*
8609              * If the host struct is smaller than the target struct, or
8610              * requires less alignment and thus packs into less space,
8611              * then the host can return more entries than we can pass
8612              * on to the guest.
8613              */
8614             if (toff == 0) {
8615                 toff = -TARGET_EINVAL; /* result buffer is too small */
8616                 break;
8617             }
8618             /*
8619              * Return what we have, resetting the file pointer to the
8620              * location of the first record not returned.
8621              */
8622             lseek64(dirfd, prev_diroff, SEEK_SET);
8623             break;
8624         }
8625 
8626         prev_diroff = hde->d_off;
8627         tde->d_ino = tswapal(hde->d_ino);
8628         tde->d_off = tswapal(hde->d_off);
8629         tde->d_reclen = tswap16(treclen);
8630         memcpy(tde->d_name, hde->d_name, namelen + 1);
8631 
8632         /*
8633          * The getdents type is in what was formerly a padding byte at the
8634          * end of the structure.
8635          */
8636 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8637         type = *((uint8_t *)hde + hreclen - 1);
8638 #else
8639         type = hde->d_type;
8640 #endif
8641         *((uint8_t *)tde + treclen - 1) = type;
8642     }
8643 
8644     unlock_user(tdirp, arg2, toff);
8645     return toff;
8646 }
8647 #endif /* TARGET_NR_getdents */
8648 
8649 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
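/* As do_getdents, but producing the target's 64-bit dirent64 layout. */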
8650 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8651 {
8652     g_autofree void *hdirp = NULL;
8653     void *tdirp;
8654     int hlen, hoff, toff;
8655     int hreclen, treclen;
8656     off64_t prev_diroff = 0;
8657 
8658     hdirp = g_try_malloc(count);
8659     if (!hdirp) {
8660         return -TARGET_ENOMEM;
8661     }
8662 
8663     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8664     if (is_error(hlen)) {
8665         return hlen;
8666     }
8667 
8668     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8669     if (!tdirp) {
8670         return -TARGET_EFAULT;
8671     }
8672 
8673     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8674         struct linux_dirent64 *hde = hdirp + hoff;
8675         struct target_dirent64 *tde = tdirp + toff;
8676         int namelen;
8677 
8678         namelen = strlen(hde->d_name) + 1;
8679         hreclen = hde->d_reclen;
8680         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8681         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8682 
8683         if (toff + treclen > count) {
8684             /*
8685              * If the host struct is smaller than the target struct, or
8686              * requires less alignment and thus packs into less space,
8687              * then the host can return more entries than we can pass
8688              * on to the guest.
8689              */
8690             if (toff == 0) {
8691                 toff = -TARGET_EINVAL; /* result buffer is too small */
8692                 break;
8693             }
8694             /*
8695              * Return what we have, resetting the file pointer to the
8696              * location of the first record not returned.
8697              */
8698             lseek64(dirfd, prev_diroff, SEEK_SET);
8699             break;
8700         }
8701 
8702         prev_diroff = hde->d_off;
8703         tde->d_ino = tswap64(hde->d_ino);
8704         tde->d_off = tswap64(hde->d_off);
8705         tde->d_reclen = tswap16(treclen);
8706         tde->d_type = hde->d_type;
8707         memcpy(tde->d_name, hde->d_name, namelen);
8708     }
8709 
8710     unlock_user(tdirp, arg2, toff);
8711     return toff;
8712 }
8713 #endif /* TARGET_NR_getdents64 */
8714 
8715 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8716 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8717 #endif
8718 
8719 /* This is an internal helper for do_syscall so that there is a single
8720  * return point, which makes it easier to perform actions such as
8721  * logging of syscall results.
8722  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8723  */
8724 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8725                             abi_long arg2, abi_long arg3, abi_long arg4,
8726                             abi_long arg5, abi_long arg6, abi_long arg7,
8727                             abi_long arg8)
8728 {
8729     CPUState *cpu = env_cpu(cpu_env);
8730     abi_long ret;
8731 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8732     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8733     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8734     || defined(TARGET_NR_statx)
8735     struct stat st;
8736 #endif
8737 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8738     || defined(TARGET_NR_fstatfs)
8739     struct statfs stfs;
8740 #endif
8741     void *p;
8742 
8743     switch(num) {
8744     case TARGET_NR_exit:
8745         /* In old applications this may be used to implement _exit(2).
8746            However, in threaded applications it is used for thread termination,
8747            and _exit_group is used for application termination.
8748            Do thread termination if we have more than one thread.  */
8749 
8750         if (block_signals()) {
8751             return -QEMU_ERESTARTSYS;
8752         }
8753 
8754         pthread_mutex_lock(&clone_lock);
8755 
8756         if (CPU_NEXT(first_cpu)) {
8757             TaskState *ts = cpu->opaque;
8758 
8759             if (ts->child_tidptr) {
8760                 put_user_u32(0, ts->child_tidptr);
8761                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8762                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8763             }
8764 
8765             object_unparent(OBJECT(cpu));
8766             object_unref(OBJECT(cpu));
8767             /*
8768              * At this point the CPU should be unrealized and removed
8769              * from cpu lists. We can clean up the rest of the thread
8770              * data without the lock held.
8771              */
8772 
8773             pthread_mutex_unlock(&clone_lock);
8774 
8775             thread_cpu = NULL;
8776             g_free(ts);
8777             rcu_unregister_thread();
8778             pthread_exit(NULL);
8779         }
8780 
8781         pthread_mutex_unlock(&clone_lock);
8782         preexit_cleanup(cpu_env, arg1);
8783         _exit(arg1);
8784         return 0; /* avoid warning */
8785     case TARGET_NR_read:
8786         if (arg2 == 0 && arg3 == 0) {
8787             return get_errno(safe_read(arg1, 0, 0));
8788         } else {
8789             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8790                 return -TARGET_EFAULT;
8791             ret = get_errno(safe_read(arg1, p, arg3));
8792             if (ret >= 0 &&
8793                 fd_trans_host_to_target_data(arg1)) {
8794                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8795             }
8796             unlock_user(p, arg2, ret);
8797         }
8798         return ret;
8799     case TARGET_NR_write:
8800         if (arg2 == 0 && arg3 == 0) {
8801             return get_errno(safe_write(arg1, 0, 0));
8802         }
8803         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8804             return -TARGET_EFAULT;
8805         if (fd_trans_target_to_host_data(arg1)) {
8806             void *copy = g_malloc(arg3);
8807             memcpy(copy, p, arg3);
8808             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8809             if (ret >= 0) {
8810                 ret = get_errno(safe_write(arg1, copy, ret));
8811             }
8812             g_free(copy);
8813         } else {
8814             ret = get_errno(safe_write(arg1, p, arg3));
8815         }
8816         unlock_user(p, arg2, 0);
8817         return ret;
8818 
8819 #ifdef TARGET_NR_open
8820     case TARGET_NR_open:
8821         if (!(p = lock_user_string(arg1)))
8822             return -TARGET_EFAULT;
8823         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8824                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8825                                   arg3));
8826         fd_trans_unregister(ret);
8827         unlock_user(p, arg1, 0);
8828         return ret;
8829 #endif
8830     case TARGET_NR_openat:
8831         if (!(p = lock_user_string(arg2)))
8832             return -TARGET_EFAULT;
8833         ret = get_errno(do_openat(cpu_env, arg1, p,
8834                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8835                                   arg4));
8836         fd_trans_unregister(ret);
8837         unlock_user(p, arg2, 0);
8838         return ret;
8839 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8840     case TARGET_NR_name_to_handle_at:
8841         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8842         return ret;
8843 #endif
8844 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8845     case TARGET_NR_open_by_handle_at:
8846         ret = do_open_by_handle_at(arg1, arg2, arg3);
8847         fd_trans_unregister(ret);
8848         return ret;
8849 #endif
8850 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8851     case TARGET_NR_pidfd_open:
8852         return get_errno(pidfd_open(arg1, arg2));
8853 #endif
8854 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8855     case TARGET_NR_pidfd_send_signal:
8856         {
8857             siginfo_t uinfo, *puinfo;
8858 
8859             if (arg3) {
8860                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8861                 if (!p) {
8862                     return -TARGET_EFAULT;
8863                  }
8864                  target_to_host_siginfo(&uinfo, p);
8865                  unlock_user(p, arg3, 0);
8866                  puinfo = &uinfo;
8867             } else {
8868                  puinfo = NULL;
8869             }
8870             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8871                                               puinfo, arg4));
8872         }
8873         return ret;
8874 #endif
8875 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8876     case TARGET_NR_pidfd_getfd:
8877         return get_errno(pidfd_getfd(arg1, arg2, arg3));
8878 #endif
8879     case TARGET_NR_close:
8880         fd_trans_unregister(arg1);
8881         return get_errno(close(arg1));
8882 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
8883     case TARGET_NR_close_range:
8884         ret = get_errno(sys_close_range(arg1, arg2, arg3));
8885         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
8886             abi_long fd, maxfd;
8887             maxfd = MIN(arg2, target_fd_max);
8888             for (fd = arg1; fd < maxfd; fd++) {
8889                 fd_trans_unregister(fd);
8890             }
8891         }
8892         return ret;
8893 #endif
8894 
8895     case TARGET_NR_brk:
8896         return do_brk(arg1);
8897 #ifdef TARGET_NR_fork
8898     case TARGET_NR_fork:
8899         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8900 #endif
8901 #ifdef TARGET_NR_waitpid
8902     case TARGET_NR_waitpid:
8903         {
8904             int status;
8905             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8906             if (!is_error(ret) && arg2 && ret
8907                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8908                 return -TARGET_EFAULT;
8909         }
8910         return ret;
8911 #endif
8912 #ifdef TARGET_NR_waitid
8913     case TARGET_NR_waitid:
8914         {
8915             siginfo_t info;
8916             info.si_pid = 0;
8917             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8918             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8919                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8920                     return -TARGET_EFAULT;
8921                 host_to_target_siginfo(p, &info);
8922                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8923             }
8924         }
8925         return ret;
8926 #endif
8927 #ifdef TARGET_NR_creat /* not on alpha */
8928     case TARGET_NR_creat:
8929         if (!(p = lock_user_string(arg1)))
8930             return -TARGET_EFAULT;
8931         ret = get_errno(creat(p, arg2));
8932         fd_trans_unregister(ret);
8933         unlock_user(p, arg1, 0);
8934         return ret;
8935 #endif
8936 #ifdef TARGET_NR_link
8937     case TARGET_NR_link:
8938         {
8939             void * p2;
8940             p = lock_user_string(arg1);
8941             p2 = lock_user_string(arg2);
8942             if (!p || !p2)
8943                 ret = -TARGET_EFAULT;
8944             else
8945                 ret = get_errno(link(p, p2));
8946             unlock_user(p2, arg2, 0);
8947             unlock_user(p, arg1, 0);
8948         }
8949         return ret;
8950 #endif
8951 #if defined(TARGET_NR_linkat)
8952     case TARGET_NR_linkat:
8953         {
8954             void * p2 = NULL;
8955             if (!arg2 || !arg4)
8956                 return -TARGET_EFAULT;
8957             p  = lock_user_string(arg2);
8958             p2 = lock_user_string(arg4);
8959             if (!p || !p2)
8960                 ret = -TARGET_EFAULT;
8961             else
8962                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8963             unlock_user(p, arg2, 0);
8964             unlock_user(p2, arg4, 0);
8965         }
8966         return ret;
8967 #endif
8968 #ifdef TARGET_NR_unlink
8969     case TARGET_NR_unlink:
8970         if (!(p = lock_user_string(arg1)))
8971             return -TARGET_EFAULT;
8972         ret = get_errno(unlink(p));
8973         unlock_user(p, arg1, 0);
8974         return ret;
8975 #endif
8976 #if defined(TARGET_NR_unlinkat)
8977     case TARGET_NR_unlinkat:
8978         if (!(p = lock_user_string(arg2)))
8979             return -TARGET_EFAULT;
8980         ret = get_errno(unlinkat(arg1, p, arg3));
8981         unlock_user(p, arg2, 0);
8982         return ret;
8983 #endif
8984     case TARGET_NR_execveat:
8985         return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
8986     case TARGET_NR_execve:
8987         return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
8988     case TARGET_NR_chdir:
8989         if (!(p = lock_user_string(arg1)))
8990             return -TARGET_EFAULT;
8991         ret = get_errno(chdir(p));
8992         unlock_user(p, arg1, 0);
8993         return ret;
8994 #ifdef TARGET_NR_time
8995     case TARGET_NR_time:
8996         {
8997             time_t host_time;
8998             ret = get_errno(time(&host_time));
8999             if (!is_error(ret)
9000                 && arg1
9001                 && put_user_sal(host_time, arg1))
9002                 return -TARGET_EFAULT;
9003         }
9004         return ret;
9005 #endif
9006 #ifdef TARGET_NR_mknod
9007     case TARGET_NR_mknod:
9008         if (!(p = lock_user_string(arg1)))
9009             return -TARGET_EFAULT;
9010         ret = get_errno(mknod(p, arg2, arg3));
9011         unlock_user(p, arg1, 0);
9012         return ret;
9013 #endif
9014 #if defined(TARGET_NR_mknodat)
9015     case TARGET_NR_mknodat:
9016         if (!(p = lock_user_string(arg2)))
9017             return -TARGET_EFAULT;
9018         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9019         unlock_user(p, arg2, 0);
9020         return ret;
9021 #endif
9022 #ifdef TARGET_NR_chmod
9023     case TARGET_NR_chmod:
9024         if (!(p = lock_user_string(arg1)))
9025             return -TARGET_EFAULT;
9026         ret = get_errno(chmod(p, arg2));
9027         unlock_user(p, arg1, 0);
9028         return ret;
9029 #endif
9030 #ifdef TARGET_NR_lseek
9031     case TARGET_NR_lseek:
9032         return get_errno(lseek(arg1, arg2, arg3));
9033 #endif
9034 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9035     /* Alpha specific */
9036     case TARGET_NR_getxpid:
9037         cpu_env->ir[IR_A4] = getppid();
9038         return get_errno(getpid());
9039 #endif
9040 #ifdef TARGET_NR_getpid
9041     case TARGET_NR_getpid:
9042         return get_errno(getpid());
9043 #endif
9044     case TARGET_NR_mount:
9045         {
9046             /* need to look at the data field */
9047             void *p2, *p3;
9048 
9049             if (arg1) {
9050                 p = lock_user_string(arg1);
9051                 if (!p) {
9052                     return -TARGET_EFAULT;
9053                 }
9054             } else {
9055                 p = NULL;
9056             }
9057 
9058             p2 = lock_user_string(arg2);
9059             if (!p2) {
9060                 if (arg1) {
9061                     unlock_user(p, arg1, 0);
9062                 }
9063                 return -TARGET_EFAULT;
9064             }
9065 
9066             if (arg3) {
9067                 p3 = lock_user_string(arg3);
9068                 if (!p3) {
9069                     if (arg1) {
9070                         unlock_user(p, arg1, 0);
9071                     }
9072                     unlock_user(p2, arg2, 0);
9073                     return -TARGET_EFAULT;
9074                 }
9075             } else {
9076                 p3 = NULL;
9077             }
9078 
9079             /* FIXME - arg5 should be locked, but it isn't clear how to
9080              * do that since it's not guaranteed to be a NULL-terminated
9081              * string.
9082              */
9083             if (!arg5) {
9084                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9085             } else {
9086                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9087             }
9088             ret = get_errno(ret);
9089 
9090             if (arg1) {
9091                 unlock_user(p, arg1, 0);
9092             }
9093             unlock_user(p2, arg2, 0);
9094             if (arg3) {
9095                 unlock_user(p3, arg3, 0);
9096             }
9097         }
9098         return ret;
9099 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9100 #if defined(TARGET_NR_umount)
9101     case TARGET_NR_umount:
9102 #endif
9103 #if defined(TARGET_NR_oldumount)
9104     case TARGET_NR_oldumount:
9105 #endif
9106         if (!(p = lock_user_string(arg1)))
9107             return -TARGET_EFAULT;
9108         ret = get_errno(umount(p));
9109         unlock_user(p, arg1, 0);
9110         return ret;
9111 #endif
9112 #ifdef TARGET_NR_stime /* not on alpha */
9113     case TARGET_NR_stime:
9114         {
9115             struct timespec ts;
9116             ts.tv_nsec = 0;
9117             if (get_user_sal(ts.tv_sec, arg1)) {
9118                 return -TARGET_EFAULT;
9119             }
9120             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9121         }
9122 #endif
9123 #ifdef TARGET_NR_alarm /* not on alpha */
9124     case TARGET_NR_alarm:
9125         return alarm(arg1);
9126 #endif
9127 #ifdef TARGET_NR_pause /* not on alpha */
9128     case TARGET_NR_pause:
9129         if (!block_signals()) {
9130             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9131         }
9132         return -TARGET_EINTR;
9133 #endif
9134 #ifdef TARGET_NR_utime
9135     case TARGET_NR_utime:
9136         {
9137             struct utimbuf tbuf, *host_tbuf;
9138             struct target_utimbuf *target_tbuf;
9139             if (arg2) {
9140                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9141                     return -TARGET_EFAULT;
9142                 tbuf.actime = tswapal(target_tbuf->actime);
9143                 tbuf.modtime = tswapal(target_tbuf->modtime);
9144                 unlock_user_struct(target_tbuf, arg2, 0);
9145                 host_tbuf = &tbuf;
9146             } else {
9147                 host_tbuf = NULL;
9148             }
9149             if (!(p = lock_user_string(arg1)))
9150                 return -TARGET_EFAULT;
9151             ret = get_errno(utime(p, host_tbuf));
9152             unlock_user(p, arg1, 0);
9153         }
9154         return ret;
9155 #endif
9156 #ifdef TARGET_NR_utimes
9157     case TARGET_NR_utimes:
9158         {
9159             struct timeval *tvp, tv[2];
9160             if (arg2) {
9161                 if (copy_from_user_timeval(&tv[0], arg2)
9162                     || copy_from_user_timeval(&tv[1],
9163                                               arg2 + sizeof(struct target_timeval)))
9164                     return -TARGET_EFAULT;
9165                 tvp = tv;
9166             } else {
9167                 tvp = NULL;
9168             }
9169             if (!(p = lock_user_string(arg1)))
9170                 return -TARGET_EFAULT;
9171             ret = get_errno(utimes(p, tvp));
9172             unlock_user(p, arg1, 0);
9173         }
9174         return ret;
9175 #endif
9176 #if defined(TARGET_NR_futimesat)
9177     case TARGET_NR_futimesat:
9178         {
9179             struct timeval *tvp, tv[2];
9180             if (arg3) {
9181                 if (copy_from_user_timeval(&tv[0], arg3)
9182                     || copy_from_user_timeval(&tv[1],
9183                                               arg3 + sizeof(struct target_timeval)))
9184                     return -TARGET_EFAULT;
9185                 tvp = tv;
9186             } else {
9187                 tvp = NULL;
9188             }
9189             if (!(p = lock_user_string(arg2))) {
9190                 return -TARGET_EFAULT;
9191             }
9192             ret = get_errno(futimesat(arg1, path(p), tvp));
9193             unlock_user(p, arg2, 0);
9194         }
9195         return ret;
9196 #endif
9197 #ifdef TARGET_NR_access
9198     case TARGET_NR_access:
9199         if (!(p = lock_user_string(arg1))) {
9200             return -TARGET_EFAULT;
9201         }
9202         ret = get_errno(access(path(p), arg2));
9203         unlock_user(p, arg1, 0);
9204         return ret;
9205 #endif
9206 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9207     case TARGET_NR_faccessat:
9208         if (!(p = lock_user_string(arg2))) {
9209             return -TARGET_EFAULT;
9210         }
9211         ret = get_errno(faccessat(arg1, p, arg3, 0));
9212         unlock_user(p, arg2, 0);
9213         return ret;
9214 #endif
9215 #if defined(TARGET_NR_faccessat2)
9216     case TARGET_NR_faccessat2:
9217         if (!(p = lock_user_string(arg2))) {
9218             return -TARGET_EFAULT;
9219         }
9220         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9221         unlock_user(p, arg2, 0);
9222         return ret;
9223 #endif
9224 #ifdef TARGET_NR_nice /* not on alpha */
9225     case TARGET_NR_nice:
9226         return get_errno(nice(arg1));
9227 #endif
9228     case TARGET_NR_sync:
9229         sync();
9230         return 0;
9231 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9232     case TARGET_NR_syncfs:
9233         return get_errno(syncfs(arg1));
9234 #endif
9235     case TARGET_NR_kill:
9236         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9237 #ifdef TARGET_NR_rename
9238     case TARGET_NR_rename:
9239         {
9240             void *p2;
9241             p = lock_user_string(arg1);
9242             p2 = lock_user_string(arg2);
9243             if (!p || !p2)
9244                 ret = -TARGET_EFAULT;
9245             else
9246                 ret = get_errno(rename(p, p2));
9247             unlock_user(p2, arg2, 0);
9248             unlock_user(p, arg1, 0);
9249         }
9250         return ret;
9251 #endif
9252 #if defined(TARGET_NR_renameat)
9253     case TARGET_NR_renameat:
9254         {
9255             void *p2;
9256             p  = lock_user_string(arg2);
9257             p2 = lock_user_string(arg4);
9258             if (!p || !p2)
9259                 ret = -TARGET_EFAULT;
9260             else
9261                 ret = get_errno(renameat(arg1, p, arg3, p2));
9262             unlock_user(p2, arg4, 0);
9263             unlock_user(p, arg2, 0);
9264         }
9265         return ret;
9266 #endif
9267 #if defined(TARGET_NR_renameat2)
9268     case TARGET_NR_renameat2:
9269         {
9270             void *p2;
9271             p  = lock_user_string(arg2);
9272             p2 = lock_user_string(arg4);
9273             if (!p || !p2) {
9274                 ret = -TARGET_EFAULT;
9275             } else {
9276                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9277             }
9278             unlock_user(p2, arg4, 0);
9279             unlock_user(p, arg2, 0);
9280         }
9281         return ret;
9282 #endif
9283 #ifdef TARGET_NR_mkdir
9284     case TARGET_NR_mkdir:
9285         if (!(p = lock_user_string(arg1)))
9286             return -TARGET_EFAULT;
9287         ret = get_errno(mkdir(p, arg2));
9288         unlock_user(p, arg1, 0);
9289         return ret;
9290 #endif
9291 #if defined(TARGET_NR_mkdirat)
9292     case TARGET_NR_mkdirat:
9293         if (!(p = lock_user_string(arg2)))
9294             return -TARGET_EFAULT;
9295         ret = get_errno(mkdirat(arg1, p, arg3));
9296         unlock_user(p, arg2, 0);
9297         return ret;
9298 #endif
9299 #ifdef TARGET_NR_rmdir
9300     case TARGET_NR_rmdir:
9301         if (!(p = lock_user_string(arg1)))
9302             return -TARGET_EFAULT;
9303         ret = get_errno(rmdir(p));
9304         unlock_user(p, arg1, 0);
9305         return ret;
9306 #endif
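    /*
     * On success, copy any fd translator state (fd_trans) to the new
     * descriptor so emulation hooks keep following the duplicate.
     */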
9307     case TARGET_NR_dup:
9308         ret = get_errno(dup(arg1));
9309         if (ret >= 0) {
9310             fd_trans_dup(arg1, ret);
9311         }
9312         return ret;
9313 #ifdef TARGET_NR_pipe
9314     case TARGET_NR_pipe:
9315         return do_pipe(cpu_env, arg1, 0, 0);
9316 #endif
9317 #ifdef TARGET_NR_pipe2
9318     case TARGET_NR_pipe2:
9319         return do_pipe(cpu_env, arg1,
9320                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9321 #endif
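    /*
     * times() reports clock ticks; both the tms fields and the return
     * value are rescaled from the host tick rate to the target's.
     */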
9322     case TARGET_NR_times:
9323         {
9324             struct target_tms *tmsp;
9325             struct tms tms;
9326             ret = get_errno(times(&tms));
9327             if (arg1) {
9328                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9329                 if (!tmsp)
9330                     return -TARGET_EFAULT;
9331                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9332                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9333                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9334                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9335             }
9336             if (!is_error(ret))
9337                 ret = host_to_target_clock_t(ret);
9338         }
9339         return ret;
9340     case TARGET_NR_acct:
9341         if (arg1 == 0) {
9342             ret = get_errno(acct(NULL));
9343         } else {
9344             if (!(p = lock_user_string(arg1))) {
9345                 return -TARGET_EFAULT;
9346             }
9347             ret = get_errno(acct(path(p)));
9348             unlock_user(p, arg1, 0);
9349         }
9350         return ret;
9351 #ifdef TARGET_NR_umount2
9352     case TARGET_NR_umount2:
9353         if (!(p = lock_user_string(arg1)))
9354             return -TARGET_EFAULT;
9355         ret = get_errno(umount2(p, arg2));
9356         unlock_user(p, arg1, 0);
9357         return ret;
9358 #endif
9359     case TARGET_NR_ioctl:
9360         return do_ioctl(arg1, arg2, arg3);
9361 #ifdef TARGET_NR_fcntl
9362     case TARGET_NR_fcntl:
9363         return do_fcntl(arg1, arg2, arg3);
9364 #endif
9365     case TARGET_NR_setpgid:
9366         return get_errno(setpgid(arg1, arg2));
9367     case TARGET_NR_umask:
9368         return get_errno(umask(arg1));
9369     case TARGET_NR_chroot:
9370         if (!(p = lock_user_string(arg1)))
9371             return -TARGET_EFAULT;
9372         ret = get_errno(chroot(p));
9373         unlock_user(p, arg1, 0);
9374         return ret;
9375 #ifdef TARGET_NR_dup2
9376     case TARGET_NR_dup2:
9377         ret = get_errno(dup2(arg1, arg2));
9378         if (ret >= 0) {
9379             fd_trans_dup(arg1, arg2);
9380         }
9381         return ret;
9382 #endif
9383 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9384     case TARGET_NR_dup3:
9385     {
9386         int host_flags;
9387 
9388         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9389             return -TARGET_EINVAL;
9390         }
9391         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9392         ret = get_errno(dup3(arg1, arg2, host_flags));
9393         if (ret >= 0) {
9394             fd_trans_dup(arg1, arg2);
9395         }
9396         return ret;
9397     }
9398 #endif
9399 #ifdef TARGET_NR_getppid /* not on alpha */
9400     case TARGET_NR_getppid:
9401         return get_errno(getppid());
9402 #endif
9403 #ifdef TARGET_NR_getpgrp
9404     case TARGET_NR_getpgrp:
9405         return get_errno(getpgrp());
9406 #endif
9407     case TARGET_NR_setsid:
9408         return get_errno(setsid());
9409 #ifdef TARGET_NR_sigaction
9410     case TARGET_NR_sigaction:
9411         {
9412 #if defined(TARGET_MIPS)
9413             struct target_sigaction act, oact, *pact, *old_act;
9414 
9415             if (arg2) {
9416                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9417                     return -TARGET_EFAULT;
9418                 act._sa_handler = old_act->_sa_handler;
9419                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9420                 act.sa_flags = old_act->sa_flags;
9421                 unlock_user_struct(old_act, arg2, 0);
9422                 pact = &act;
9423             } else {
9424                 pact = NULL;
9425             }
9426 
9427             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9428 
9429             if (!is_error(ret) && arg3) {
9430                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9431                     return -TARGET_EFAULT;
9432                 old_act->_sa_handler = oact._sa_handler;
9433                 old_act->sa_flags = oact.sa_flags;
9434                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9435                 old_act->sa_mask.sig[1] = 0;
9436                 old_act->sa_mask.sig[2] = 0;
9437                 old_act->sa_mask.sig[3] = 0;
9438                 unlock_user_struct(old_act, arg3, 1);
9439             }
9440 #else
9441             struct target_old_sigaction *old_act;
9442             struct target_sigaction act, oact, *pact;
9443             if (arg2) {
9444                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9445                     return -TARGET_EFAULT;
9446                 act._sa_handler = old_act->_sa_handler;
9447                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9448                 act.sa_flags = old_act->sa_flags;
9449 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9450                 act.sa_restorer = old_act->sa_restorer;
9451 #endif
9452                 unlock_user_struct(old_act, arg2, 0);
9453                 pact = &act;
9454             } else {
9455                 pact = NULL;
9456             }
9457             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9458             if (!is_error(ret) && arg3) {
9459                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9460                     return -TARGET_EFAULT;
9461                 old_act->_sa_handler = oact._sa_handler;
9462                 old_act->sa_mask = oact.sa_mask.sig[0];
9463                 old_act->sa_flags = oact.sa_flags;
9464 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9465                 old_act->sa_restorer = oact.sa_restorer;
9466 #endif
9467                 unlock_user_struct(old_act, arg3, 1);
9468             }
9469 #endif
9470         }
9471         return ret;
9472 #endif
9473     case TARGET_NR_rt_sigaction:
9474         {
9475             /*
9476              * For Alpha and SPARC this is a 5 argument syscall, with
9477              * a 'restorer' parameter which must be copied into the
9478              * sa_restorer field of the sigaction struct.
9479              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9480              * and arg5 is the sigsetsize.
9481              */
9482 #if defined(TARGET_ALPHA)
9483             target_ulong sigsetsize = arg4;
9484             target_ulong restorer = arg5;
9485 #elif defined(TARGET_SPARC)
9486             target_ulong restorer = arg4;
9487             target_ulong sigsetsize = arg5;
9488 #else
9489             target_ulong sigsetsize = arg4;
9490             target_ulong restorer = 0;
9491 #endif
9492             struct target_sigaction *act = NULL;
9493             struct target_sigaction *oact = NULL;
9494 
9495             if (sigsetsize != sizeof(target_sigset_t)) {
9496                 return -TARGET_EINVAL;
9497             }
9498             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9499                 return -TARGET_EFAULT;
9500             }
9501             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9502                 ret = -TARGET_EFAULT;
9503             } else {
9504                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9505                 if (oact) {
9506                     unlock_user_struct(oact, arg3, 1);
9507                 }
9508             }
9509             if (act) {
9510                 unlock_user_struct(act, arg2, 0);
9511             }
9512         }
9513         return ret;
9514 #ifdef TARGET_NR_sgetmask /* not on alpha */
9515     case TARGET_NR_sgetmask:
9516         {
9517             sigset_t cur_set;
9518             abi_ulong target_set;
9519             ret = do_sigprocmask(0, NULL, &cur_set);
9520             if (!ret) {
9521                 host_to_target_old_sigset(&target_set, &cur_set);
9522                 ret = target_set;
9523             }
9524         }
9525         return ret;
9526 #endif
9527 #ifdef TARGET_NR_ssetmask /* not on alpha */
9528     case TARGET_NR_ssetmask:
9529         {
9530             sigset_t set, oset;
9531             abi_ulong target_set = arg1;
9532             target_to_host_old_sigset(&set, &target_set);
9533             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9534             if (!ret) {
9535                 host_to_target_old_sigset(&target_set, &oset);
9536                 ret = target_set;
9537             }
9538         }
9539         return ret;
9540 #endif
9541 #ifdef TARGET_NR_sigprocmask
9542     case TARGET_NR_sigprocmask:
9543         {
9544 #if defined(TARGET_ALPHA)
9545             sigset_t set, oldset;
9546             abi_ulong mask;
9547             int how;
9548 
9549             switch (arg1) {
9550             case TARGET_SIG_BLOCK:
9551                 how = SIG_BLOCK;
9552                 break;
9553             case TARGET_SIG_UNBLOCK:
9554                 how = SIG_UNBLOCK;
9555                 break;
9556             case TARGET_SIG_SETMASK:
9557                 how = SIG_SETMASK;
9558                 break;
9559             default:
9560                 return -TARGET_EINVAL;
9561             }
9562             mask = arg2;
9563             target_to_host_old_sigset(&set, &mask);
9564 
9565             ret = do_sigprocmask(how, &set, &oldset);
9566             if (!is_error(ret)) {
9567                 host_to_target_old_sigset(&mask, &oldset);
9568                 ret = mask;
9569                 cpu_env->ir[IR_V0] = 0; /* force no error */
9570             }
9571 #else
9572             sigset_t set, oldset, *set_ptr;
9573             int how;
9574 
9575             if (arg2) {
9576                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9577                 if (!p) {
9578                     return -TARGET_EFAULT;
9579                 }
9580                 target_to_host_old_sigset(&set, p);
9581                 unlock_user(p, arg2, 0);
9582                 set_ptr = &set;
9583                 switch (arg1) {
9584                 case TARGET_SIG_BLOCK:
9585                     how = SIG_BLOCK;
9586                     break;
9587                 case TARGET_SIG_UNBLOCK:
9588                     how = SIG_UNBLOCK;
9589                     break;
9590                 case TARGET_SIG_SETMASK:
9591                     how = SIG_SETMASK;
9592                     break;
9593                 default:
9594                     return -TARGET_EINVAL;
9595                 }
9596             } else {
9597                 how = 0;
9598                 set_ptr = NULL;
9599             }
9600             ret = do_sigprocmask(how, set_ptr, &oldset);
9601             if (!is_error(ret) && arg3) {
9602                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9603                     return -TARGET_EFAULT;
9604                 host_to_target_old_sigset(p, &oldset);
9605                 unlock_user(p, arg3, sizeof(target_sigset_t));
9606             }
9607 #endif
9608         }
9609         return ret;
9610 #endif
9611     case TARGET_NR_rt_sigprocmask:
9612         {
9613             int how = arg1;
9614             sigset_t set, oldset, *set_ptr;
9615 
9616             if (arg4 != sizeof(target_sigset_t)) {
9617                 return -TARGET_EINVAL;
9618             }
9619 
9620             if (arg2) {
9621                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9622                 if (!p) {
9623                     return -TARGET_EFAULT;
9624                 }
9625                 target_to_host_sigset(&set, p);
9626                 unlock_user(p, arg2, 0);
9627                 set_ptr = &set;
9628                 switch(how) {
9629                 case TARGET_SIG_BLOCK:
9630                     how = SIG_BLOCK;
9631                     break;
9632                 case TARGET_SIG_UNBLOCK:
9633                     how = SIG_UNBLOCK;
9634                     break;
9635                 case TARGET_SIG_SETMASK:
9636                     how = SIG_SETMASK;
9637                     break;
9638                 default:
9639                     return -TARGET_EINVAL;
9640                 }
9641             } else {
9642                 how = 0;
9643                 set_ptr = NULL;
9644             }
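            /* With no new mask, sigprocmask ignores 'how'; 0 is a placeholder. */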
9645             ret = do_sigprocmask(how, set_ptr, &oldset);
9646             if (!is_error(ret) && arg3) {
9647                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9648                     return -TARGET_EFAULT;
9649                 host_to_target_sigset(p, &oldset);
9650                 unlock_user(p, arg3, sizeof(target_sigset_t));
9651             }
9652         }
9653         return ret;
9654 #ifdef TARGET_NR_sigpending
9655     case TARGET_NR_sigpending:
9656         {
9657             sigset_t set;
9658             ret = get_errno(sigpending(&set));
9659             if (!is_error(ret)) {
9660                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9661                     return -TARGET_EFAULT;
9662                 host_to_target_old_sigset(p, &set);
9663                 unlock_user(p, arg1, sizeof(target_sigset_t));
9664             }
9665         }
9666         return ret;
9667 #endif
9668     case TARGET_NR_rt_sigpending:
9669         {
9670             sigset_t set;
9671 
9672             /* Yes, this check is >, not != like most. We follow the kernel's
9673              * logic here: it implements NR_sigpending through the same code
9674              * path, and in that case the old_sigset_t is smaller in size.
9675              */
9677             if (arg2 > sizeof(target_sigset_t)) {
9678                 return -TARGET_EINVAL;
9679             }
9680 
9681             ret = get_errno(sigpending(&set));
9682             if (!is_error(ret)) {
9683                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9684                     return -TARGET_EFAULT;
9685                 host_to_target_sigset(p, &set);
9686                 unlock_user(p, arg1, sizeof(target_sigset_t));
9687             }
9688         }
9689         return ret;
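    /*
     * The sigsuspend mask must survive until a signal is actually delivered,
     * so it is stashed in the TaskState (via process_sigsuspend_mask) and
     * released again by finish_sigsuspend_mask.
     */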
9690 #ifdef TARGET_NR_sigsuspend
9691     case TARGET_NR_sigsuspend:
9692         {
9693             sigset_t *set;
9694 
9695 #if defined(TARGET_ALPHA)
9696             TaskState *ts = cpu->opaque;
9697             /* target_to_host_old_sigset will bswap back */
9698             abi_ulong mask = tswapal(arg1);
9699             set = &ts->sigsuspend_mask;
9700             target_to_host_old_sigset(set, &mask);
9701 #else
9702             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9703             if (ret != 0) {
9704                 return ret;
9705             }
9706 #endif
9707             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9708             finish_sigsuspend_mask(ret);
9709         }
9710         return ret;
9711 #endif
9712     case TARGET_NR_rt_sigsuspend:
9713         {
9714             sigset_t *set;
9715 
9716             ret = process_sigsuspend_mask(&set, arg1, arg2);
9717             if (ret != 0) {
9718                 return ret;
9719             }
9720             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9721             finish_sigsuspend_mask(ret);
9722         }
9723         return ret;
9724 #ifdef TARGET_NR_rt_sigtimedwait
9725     case TARGET_NR_rt_sigtimedwait:
9726         {
9727             sigset_t set;
9728             struct timespec uts, *puts;
9729             siginfo_t uinfo;
9730 
9731             if (arg4 != sizeof(target_sigset_t)) {
9732                 return -TARGET_EINVAL;
9733             }
9734 
9735             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9736                 return -TARGET_EFAULT;
9737             target_to_host_sigset(&set, p);
9738             unlock_user(p, arg1, 0);
9739             if (arg3) {
9740                 puts = &uts;
9741                 if (target_to_host_timespec(puts, arg3)) {
9742                     return -TARGET_EFAULT;
9743                 }
9744             } else {
9745                 puts = NULL;
9746             }
9747             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9748                                                  SIGSET_T_SIZE));
9749             if (!is_error(ret)) {
9750                 if (arg2) {
9751                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9752                                   0);
9753                     if (!p) {
9754                         return -TARGET_EFAULT;
9755                     }
9756                     host_to_target_siginfo(p, &uinfo);
9757                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9758                 }
9759                 ret = host_to_target_signal(ret);
9760             }
9761         }
9762         return ret;
9763 #endif
9764 #ifdef TARGET_NR_rt_sigtimedwait_time64
9765     case TARGET_NR_rt_sigtimedwait_time64:
9766         {
9767             sigset_t set;
9768             struct timespec uts, *puts;
9769             siginfo_t uinfo;
9770 
9771             if (arg4 != sizeof(target_sigset_t)) {
9772                 return -TARGET_EINVAL;
9773             }
9774 
9775             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9776             if (!p) {
9777                 return -TARGET_EFAULT;
9778             }
9779             target_to_host_sigset(&set, p);
9780             unlock_user(p, arg1, 0);
9781             if (arg3) {
9782                 puts = &uts;
9783                 if (target_to_host_timespec64(puts, arg3)) {
9784                     return -TARGET_EFAULT;
9785                 }
9786             } else {
9787                 puts = NULL;
9788             }
9789             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9790                                                  SIGSET_T_SIZE));
9791             if (!is_error(ret)) {
9792                 if (arg2) {
9793                     p = lock_user(VERIFY_WRITE, arg2,
9794                                   sizeof(target_siginfo_t), 0);
9795                     if (!p) {
9796                         return -TARGET_EFAULT;
9797                     }
9798                     host_to_target_siginfo(p, &uinfo);
9799                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9800                 }
9801                 ret = host_to_target_signal(ret);
9802             }
9803         }
9804         return ret;
9805 #endif
9806     case TARGET_NR_rt_sigqueueinfo:
9807         {
9808             siginfo_t uinfo;
9809 
9810             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9811             if (!p) {
9812                 return -TARGET_EFAULT;
9813             }
9814             target_to_host_siginfo(&uinfo, p);
9815             unlock_user(p, arg3, 0);
9816             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9817         }
9818         return ret;
9819     case TARGET_NR_rt_tgsigqueueinfo:
9820         {
9821             siginfo_t uinfo;
9822 
9823             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9824             if (!p) {
9825                 return -TARGET_EFAULT;
9826             }
9827             target_to_host_siginfo(&uinfo, p);
9828             unlock_user(p, arg4, 0);
9829             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9830         }
9831         return ret;
9832 #ifdef TARGET_NR_sigreturn
9833     case TARGET_NR_sigreturn:
9834         if (block_signals()) {
9835             return -QEMU_ERESTARTSYS;
9836         }
9837         return do_sigreturn(cpu_env);
9838 #endif
9839     case TARGET_NR_rt_sigreturn:
9840         if (block_signals()) {
9841             return -QEMU_ERESTARTSYS;
9842         }
9843         return do_rt_sigreturn(cpu_env);
9844     case TARGET_NR_sethostname:
9845         if (!(p = lock_user_string(arg1)))
9846             return -TARGET_EFAULT;
9847         ret = get_errno(sethostname(p, arg2));
9848         unlock_user(p, arg1, 0);
9849         return ret;
9850 #ifdef TARGET_NR_setrlimit
9851     case TARGET_NR_setrlimit:
9852         {
9853             int resource = target_to_host_resource(arg1);
9854             struct target_rlimit *target_rlim;
9855             struct rlimit rlim;
9856             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9857                 return -TARGET_EFAULT;
9858             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9859             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9860             unlock_user_struct(target_rlim, arg2, 0);
9861             /*
9862              * If we just passed through resource limit settings for memory then
9863              * they would also apply to QEMU's own allocations, and QEMU will
9864              * crash or hang or die if its allocations fail. Ideally we would
9865              * track the guest allocations in QEMU and apply the limits ourselves.
9866              * For now, just tell the guest the call succeeded but don't actually
9867              * limit anything.
9868              */
9869             if (resource != RLIMIT_AS &&
9870                 resource != RLIMIT_DATA &&
9871                 resource != RLIMIT_STACK) {
9872                 return get_errno(setrlimit(resource, &rlim));
9873             } else {
9874                 return 0;
9875             }
9876         }
9877 #endif
9878 #ifdef TARGET_NR_getrlimit
9879     case TARGET_NR_getrlimit:
9880         {
9881             int resource = target_to_host_resource(arg1);
9882             struct target_rlimit *target_rlim;
9883             struct rlimit rlim;
9884 
9885             ret = get_errno(getrlimit(resource, &rlim));
9886             if (!is_error(ret)) {
9887                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9888                     return -TARGET_EFAULT;
9889                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9890                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9891                 unlock_user_struct(target_rlim, arg2, 1);
9892             }
9893         }
9894         return ret;
9895 #endif
9896     case TARGET_NR_getrusage:
9897         {
9898             struct rusage rusage;
9899             ret = get_errno(getrusage(arg1, &rusage));
9900             if (!is_error(ret)) {
9901                 ret = host_to_target_rusage(arg2, &rusage);
9902             }
9903         }
9904         return ret;
9905 #if defined(TARGET_NR_gettimeofday)
9906     case TARGET_NR_gettimeofday:
9907         {
9908             struct timeval tv;
9909             struct timezone tz;
9910 
9911             ret = get_errno(gettimeofday(&tv, &tz));
9912             if (!is_error(ret)) {
9913                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9914                     return -TARGET_EFAULT;
9915                 }
9916                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9917                     return -TARGET_EFAULT;
9918                 }
9919             }
9920         }
9921         return ret;
9922 #endif
9923 #if defined(TARGET_NR_settimeofday)
9924     case TARGET_NR_settimeofday:
9925         {
9926             struct timeval tv, *ptv = NULL;
9927             struct timezone tz, *ptz = NULL;
9928 
9929             if (arg1) {
9930                 if (copy_from_user_timeval(&tv, arg1)) {
9931                     return -TARGET_EFAULT;
9932                 }
9933                 ptv = &tv;
9934             }
9935 
9936             if (arg2) {
9937                 if (copy_from_user_timezone(&tz, arg2)) {
9938                     return -TARGET_EFAULT;
9939                 }
9940                 ptz = &tz;
9941             }
9942 
9943             return get_errno(settimeofday(ptv, ptz));
9944         }
9945 #endif
9946 #if defined(TARGET_NR_select)
9947     case TARGET_NR_select:
9948 #if defined(TARGET_WANT_NI_OLD_SELECT)
9949         /* Some architectures used to implement old_select here,
9950          * but they now return ENOSYS for it.
9951          */
9952         ret = -TARGET_ENOSYS;
9953 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9954         ret = do_old_select(arg1);
9955 #else
9956         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9957 #endif
9958         return ret;
9959 #endif
9960 #ifdef TARGET_NR_pselect6
9961     case TARGET_NR_pselect6:
9962         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9963 #endif
9964 #ifdef TARGET_NR_pselect6_time64
9965     case TARGET_NR_pselect6_time64:
9966         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9967 #endif
9968 #ifdef TARGET_NR_symlink
9969     case TARGET_NR_symlink:
9970         {
9971             void *p2;
9972             p = lock_user_string(arg1);
9973             p2 = lock_user_string(arg2);
9974             if (!p || !p2)
9975                 ret = -TARGET_EFAULT;
9976             else
9977                 ret = get_errno(symlink(p, p2));
9978             unlock_user(p2, arg2, 0);
9979             unlock_user(p, arg1, 0);
9980         }
9981         return ret;
9982 #endif
9983 #if defined(TARGET_NR_symlinkat)
9984     case TARGET_NR_symlinkat:
9985         {
9986             void *p2;
9987             p  = lock_user_string(arg1);
9988             p2 = lock_user_string(arg3);
9989             if (!p || !p2)
9990                 ret = -TARGET_EFAULT;
9991             else
9992                 ret = get_errno(symlinkat(p, arg2, p2));
9993             unlock_user(p2, arg3, 0);
9994             unlock_user(p, arg1, 0);
9995         }
9996         return ret;
9997 #endif
9998 #ifdef TARGET_NR_readlink
9999     case TARGET_NR_readlink:
10000         {
10001             void *p2;
10002             p = lock_user_string(arg1);
10003             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10004             if (!p || !p2) {
10005                 ret = -TARGET_EFAULT;
10006             } else if (!arg3) {
10007                 /* Short circuit this for the magic exe check. */
10008                 ret = -TARGET_EINVAL;
10009             } else if (is_proc_myself((const char *)p, "exe")) {
10010                 char real[PATH_MAX], *temp;
10011                 temp = realpath(exec_path, real);
10012                 /* Return value is # of bytes that we wrote to the buffer. */
10013                 if (temp == NULL) {
10014                     ret = get_errno(-1);
10015                 } else {
10016                     /* Don't worry about sign mismatch as earlier mapping
10017                      * logic would have thrown a bad address error. */
10018                     ret = MIN(strlen(real), arg3);
10019                     /* We cannot NUL terminate the string. */
10020                     memcpy(p2, real, ret);
10021                 }
10022             } else {
10023                 ret = get_errno(readlink(path(p), p2, arg3));
10024             }
10025             unlock_user(p2, arg2, ret);
10026             unlock_user(p, arg1, 0);
10027         }
10028         return ret;
10029 #endif
10030 #if defined(TARGET_NR_readlinkat)
10031     case TARGET_NR_readlinkat:
10032         {
10033             void *p2;
10034             p  = lock_user_string(arg2);
10035             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10036             if (!p || !p2) {
10037                 ret = -TARGET_EFAULT;
10038             } else if (!arg4) {
10039                 /* Short circuit this for the magic exe check. */
10040                 ret = -TARGET_EINVAL;
10041             } else if (is_proc_myself((const char *)p, "exe")) {
10042                 char real[PATH_MAX], *temp;
10043                 temp = realpath(exec_path, real);
10044                 /* Return value is # of bytes that we wrote to the buffer. */
10045                 if (temp == NULL) {
10046                     ret = get_errno(-1);
10047                 } else {
10048                     /* Don't worry about sign mismatch as earlier mapping
10049                      * logic would have thrown a bad address error. */
10050                     ret = MIN(strlen(real), arg4);
10051                     /* We cannot NUL terminate the string. */
10052                     memcpy(p2, real, ret);
10053                 }
10054             } else {
10055                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10056             }
10057             unlock_user(p2, arg3, ret);
10058             unlock_user(p, arg2, 0);
10059         }
10060         return ret;
10061 #endif
10062 #ifdef TARGET_NR_swapon
10063     case TARGET_NR_swapon:
10064         if (!(p = lock_user_string(arg1)))
10065             return -TARGET_EFAULT;
10066         ret = get_errno(swapon(p, arg2));
10067         unlock_user(p, arg1, 0);
10068         return ret;
10069 #endif
10070     case TARGET_NR_reboot:
10071         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10072            /* arg4 is only used for LINUX_REBOOT_CMD_RESTART2; it must be ignored in all other cases */
10073            p = lock_user_string(arg4);
10074            if (!p) {
10075                return -TARGET_EFAULT;
10076            }
10077            ret = get_errno(reboot(arg1, arg2, arg3, p));
10078            unlock_user(p, arg4, 0);
10079         } else {
10080            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10081         }
10082         return ret;
10083 #ifdef TARGET_NR_mmap
10084     case TARGET_NR_mmap:
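    /*
     * On the targets listed below, the old mmap syscall passes a guest
     * pointer to a block of six arguments rather than passing the
     * arguments in registers.
     */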
10085 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10086     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10087     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10088     || defined(TARGET_S390X)
10089         {
10090             abi_ulong *v;
10091             abi_ulong v1, v2, v3, v4, v5, v6;
10092             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10093                 return -TARGET_EFAULT;
10094             v1 = tswapal(v[0]);
10095             v2 = tswapal(v[1]);
10096             v3 = tswapal(v[2]);
10097             v4 = tswapal(v[3]);
10098             v5 = tswapal(v[4]);
10099             v6 = tswapal(v[5]);
10100             unlock_user(v, arg1, 0);
10101             ret = get_errno(target_mmap(v1, v2, v3,
10102                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10103                                         v5, v6));
10104         }
10105 #else
10106         /* mmap pointers are always untagged */
10107         ret = get_errno(target_mmap(arg1, arg2, arg3,
10108                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10109                                     arg5,
10110                                     arg6));
10111 #endif
10112         return ret;
10113 #endif
10114 #ifdef TARGET_NR_mmap2
10115     case TARGET_NR_mmap2:
10116 #ifndef MMAP_SHIFT
10117 #define MMAP_SHIFT 12
10118 #endif
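        /*
         * mmap2 takes its offset in units of (1 << MMAP_SHIFT) bytes;
         * convert it to a byte offset for target_mmap().
         */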
10119         ret = target_mmap(arg1, arg2, arg3,
10120                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10121                           arg5, arg6 << MMAP_SHIFT);
10122         return get_errno(ret);
10123 #endif
10124     case TARGET_NR_munmap:
10125         arg1 = cpu_untagged_addr(cpu, arg1);
10126         return get_errno(target_munmap(arg1, arg2));
10127     case TARGET_NR_mprotect:
10128         arg1 = cpu_untagged_addr(cpu, arg1);
10129         {
10130             TaskState *ts = cpu->opaque;
10131             /* Special hack to detect libc making the stack executable.  */
10132             if ((arg3 & PROT_GROWSDOWN)
10133                 && arg1 >= ts->info->stack_limit
10134                 && arg1 <= ts->info->start_stack) {
10135                 arg3 &= ~PROT_GROWSDOWN;
10136                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10137                 arg1 = ts->info->stack_limit;
10138             }
10139         }
10140         return get_errno(target_mprotect(arg1, arg2, arg3));
10141 #ifdef TARGET_NR_mremap
10142     case TARGET_NR_mremap:
10143         arg1 = cpu_untagged_addr(cpu, arg1);
10144         /* mremap new_addr (arg5) is always untagged */
10145         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10146 #endif
10147         /* ??? msync/mlock/munlock are broken for softmmu.  */
10148 #ifdef TARGET_NR_msync
10149     case TARGET_NR_msync:
10150         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10151 #endif
10152 #ifdef TARGET_NR_mlock
10153     case TARGET_NR_mlock:
10154         return get_errno(mlock(g2h(cpu, arg1), arg2));
10155 #endif
10156 #ifdef TARGET_NR_munlock
10157     case TARGET_NR_munlock:
10158         return get_errno(munlock(g2h(cpu, arg1), arg2));
10159 #endif
10160 #ifdef TARGET_NR_mlockall
10161     case TARGET_NR_mlockall:
10162         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10163 #endif
10164 #ifdef TARGET_NR_munlockall
10165     case TARGET_NR_munlockall:
10166         return get_errno(munlockall());
10167 #endif
10168 #ifdef TARGET_NR_truncate
10169     case TARGET_NR_truncate:
10170         if (!(p = lock_user_string(arg1)))
10171             return -TARGET_EFAULT;
10172         ret = get_errno(truncate(p, arg2));
10173         unlock_user(p, arg1, 0);
10174         return ret;
10175 #endif
10176 #ifdef TARGET_NR_ftruncate
10177     case TARGET_NR_ftruncate:
10178         return get_errno(ftruncate(arg1, arg2));
10179 #endif
10180     case TARGET_NR_fchmod:
10181         return get_errno(fchmod(arg1, arg2));
10182 #if defined(TARGET_NR_fchmodat)
10183     case TARGET_NR_fchmodat:
10184         if (!(p = lock_user_string(arg2)))
10185             return -TARGET_EFAULT;
10186         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10187         unlock_user(p, arg2, 0);
10188         return ret;
10189 #endif
10190     case TARGET_NR_getpriority:
10191         /* Note that negative values are valid for getpriority, so we must
10192            differentiate based on errno settings.  */
10193         errno = 0;
10194         ret = getpriority(arg1, arg2);
10195         if (ret == -1 && errno != 0) {
10196             return -host_to_target_errno(errno);
10197         }
10198 #ifdef TARGET_ALPHA
10199         /* Return value is the unbiased priority.  Signal no error.  */
10200         cpu_env->ir[IR_V0] = 0;
10201 #else
10202         /* Return value is a biased priority to avoid negative numbers.  */
10203         ret = 20 - ret;
10204 #endif
10205         return ret;
10206     case TARGET_NR_setpriority:
10207         return get_errno(setpriority(arg1, arg2, arg3));
10208 #ifdef TARGET_NR_statfs
10209     case TARGET_NR_statfs:
10210         if (!(p = lock_user_string(arg1))) {
10211             return -TARGET_EFAULT;
10212         }
10213         ret = get_errno(statfs(path(p), &stfs));
10214         unlock_user(p, arg1, 0);
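    /* TARGET_NR_fstatfs below jumps here to share the statfs conversion. */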
10215     convert_statfs:
10216         if (!is_error(ret)) {
10217             struct target_statfs *target_stfs;
10218 
10219             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10220                 return -TARGET_EFAULT;
10221             __put_user(stfs.f_type, &target_stfs->f_type);
10222             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10223             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10224             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10225             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10226             __put_user(stfs.f_files, &target_stfs->f_files);
10227             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10228             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10229             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10230             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10231             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10232 #ifdef _STATFS_F_FLAGS
10233             __put_user(stfs.f_flags, &target_stfs->f_flags);
10234 #else
10235             __put_user(0, &target_stfs->f_flags);
10236 #endif
10237             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10238             unlock_user_struct(target_stfs, arg2, 1);
10239         }
10240         return ret;
10241 #endif
10242 #ifdef TARGET_NR_fstatfs
10243     case TARGET_NR_fstatfs:
10244         ret = get_errno(fstatfs(arg1, &stfs));
10245         goto convert_statfs;
10246 #endif
10247 #ifdef TARGET_NR_statfs64
10248     case TARGET_NR_statfs64:
10249         if (!(p = lock_user_string(arg1))) {
10250             return -TARGET_EFAULT;
10251         }
10252         ret = get_errno(statfs(path(p), &stfs));
10253         unlock_user(p, arg1, 0);
10254     convert_statfs64:
10255         if (!is_error(ret)) {
10256             struct target_statfs64 *target_stfs;
10257 
10258             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10259                 return -TARGET_EFAULT;
10260             __put_user(stfs.f_type, &target_stfs->f_type);
10261             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10262             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10263             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10264             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10265             __put_user(stfs.f_files, &target_stfs->f_files);
10266             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10267             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10268             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10269             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10270             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10271 #ifdef _STATFS_F_FLAGS
10272             __put_user(stfs.f_flags, &target_stfs->f_flags);
10273 #else
10274             __put_user(0, &target_stfs->f_flags);
10275 #endif
10276             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10277             unlock_user_struct(target_stfs, arg3, 1);
10278         }
10279         return ret;
10280     case TARGET_NR_fstatfs64:
10281         ret = get_errno(fstatfs(arg1, &stfs));
10282         goto convert_statfs64;
10283 #endif
10284 #ifdef TARGET_NR_socketcall
10285     case TARGET_NR_socketcall:
10286         return do_socketcall(arg1, arg2);
10287 #endif
10288 #ifdef TARGET_NR_accept
10289     case TARGET_NR_accept:
10290         return do_accept4(arg1, arg2, arg3, 0);
10291 #endif
10292 #ifdef TARGET_NR_accept4
10293     case TARGET_NR_accept4:
10294         return do_accept4(arg1, arg2, arg3, arg4);
10295 #endif
10296 #ifdef TARGET_NR_bind
10297     case TARGET_NR_bind:
10298         return do_bind(arg1, arg2, arg3);
10299 #endif
10300 #ifdef TARGET_NR_connect
10301     case TARGET_NR_connect:
10302         return do_connect(arg1, arg2, arg3);
10303 #endif
10304 #ifdef TARGET_NR_getpeername
10305     case TARGET_NR_getpeername:
10306         return do_getpeername(arg1, arg2, arg3);
10307 #endif
10308 #ifdef TARGET_NR_getsockname
10309     case TARGET_NR_getsockname:
10310         return do_getsockname(arg1, arg2, arg3);
10311 #endif
10312 #ifdef TARGET_NR_getsockopt
10313     case TARGET_NR_getsockopt:
10314         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10315 #endif
10316 #ifdef TARGET_NR_listen
10317     case TARGET_NR_listen:
10318         return get_errno(listen(arg1, arg2));
10319 #endif
10320 #ifdef TARGET_NR_recv
10321     case TARGET_NR_recv:
10322         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10323 #endif
10324 #ifdef TARGET_NR_recvfrom
10325     case TARGET_NR_recvfrom:
10326         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10327 #endif
10328 #ifdef TARGET_NR_recvmsg
10329     case TARGET_NR_recvmsg:
10330         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10331 #endif
10332 #ifdef TARGET_NR_send
10333     case TARGET_NR_send:
10334         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10335 #endif
10336 #ifdef TARGET_NR_sendmsg
10337     case TARGET_NR_sendmsg:
10338         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10339 #endif
10340 #ifdef TARGET_NR_sendmmsg
10341     case TARGET_NR_sendmmsg:
10342         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10343 #endif
10344 #ifdef TARGET_NR_recvmmsg
10345     case TARGET_NR_recvmmsg:
10346         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10347 #endif
10348 #ifdef TARGET_NR_sendto
10349     case TARGET_NR_sendto:
10350         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10351 #endif
10352 #ifdef TARGET_NR_shutdown
10353     case TARGET_NR_shutdown:
10354         return get_errno(shutdown(arg1, arg2));
10355 #endif
10356 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10357     case TARGET_NR_getrandom:
10358         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10359         if (!p) {
10360             return -TARGET_EFAULT;
10361         }
10362         ret = get_errno(getrandom(p, arg2, arg3));
10363         unlock_user(p, arg1, ret);
10364         return ret;
10365 #endif
10366 #ifdef TARGET_NR_socket
10367     case TARGET_NR_socket:
10368         return do_socket(arg1, arg2, arg3);
10369 #endif
10370 #ifdef TARGET_NR_socketpair
10371     case TARGET_NR_socketpair:
10372         return do_socketpair(arg1, arg2, arg3, arg4);
10373 #endif
10374 #ifdef TARGET_NR_setsockopt
10375     case TARGET_NR_setsockopt:
10376         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10377 #endif
10378 #if defined(TARGET_NR_syslog)
10379     case TARGET_NR_syslog:
10380         {
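            /* syslog(2): arg1 is the action, arg2 the buffer, arg3 its length. */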
10381             int len = arg3;
10382 
10383             switch (arg1) {
10384             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10385             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10386             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10387             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10388             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10389             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10390             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10391             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10392                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10393             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10394             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10395             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10396                 {
10397                     if (len < 0) {
10398                         return -TARGET_EINVAL;
10399                     }
10400                     if (len == 0) {
10401                         return 0;
10402                     }
10403                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10404                     if (!p) {
10405                         return -TARGET_EFAULT;
10406                     }
10407                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10408                     unlock_user(p, arg2, arg3);
10409                 }
10410                 return ret;
10411             default:
10412                 return -TARGET_EINVAL;
10413             }
10414         }
10415         break;
10416 #endif
10417     case TARGET_NR_setitimer:
10418         {
10419             struct itimerval value, ovalue, *pvalue;
10420 
10421             if (arg2) {
10422                 pvalue = &value;
10423                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10424                     || copy_from_user_timeval(&pvalue->it_value,
10425                                               arg2 + sizeof(struct target_timeval)))
10426                     return -TARGET_EFAULT;
10427             } else {
10428                 pvalue = NULL;
10429             }
10430             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10431             if (!is_error(ret) && arg3) {
10432                 if (copy_to_user_timeval(arg3,
10433                                          &ovalue.it_interval)
10434                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10435                                             &ovalue.it_value))
10436                     return -TARGET_EFAULT;
10437             }
10438         }
10439         return ret;
10440     case TARGET_NR_getitimer:
10441         {
10442             struct itimerval value;
10443 
10444             ret = get_errno(getitimer(arg1, &value));
10445             if (!is_error(ret) && arg2) {
10446                 if (copy_to_user_timeval(arg2,
10447                                          &value.it_interval)
10448                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10449                                             &value.it_value))
10450                     return -TARGET_EFAULT;
10451             }
10452         }
10453         return ret;
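    /*
     * stat and lstat jump into the fstat case below (do_stat) to share
     * the conversion to the target's struct stat layout.
     */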
10454 #ifdef TARGET_NR_stat
10455     case TARGET_NR_stat:
10456         if (!(p = lock_user_string(arg1))) {
10457             return -TARGET_EFAULT;
10458         }
10459         ret = get_errno(stat(path(p), &st));
10460         unlock_user(p, arg1, 0);
10461         goto do_stat;
10462 #endif
10463 #ifdef TARGET_NR_lstat
10464     case TARGET_NR_lstat:
10465         if (!(p = lock_user_string(arg1))) {
10466             return -TARGET_EFAULT;
10467         }
10468         ret = get_errno(lstat(path(p), &st));
10469         unlock_user(p, arg1, 0);
10470         goto do_stat;
10471 #endif
10472 #ifdef TARGET_NR_fstat
10473     case TARGET_NR_fstat:
10474         {
10475             ret = get_errno(fstat(arg1, &st));
10476 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10477         do_stat:
10478 #endif
10479             if (!is_error(ret)) {
10480                 struct target_stat *target_st;
10481 
10482                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10483                     return -TARGET_EFAULT;
10484                 memset(target_st, 0, sizeof(*target_st));
10485                 __put_user(st.st_dev, &target_st->st_dev);
10486                 __put_user(st.st_ino, &target_st->st_ino);
10487                 __put_user(st.st_mode, &target_st->st_mode);
10488                 __put_user(st.st_uid, &target_st->st_uid);
10489                 __put_user(st.st_gid, &target_st->st_gid);
10490                 __put_user(st.st_nlink, &target_st->st_nlink);
10491                 __put_user(st.st_rdev, &target_st->st_rdev);
10492                 __put_user(st.st_size, &target_st->st_size);
10493                 __put_user(st.st_blksize, &target_st->st_blksize);
10494                 __put_user(st.st_blocks, &target_st->st_blocks);
10495                 __put_user(st.st_atime, &target_st->target_st_atime);
10496                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10497                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10498 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10499                 __put_user(st.st_atim.tv_nsec,
10500                            &target_st->target_st_atime_nsec);
10501                 __put_user(st.st_mtim.tv_nsec,
10502                            &target_st->target_st_mtime_nsec);
10503                 __put_user(st.st_ctim.tv_nsec,
10504                            &target_st->target_st_ctime_nsec);
10505 #endif
10506                 unlock_user_struct(target_st, arg2, 1);
10507             }
10508         }
10509         return ret;
10510 #endif
10511     case TARGET_NR_vhangup:
10512         return get_errno(vhangup());
10513 #ifdef TARGET_NR_syscall
10514     case TARGET_NR_syscall:
10515         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10516                           arg6, arg7, arg8, 0);
10517 #endif
10518 #if defined(TARGET_NR_wait4)
10519     case TARGET_NR_wait4:
10520         {
10521             int status;
10522             abi_long status_ptr = arg2;
10523             struct rusage rusage, *rusage_ptr;
10524             abi_ulong target_rusage = arg4;
10525             abi_long rusage_err;
10526             if (target_rusage)
10527                 rusage_ptr = &rusage;
10528             else
10529                 rusage_ptr = NULL;
10530             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10531             if (!is_error(ret)) {
10532                 if (status_ptr && ret) {
10533                     status = host_to_target_waitstatus(status);
10534                     if (put_user_s32(status, status_ptr))
10535                         return -TARGET_EFAULT;
10536                 }
10537                 if (target_rusage) {
10538                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10539                     if (rusage_err) {
10540                         ret = rusage_err;
10541                     }
10542                 }
10543             }
10544         }
10545         return ret;
10546 #endif
10547 #ifdef TARGET_NR_swapoff
10548     case TARGET_NR_swapoff:
10549         if (!(p = lock_user_string(arg1)))
10550             return -TARGET_EFAULT;
10551         ret = get_errno(swapoff(p));
10552         unlock_user(p, arg1, 0);
10553         return ret;
10554 #endif
10555     case TARGET_NR_sysinfo:
10556         {
10557             struct target_sysinfo *target_value;
10558             struct sysinfo value;
10559             ret = get_errno(sysinfo(&value));
10560             if (!is_error(ret) && arg1)
10561             {
10562                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10563                     return -TARGET_EFAULT;
10564                 __put_user(value.uptime, &target_value->uptime);
10565                 __put_user(value.loads[0], &target_value->loads[0]);
10566                 __put_user(value.loads[1], &target_value->loads[1]);
10567                 __put_user(value.loads[2], &target_value->loads[2]);
10568                 __put_user(value.totalram, &target_value->totalram);
10569                 __put_user(value.freeram, &target_value->freeram);
10570                 __put_user(value.sharedram, &target_value->sharedram);
10571                 __put_user(value.bufferram, &target_value->bufferram);
10572                 __put_user(value.totalswap, &target_value->totalswap);
10573                 __put_user(value.freeswap, &target_value->freeswap);
10574                 __put_user(value.procs, &target_value->procs);
10575                 __put_user(value.totalhigh, &target_value->totalhigh);
10576                 __put_user(value.freehigh, &target_value->freehigh);
10577                 __put_user(value.mem_unit, &target_value->mem_unit);
10578                 unlock_user_struct(target_value, arg1, 1);
10579             }
10580         }
10581         return ret;
10582 #ifdef TARGET_NR_ipc
10583     case TARGET_NR_ipc:
10584         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10585 #endif
10586 #ifdef TARGET_NR_semget
10587     case TARGET_NR_semget:
10588         return get_errno(semget(arg1, arg2, arg3));
10589 #endif
10590 #ifdef TARGET_NR_semop
10591     case TARGET_NR_semop:
10592         return do_semtimedop(arg1, arg2, arg3, 0, false);
10593 #endif
10594 #ifdef TARGET_NR_semtimedop
10595     case TARGET_NR_semtimedop:
10596         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10597 #endif
10598 #ifdef TARGET_NR_semtimedop_time64
10599     case TARGET_NR_semtimedop_time64:
10600         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10601 #endif
10602 #ifdef TARGET_NR_semctl
10603     case TARGET_NR_semctl:
10604         return do_semctl(arg1, arg2, arg3, arg4);
10605 #endif
10606 #ifdef TARGET_NR_msgctl
10607     case TARGET_NR_msgctl:
10608         return do_msgctl(arg1, arg2, arg3);
10609 #endif
10610 #ifdef TARGET_NR_msgget
10611     case TARGET_NR_msgget:
10612         return get_errno(msgget(arg1, arg2));
10613 #endif
10614 #ifdef TARGET_NR_msgrcv
10615     case TARGET_NR_msgrcv:
10616         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10617 #endif
10618 #ifdef TARGET_NR_msgsnd
10619     case TARGET_NR_msgsnd:
10620         return do_msgsnd(arg1, arg2, arg3, arg4);
10621 #endif
10622 #ifdef TARGET_NR_shmget
10623     case TARGET_NR_shmget:
10624         return get_errno(shmget(arg1, arg2, arg3));
10625 #endif
10626 #ifdef TARGET_NR_shmctl
10627     case TARGET_NR_shmctl:
10628         return do_shmctl(arg1, arg2, arg3);
10629 #endif
10630 #ifdef TARGET_NR_shmat
10631     case TARGET_NR_shmat:
10632         return do_shmat(cpu_env, arg1, arg2, arg3);
10633 #endif
10634 #ifdef TARGET_NR_shmdt
10635     case TARGET_NR_shmdt:
10636         return do_shmdt(arg1);
10637 #endif
10638     case TARGET_NR_fsync:
10639         return get_errno(fsync(arg1));
10640     case TARGET_NR_clone:
10641         /* Linux manages to have three different orderings for its
10642          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10643          * match the kernel's CONFIG_CLONE_* settings.
10644          * Microblaze is further special in that it uses a sixth
10645          * implicit argument to clone for the TLS pointer.
10646          */
10647 #if defined(TARGET_MICROBLAZE)
10648         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10649 #elif defined(TARGET_CLONE_BACKWARDS)
10650         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10651 #elif defined(TARGET_CLONE_BACKWARDS2)
10652         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10653 #else
10654         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10655 #endif
10656         return ret;
10657 #ifdef __NR_exit_group
10658         /* new thread calls */
10659     case TARGET_NR_exit_group:
10660         preexit_cleanup(cpu_env, arg1);
10661         return get_errno(exit_group(arg1));
10662 #endif
10663     case TARGET_NR_setdomainname:
10664         if (!(p = lock_user_string(arg1)))
10665             return -TARGET_EFAULT;
10666         ret = get_errno(setdomainname(p, arg2));
10667         unlock_user(p, arg1, 0);
10668         return ret;
10669     case TARGET_NR_uname:
10670         /* no need to transcode because we use the linux syscall */
10671         {
10672             struct new_utsname * buf;
10673 
10674             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10675                 return -TARGET_EFAULT;
10676             ret = get_errno(sys_uname(buf));
10677             if (!is_error(ret)) {
10678                 /* Overwrite the native machine name with whatever is being
10679                    emulated. */
10680                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10681                           sizeof(buf->machine));
10682                 /* Allow the user to override the reported release.  */
10683                 if (qemu_uname_release && *qemu_uname_release) {
10684                     g_strlcpy(buf->release, qemu_uname_release,
10685                               sizeof(buf->release));
10686                 }
10687             }
10688             unlock_user_struct(buf, arg1, 1);
10689         }
10690         return ret;
10691 #ifdef TARGET_I386
10692     case TARGET_NR_modify_ldt:
10693         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10694 #if !defined(TARGET_X86_64)
10695     case TARGET_NR_vm86:
10696         return do_vm86(cpu_env, arg1, arg2);
10697 #endif
10698 #endif
10699 #if defined(TARGET_NR_adjtimex)
10700     case TARGET_NR_adjtimex:
10701         {
10702             struct timex host_buf;
10703 
10704             if (target_to_host_timex(&host_buf, arg1) != 0) {
10705                 return -TARGET_EFAULT;
10706             }
10707             ret = get_errno(adjtimex(&host_buf));
10708             if (!is_error(ret)) {
10709                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10710                     return -TARGET_EFAULT;
10711                 }
10712             }
10713         }
10714         return ret;
10715 #endif
10716 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10717     case TARGET_NR_clock_adjtime:
10718         {
10719             struct timex htx, *phtx = &htx;
10720 
10721             if (target_to_host_timex(phtx, arg2) != 0) {
10722                 return -TARGET_EFAULT;
10723             }
10724             ret = get_errno(clock_adjtime(arg1, phtx));
10725             if (!is_error(ret) && phtx) {
10726                 if (host_to_target_timex(arg2, phtx) != 0) {
10727                     return -TARGET_EFAULT;
10728                 }
10729             }
10730         }
10731         return ret;
10732 #endif
10733 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10734     case TARGET_NR_clock_adjtime64:
10735         {
10736             struct timex htx;
10737 
10738             if (target_to_host_timex64(&htx, arg2) != 0) {
10739                 return -TARGET_EFAULT;
10740             }
10741             ret = get_errno(clock_adjtime(arg1, &htx));
10742             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10743                 return -TARGET_EFAULT;
10744             }
10745         }
10746         return ret;
10747 #endif
10748     case TARGET_NR_getpgid:
10749         return get_errno(getpgid(arg1));
10750     case TARGET_NR_fchdir:
10751         return get_errno(fchdir(arg1));
10752     case TARGET_NR_personality:
10753         return get_errno(personality(arg1));
10754 #ifdef TARGET_NR__llseek /* Not on alpha */
10755     case TARGET_NR__llseek:
10756         {
10757             int64_t res;
10758 #if !defined(__NR_llseek)
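            /*
             * Hosts without an llseek syscall (typically 64-bit hosts, where
             * off_t is already 64-bit): emulate it with a plain lseek() on
             * the combined 64-bit offset.
             */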
10759             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10760             if (res == -1) {
10761                 ret = get_errno(res);
10762             } else {
10763                 ret = 0;
10764             }
10765 #else
10766             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10767 #endif
10768             if ((ret == 0) && put_user_s64(res, arg4)) {
10769                 return -TARGET_EFAULT;
10770             }
10771         }
10772         return ret;
10773 #endif
10774 #ifdef TARGET_NR_getdents
10775     case TARGET_NR_getdents:
10776         return do_getdents(arg1, arg2, arg3);
10777 #endif /* TARGET_NR_getdents */
10778 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10779     case TARGET_NR_getdents64:
10780         return do_getdents64(arg1, arg2, arg3);
10781 #endif /* TARGET_NR_getdents64 */
10782 #if defined(TARGET_NR__newselect)
10783     case TARGET_NR__newselect:
10784         return do_select(arg1, arg2, arg3, arg4, arg5);
10785 #endif
10786 #ifdef TARGET_NR_poll
10787     case TARGET_NR_poll:
10788         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10789 #endif
10790 #ifdef TARGET_NR_ppoll
10791     case TARGET_NR_ppoll:
10792         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10793 #endif
10794 #ifdef TARGET_NR_ppoll_time64
10795     case TARGET_NR_ppoll_time64:
10796         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10797 #endif
10798     case TARGET_NR_flock:
10799         /* NOTE: the flock constant seems to be the same for every
10800            Linux platform */
10801         return get_errno(safe_flock(arg1, arg2));
10802     case TARGET_NR_readv:
10803         {
10804             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10805             if (vec != NULL) {
10806                 ret = get_errno(safe_readv(arg1, vec, arg3));
10807                 unlock_iovec(vec, arg2, arg3, 1);
10808             } else {
10809                 ret = -host_to_target_errno(errno);
10810             }
10811         }
10812         return ret;
10813     case TARGET_NR_writev:
10814         {
10815             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10816             if (vec != NULL) {
10817                 ret = get_errno(safe_writev(arg1, vec, arg3));
10818                 unlock_iovec(vec, arg2, arg3, 0);
10819             } else {
10820                 ret = -host_to_target_errno(errno);
10821             }
10822         }
10823         return ret;
10824 #if defined(TARGET_NR_preadv)
10825     case TARGET_NR_preadv:
10826         {
10827             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10828             if (vec != NULL) {
10829                 unsigned long low, high;
10830 
10831                 target_to_host_low_high(arg4, arg5, &low, &high);
10832                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10833                 unlock_iovec(vec, arg2, arg3, 1);
10834             } else {
10835                 ret = -host_to_target_errno(errno);
10836            }
10837         }
10838         return ret;
10839 #endif
10840 #if defined(TARGET_NR_pwritev)
10841     case TARGET_NR_pwritev:
10842         {
10843             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10844             if (vec != NULL) {
10845                 unsigned long low, high;
10846 
10847                 target_to_host_low_high(arg4, arg5, &low, &high);
10848                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10849                 unlock_iovec(vec, arg2, arg3, 0);
10850             } else {
10851                 ret = -host_to_target_errno(errno);
10852            }
10853         }
10854         return ret;
10855 #endif
10856     case TARGET_NR_getsid:
10857         return get_errno(getsid(arg1));
10858 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10859     case TARGET_NR_fdatasync:
10860         return get_errno(fdatasync(arg1));
10861 #endif
10862     case TARGET_NR_sched_getaffinity:
10863         {
10864             unsigned int mask_size;
10865             unsigned long *mask;
10866 
10867             /*
10868              * sched_getaffinity needs multiples of ulong, so we need to take
10869              * care of mismatches between target ulong and host ulong sizes.
10870              */
10871             if (arg2 & (sizeof(abi_ulong) - 1)) {
10872                 return -TARGET_EINVAL;
10873             }
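            /*
             * Round the guest-supplied byte count up to a whole number of
             * host longs, which is the granularity the host syscall works in.
             */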
10874             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10875 
10876             mask = alloca(mask_size);
10877             memset(mask, 0, mask_size);
10878             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10879 
10880             if (!is_error(ret)) {
10881                 if (ret > arg2) {
10882                     /* More data returned than the caller's buffer will fit.
10883                      * This only happens if sizeof(abi_long) < sizeof(long)
10884                      * and the caller passed us a buffer holding an odd number
10885                      * of abi_longs. If the host kernel is actually using the
10886                      * extra 4 bytes then fail EINVAL; otherwise we can just
10887                      * ignore them and only copy the interesting part.
10888                      */
10889                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10890                     if (numcpus > arg2 * 8) {
10891                         return -TARGET_EINVAL;
10892                     }
10893                     ret = arg2;
10894                 }
10895 
10896                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10897                     return -TARGET_EFAULT;
10898                 }
10899             }
10900         }
10901         return ret;
10902     case TARGET_NR_sched_setaffinity:
10903         {
10904             unsigned int mask_size;
10905             unsigned long *mask;
10906 
10907             /*
10908              * sched_setaffinity needs multiples of ulong, so we need to take
10909              * care of mismatches between target ulong and host ulong sizes.
10910              */
10911             if (arg2 & (sizeof(abi_ulong) - 1)) {
10912                 return -TARGET_EINVAL;
10913             }
10914             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10915             mask = alloca(mask_size);
10916 
10917             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10918             if (ret) {
10919                 return ret;
10920             }
10921 
10922             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10923         }
10924     case TARGET_NR_getcpu:
10925         {
10926             unsigned cpu, node;
10927             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10928                                        arg2 ? &node : NULL,
10929                                        NULL));
10930             if (is_error(ret)) {
10931                 return ret;
10932             }
10933             if (arg1 && put_user_u32(cpu, arg1)) {
10934                 return -TARGET_EFAULT;
10935             }
10936             if (arg2 && put_user_u32(node, arg2)) {
10937                 return -TARGET_EFAULT;
10938             }
10939         }
10940         return ret;
10941     case TARGET_NR_sched_setparam:
10942         {
10943             struct target_sched_param *target_schp;
10944             struct sched_param schp;
10945 
10946             if (arg2 == 0) {
10947                 return -TARGET_EINVAL;
10948             }
10949             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10950                 return -TARGET_EFAULT;
10951             }
10952             schp.sched_priority = tswap32(target_schp->sched_priority);
10953             unlock_user_struct(target_schp, arg2, 0);
10954             return get_errno(sys_sched_setparam(arg1, &schp));
10955         }
10956     case TARGET_NR_sched_getparam:
10957         {
10958             struct target_sched_param *target_schp;
10959             struct sched_param schp;
10960 
10961             if (arg2 == 0) {
10962                 return -TARGET_EINVAL;
10963             }
10964             ret = get_errno(sys_sched_getparam(arg1, &schp));
10965             if (!is_error(ret)) {
10966                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10967                     return -TARGET_EFAULT;
10968                 }
10969                 target_schp->sched_priority = tswap32(schp.sched_priority);
10970                 unlock_user_struct(target_schp, arg2, 1);
10971             }
10972         }
10973         return ret;
10974     case TARGET_NR_sched_setscheduler:
10975         {
10976             struct target_sched_param *target_schp;
10977             struct sched_param schp;
10978             if (arg3 == 0) {
10979                 return -TARGET_EINVAL;
10980             }
10981             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10982                 return -TARGET_EFAULT;
10983             }
10984             schp.sched_priority = tswap32(target_schp->sched_priority);
10985             unlock_user_struct(target_schp, arg3, 0);
10986             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10987         }
10988     case TARGET_NR_sched_getscheduler:
10989         return get_errno(sys_sched_getscheduler(arg1));
10990     case TARGET_NR_sched_getattr:
10991         {
10992             struct target_sched_attr *target_scha;
10993             struct sched_attr scha;
10994             if (arg2 == 0) {
10995                 return -TARGET_EINVAL;
10996             }
10997             if (arg3 > sizeof(scha)) {
10998                 arg3 = sizeof(scha);
10999             }
11000             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11001             if (!is_error(ret)) {
11002                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11003                 if (!target_scha) {
11004                     return -TARGET_EFAULT;
11005                 }
11006                 target_scha->size = tswap32(scha.size);
11007                 target_scha->sched_policy = tswap32(scha.sched_policy);
11008                 target_scha->sched_flags = tswap64(scha.sched_flags);
11009                 target_scha->sched_nice = tswap32(scha.sched_nice);
11010                 target_scha->sched_priority = tswap32(scha.sched_priority);
11011                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11012                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11013                 target_scha->sched_period = tswap64(scha.sched_period);
11014                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11015                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11016                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11017                 }
11018                 unlock_user(target_scha, arg2, arg3);
11019             }
11020             return ret;
11021         }
11022     case TARGET_NR_sched_setattr:
11023         {
11024             struct target_sched_attr *target_scha;
11025             struct sched_attr scha;
11026             uint32_t size;
11027             int zeroed;
11028             if (arg2 == 0) {
11029                 return -TARGET_EINVAL;
11030             }
11031             if (get_user_u32(size, arg2)) {
11032                 return -TARGET_EFAULT;
11033             }
11034             if (!size) {
11035                 size = offsetof(struct target_sched_attr, sched_util_min);
11036             }
11037             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11038                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11039                     return -TARGET_EFAULT;
11040                 }
11041                 return -TARGET_E2BIG;
11042             }
11043 
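            /*
             * If the guest's struct is larger than the layout we know about,
             * any extra bytes must be zero; otherwise report E2BIG, as the
             * kernel does for oversized sched_attr structures.
             */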
11044             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11045             if (zeroed < 0) {
11046                 return zeroed;
11047             } else if (zeroed == 0) {
11048                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11049                     return -TARGET_EFAULT;
11050                 }
11051                 return -TARGET_E2BIG;
11052             }
11053             if (size > sizeof(struct target_sched_attr)) {
11054                 size = sizeof(struct target_sched_attr);
11055             }
11056 
11057             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11058             if (!target_scha) {
11059                 return -TARGET_EFAULT;
11060             }
11061             scha.size = size;
11062             scha.sched_policy = tswap32(target_scha->sched_policy);
11063             scha.sched_flags = tswap64(target_scha->sched_flags);
11064             scha.sched_nice = tswap32(target_scha->sched_nice);
11065             scha.sched_priority = tswap32(target_scha->sched_priority);
11066             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11067             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11068             scha.sched_period = tswap64(target_scha->sched_period);
11069             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11070                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11071                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11072             }
11073             unlock_user(target_scha, arg2, 0);
11074             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11075         }
11076     case TARGET_NR_sched_yield:
11077         return get_errno(sched_yield());
11078     case TARGET_NR_sched_get_priority_max:
11079         return get_errno(sched_get_priority_max(arg1));
11080     case TARGET_NR_sched_get_priority_min:
11081         return get_errno(sched_get_priority_min(arg1));
11082 #ifdef TARGET_NR_sched_rr_get_interval
11083     case TARGET_NR_sched_rr_get_interval:
11084         {
11085             struct timespec ts;
11086             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11087             if (!is_error(ret)) {
11088                 ret = host_to_target_timespec(arg2, &ts);
11089             }
11090         }
11091         return ret;
11092 #endif
11093 #ifdef TARGET_NR_sched_rr_get_interval_time64
11094     case TARGET_NR_sched_rr_get_interval_time64:
11095         {
11096             struct timespec ts;
11097             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11098             if (!is_error(ret)) {
11099                 ret = host_to_target_timespec64(arg2, &ts);
11100             }
11101         }
11102         return ret;
11103 #endif
11104 #if defined(TARGET_NR_nanosleep)
11105     case TARGET_NR_nanosleep:
11106         {
11107             struct timespec req, rem;
11108             target_to_host_timespec(&req, arg1);
11109             ret = get_errno(safe_nanosleep(&req, &rem));
11110             if (is_error(ret) && arg2) {
11111                 host_to_target_timespec(arg2, &rem);
11112             }
11113         }
11114         return ret;
11115 #endif
11116     case TARGET_NR_prctl:
11117         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11119 #ifdef TARGET_NR_arch_prctl
11120     case TARGET_NR_arch_prctl:
11121         return do_arch_prctl(cpu_env, arg1, arg2);
11122 #endif
11123 #ifdef TARGET_NR_pread64
11124     case TARGET_NR_pread64:
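        /*
         * ABIs that pass 64-bit values in aligned register pairs leave an
         * unused slot before the offset, so shift the offset halves down by
         * one argument.
         */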
11125         if (regpairs_aligned(cpu_env, num)) {
11126             arg4 = arg5;
11127             arg5 = arg6;
11128         }
11129         if (arg2 == 0 && arg3 == 0) {
11130             /* Special-case NULL buffer and zero length, which should succeed */
11131             p = 0;
11132         } else {
11133             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11134             if (!p) {
11135                 return -TARGET_EFAULT;
11136             }
11137         }
11138         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11139         unlock_user(p, arg2, ret);
11140         return ret;
11141     case TARGET_NR_pwrite64:
11142         if (regpairs_aligned(cpu_env, num)) {
11143             arg4 = arg5;
11144             arg5 = arg6;
11145         }
11146         if (arg2 == 0 && arg3 == 0) {
11147             /* Special-case NULL buffer and zero length, which should succeed */
11148             p = 0;
11149         } else {
11150             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11151             if (!p) {
11152                 return -TARGET_EFAULT;
11153             }
11154         }
11155         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11156         unlock_user(p, arg2, 0);
11157         return ret;
11158 #endif
11159     case TARGET_NR_getcwd:
11160         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11161             return -TARGET_EFAULT;
11162         ret = get_errno(sys_getcwd1(p, arg2));
11163         unlock_user(p, arg1, ret);
11164         return ret;
11165     case TARGET_NR_capget:
11166     case TARGET_NR_capset:
11167     {
11168         struct target_user_cap_header *target_header;
11169         struct target_user_cap_data *target_data = NULL;
11170         struct __user_cap_header_struct header;
11171         struct __user_cap_data_struct data[2];
11172         struct __user_cap_data_struct *dataptr = NULL;
11173         int i, target_datalen;
11174         int data_items = 1;
11175 
11176         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11177             return -TARGET_EFAULT;
11178         }
11179         header.version = tswap32(target_header->version);
11180         header.pid = tswap32(target_header->pid);
11181 
11182         if (header.version != _LINUX_CAPABILITY_VERSION) {
11183             /* Versions 2 and up take a pointer to two user_data structs */
11184             data_items = 2;
11185         }
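        /*
         * data_items is the number of __user_cap_data_struct entries the
         * kernel expects: one for the v1 ABI, two for v2/v3, where each
         * 64-bit capability set is split across two 32-bit words.
         */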
11186 
11187         target_datalen = sizeof(*target_data) * data_items;
11188 
11189         if (arg2) {
11190             if (num == TARGET_NR_capget) {
11191                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11192             } else {
11193                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11194             }
11195             if (!target_data) {
11196                 unlock_user_struct(target_header, arg1, 0);
11197                 return -TARGET_EFAULT;
11198             }
11199 
11200             if (num == TARGET_NR_capset) {
11201                 for (i = 0; i < data_items; i++) {
11202                     data[i].effective = tswap32(target_data[i].effective);
11203                     data[i].permitted = tswap32(target_data[i].permitted);
11204                     data[i].inheritable = tswap32(target_data[i].inheritable);
11205                 }
11206             }
11207 
11208             dataptr = data;
11209         }
11210 
11211         if (num == TARGET_NR_capget) {
11212             ret = get_errno(capget(&header, dataptr));
11213         } else {
11214             ret = get_errno(capset(&header, dataptr));
11215         }
11216 
11217         /* The kernel always updates version for both capget and capset */
11218         target_header->version = tswap32(header.version);
11219         unlock_user_struct(target_header, arg1, 1);
11220 
11221         if (arg2) {
11222             if (num == TARGET_NR_capget) {
11223                 for (i = 0; i < data_items; i++) {
11224                     target_data[i].effective = tswap32(data[i].effective);
11225                     target_data[i].permitted = tswap32(data[i].permitted);
11226                     target_data[i].inheritable = tswap32(data[i].inheritable);
11227                 }
11228                 unlock_user(target_data, arg2, target_datalen);
11229             } else {
11230                 unlock_user(target_data, arg2, 0);
11231             }
11232         }
11233         return ret;
11234     }
11235     case TARGET_NR_sigaltstack:
11236         return do_sigaltstack(arg1, arg2, cpu_env);
11237 
11238 #ifdef CONFIG_SENDFILE
11239 #ifdef TARGET_NR_sendfile
11240     case TARGET_NR_sendfile:
11241     {
11242         off_t *offp = NULL;
11243         off_t off;
11244         if (arg3) {
11245             ret = get_user_sal(off, arg3);
11246             if (is_error(ret)) {
11247                 return ret;
11248             }
11249             offp = &off;
11250         }
11251         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11252         if (!is_error(ret) && arg3) {
11253             abi_long ret2 = put_user_sal(off, arg3);
11254             if (is_error(ret2)) {
11255                 ret = ret2;
11256             }
11257         }
11258         return ret;
11259     }
11260 #endif
11261 #ifdef TARGET_NR_sendfile64
11262     case TARGET_NR_sendfile64:
11263     {
11264         off_t *offp = NULL;
11265         off_t off;
11266         if (arg3) {
11267             ret = get_user_s64(off, arg3);
11268             if (is_error(ret)) {
11269                 return ret;
11270             }
11271             offp = &off;
11272         }
11273         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11274         if (!is_error(ret) && arg3) {
11275             abi_long ret2 = put_user_s64(off, arg3);
11276             if (is_error(ret2)) {
11277                 ret = ret2;
11278             }
11279         }
11280         return ret;
11281     }
11282 #endif
11283 #endif
11284 #ifdef TARGET_NR_vfork
11285     case TARGET_NR_vfork:
11286         return get_errno(do_fork(cpu_env,
11287                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11288                          0, 0, 0, 0));
11289 #endif
11290 #ifdef TARGET_NR_ugetrlimit
11291     case TARGET_NR_ugetrlimit:
11292     {
11293         struct rlimit rlim;
11294         int resource = target_to_host_resource(arg1);
11295         ret = get_errno(getrlimit(resource, &rlim));
11296         if (!is_error(ret)) {
11297             struct target_rlimit *target_rlim;
11298             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11299                 return -TARGET_EFAULT;
11300             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11301             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11302             unlock_user_struct(target_rlim, arg2, 1);
11303         }
11304         return ret;
11305     }
11306 #endif
11307 #ifdef TARGET_NR_truncate64
11308     case TARGET_NR_truncate64:
11309         if (!(p = lock_user_string(arg1)))
11310             return -TARGET_EFAULT;
11311         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11312         unlock_user(p, arg1, 0);
11313         return ret;
11314 #endif
11315 #ifdef TARGET_NR_ftruncate64
11316     case TARGET_NR_ftruncate64:
11317         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11318 #endif
11319 #ifdef TARGET_NR_stat64
11320     case TARGET_NR_stat64:
11321         if (!(p = lock_user_string(arg1))) {
11322             return -TARGET_EFAULT;
11323         }
11324         ret = get_errno(stat(path(p), &st));
11325         unlock_user(p, arg1, 0);
11326         if (!is_error(ret))
11327             ret = host_to_target_stat64(cpu_env, arg2, &st);
11328         return ret;
11329 #endif
11330 #ifdef TARGET_NR_lstat64
11331     case TARGET_NR_lstat64:
11332         if (!(p = lock_user_string(arg1))) {
11333             return -TARGET_EFAULT;
11334         }
11335         ret = get_errno(lstat(path(p), &st));
11336         unlock_user(p, arg1, 0);
11337         if (!is_error(ret))
11338             ret = host_to_target_stat64(cpu_env, arg2, &st);
11339         return ret;
11340 #endif
11341 #ifdef TARGET_NR_fstat64
11342     case TARGET_NR_fstat64:
11343         ret = get_errno(fstat(arg1, &st));
11344         if (!is_error(ret))
11345             ret = host_to_target_stat64(cpu_env, arg2, &st);
11346         return ret;
11347 #endif
11348 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11349 #ifdef TARGET_NR_fstatat64
11350     case TARGET_NR_fstatat64:
11351 #endif
11352 #ifdef TARGET_NR_newfstatat
11353     case TARGET_NR_newfstatat:
11354 #endif
11355         if (!(p = lock_user_string(arg2))) {
11356             return -TARGET_EFAULT;
11357         }
11358         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11359         unlock_user(p, arg2, 0);
11360         if (!is_error(ret))
11361             ret = host_to_target_stat64(cpu_env, arg3, &st);
11362         return ret;
11363 #endif
11364 #if defined(TARGET_NR_statx)
11365     case TARGET_NR_statx:
11366         {
11367             struct target_statx *target_stx;
11368             int dirfd = arg1;
11369             int flags = arg3;
11370 
11371             p = lock_user_string(arg2);
11372             if (p == NULL) {
11373                 return -TARGET_EFAULT;
11374             }
11375 #if defined(__NR_statx)
11376             {
11377                 /*
11378                  * It is assumed that struct statx is architecture independent.
11379                  */
11380                 struct target_statx host_stx;
11381                 int mask = arg4;
11382 
11383                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11384                 if (!is_error(ret)) {
11385                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11386                         unlock_user(p, arg2, 0);
11387                         return -TARGET_EFAULT;
11388                     }
11389                 }
11390 
11391                 if (ret != -TARGET_ENOSYS) {
11392                     unlock_user(p, arg2, 0);
11393                     return ret;
11394                 }
11395             }
11396 #endif
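            /*
             * Fallback when the host has no statx() (or it returned ENOSYS):
             * use fstatat() and fill in only the statx fields that a plain
             * struct stat can provide.
             */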
11397             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11398             unlock_user(p, arg2, 0);
11399 
11400             if (!is_error(ret)) {
11401                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11402                     return -TARGET_EFAULT;
11403                 }
11404                 memset(target_stx, 0, sizeof(*target_stx));
11405                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11406                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11407                 __put_user(st.st_ino, &target_stx->stx_ino);
11408                 __put_user(st.st_mode, &target_stx->stx_mode);
11409                 __put_user(st.st_uid, &target_stx->stx_uid);
11410                 __put_user(st.st_gid, &target_stx->stx_gid);
11411                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11412                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11413                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11414                 __put_user(st.st_size, &target_stx->stx_size);
11415                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11416                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11417                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11418                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11419                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11420                 unlock_user_struct(target_stx, arg5, 1);
11421             }
11422         }
11423         return ret;
11424 #endif
11425 #ifdef TARGET_NR_lchown
11426     case TARGET_NR_lchown:
11427         if (!(p = lock_user_string(arg1)))
11428             return -TARGET_EFAULT;
11429         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11430         unlock_user(p, arg1, 0);
11431         return ret;
11432 #endif
11433 #ifdef TARGET_NR_getuid
11434     case TARGET_NR_getuid:
11435         return get_errno(high2lowuid(getuid()));
11436 #endif
11437 #ifdef TARGET_NR_getgid
11438     case TARGET_NR_getgid:
11439         return get_errno(high2lowgid(getgid()));
11440 #endif
11441 #ifdef TARGET_NR_geteuid
11442     case TARGET_NR_geteuid:
11443         return get_errno(high2lowuid(geteuid()));
11444 #endif
11445 #ifdef TARGET_NR_getegid
11446     case TARGET_NR_getegid:
11447         return get_errno(high2lowgid(getegid()));
11448 #endif
11449     case TARGET_NR_setreuid:
11450         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11451     case TARGET_NR_setregid:
11452         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11453     case TARGET_NR_getgroups:
11454         {
11455             int gidsetsize = arg1;
11456             target_id *target_grouplist;
11457             gid_t *grouplist;
11458             int i;
11459 
11460             grouplist = alloca(gidsetsize * sizeof(gid_t));
11461             ret = get_errno(getgroups(gidsetsize, grouplist));
11462             if (gidsetsize == 0)
11463                 return ret;
11464             if (!is_error(ret)) {
11465                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11466                 if (!target_grouplist)
11467                     return -TARGET_EFAULT;
11468                 for (i = 0; i < ret; i++)
11469                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11470                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11471             }
11472         }
11473         return ret;
11474     case TARGET_NR_setgroups:
11475         {
11476             int gidsetsize = arg1;
11477             target_id *target_grouplist;
11478             gid_t *grouplist = NULL;
11479             int i;
11480             if (gidsetsize) {
11481                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11482                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11483                 if (!target_grouplist) {
11484                     return -TARGET_EFAULT;
11485                 }
11486                 for (i = 0; i < gidsetsize; i++) {
11487                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11488                 }
11489                 unlock_user(target_grouplist, arg2, 0);
11490             }
11491             return get_errno(setgroups(gidsetsize, grouplist));
11492         }
11493     case TARGET_NR_fchown:
11494         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11495 #if defined(TARGET_NR_fchownat)
11496     case TARGET_NR_fchownat:
11497         if (!(p = lock_user_string(arg2)))
11498             return -TARGET_EFAULT;
11499         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11500                                  low2highgid(arg4), arg5));
11501         unlock_user(p, arg2, 0);
11502         return ret;
11503 #endif
11504 #ifdef TARGET_NR_setresuid
11505     case TARGET_NR_setresuid:
11506         return get_errno(sys_setresuid(low2highuid(arg1),
11507                                        low2highuid(arg2),
11508                                        low2highuid(arg3)));
11509 #endif
11510 #ifdef TARGET_NR_getresuid
11511     case TARGET_NR_getresuid:
11512         {
11513             uid_t ruid, euid, suid;
11514             ret = get_errno(getresuid(&ruid, &euid, &suid));
11515             if (!is_error(ret)) {
11516                 if (put_user_id(high2lowuid(ruid), arg1)
11517                     || put_user_id(high2lowuid(euid), arg2)
11518                     || put_user_id(high2lowuid(suid), arg3))
11519                     return -TARGET_EFAULT;
11520             }
11521         }
11522         return ret;
11523 #endif
11524 #ifdef TARGET_NR_setresgid
11525     case TARGET_NR_setresgid:
11526         return get_errno(sys_setresgid(low2highgid(arg1),
11527                                        low2highgid(arg2),
11528                                        low2highgid(arg3)));
11529 #endif
11530 #ifdef TARGET_NR_getresgid
11531     case TARGET_NR_getresgid:
11532         {
11533             gid_t rgid, egid, sgid;
11534             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11535             if (!is_error(ret)) {
11536                 if (put_user_id(high2lowgid(rgid), arg1)
11537                     || put_user_id(high2lowgid(egid), arg2)
11538                     || put_user_id(high2lowgid(sgid), arg3))
11539                     return -TARGET_EFAULT;
11540             }
11541         }
11542         return ret;
11543 #endif
11544 #ifdef TARGET_NR_chown
11545     case TARGET_NR_chown:
11546         if (!(p = lock_user_string(arg1)))
11547             return -TARGET_EFAULT;
11548         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11549         unlock_user(p, arg1, 0);
11550         return ret;
11551 #endif
11552     case TARGET_NR_setuid:
11553         return get_errno(sys_setuid(low2highuid(arg1)));
11554     case TARGET_NR_setgid:
11555         return get_errno(sys_setgid(low2highgid(arg1)));
11556     case TARGET_NR_setfsuid:
11557         return get_errno(setfsuid(arg1));
11558     case TARGET_NR_setfsgid:
11559         return get_errno(setfsgid(arg1));
11560 
11561 #ifdef TARGET_NR_lchown32
11562     case TARGET_NR_lchown32:
11563         if (!(p = lock_user_string(arg1)))
11564             return -TARGET_EFAULT;
11565         ret = get_errno(lchown(p, arg2, arg3));
11566         unlock_user(p, arg1, 0);
11567         return ret;
11568 #endif
11569 #ifdef TARGET_NR_getuid32
11570     case TARGET_NR_getuid32:
11571         return get_errno(getuid());
11572 #endif
11573 
11574 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11575    /* Alpha specific */
11576     case TARGET_NR_getxuid:
11577          {
11578             uid_t euid;
11579             euid = geteuid();
11580             cpu_env->ir[IR_A4] = euid;
11581          }
11582         return get_errno(getuid());
11583 #endif
11584 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11585    /* Alpha specific */
11586     case TARGET_NR_getxgid:
11587          {
11588             gid_t egid;
11589             egid = getegid();
11590             cpu_env->ir[IR_A4] = egid;
11591          }
11592         return get_errno(getgid());
11593 #endif
11594 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11595     /* Alpha specific */
11596     case TARGET_NR_osf_getsysinfo:
11597         ret = -TARGET_EOPNOTSUPP;
11598         switch (arg1) {
11599           case TARGET_GSI_IEEE_FP_CONTROL:
11600             {
11601                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11602                 uint64_t swcr = cpu_env->swcr;
11603 
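                /*
                 * The exception status bits are kept only in the hardware
                 * FPCR (see osf_setsysinfo below), so refresh them into the
                 * software completion word before reporting it.
                 */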
11604                 swcr &= ~SWCR_STATUS_MASK;
11605                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11606 
11607                 if (put_user_u64(swcr, arg2))
11608                     return -TARGET_EFAULT;
11609                 ret = 0;
11610             }
11611             break;
11612 
11613           /* case GSI_IEEE_STATE_AT_SIGNAL:
11614              -- Not implemented in linux kernel.
11615              case GSI_UACPROC:
11616              -- Retrieves current unaligned access state; not much used.
11617              case GSI_PROC_TYPE:
11618              -- Retrieves implver information; surely not used.
11619              case GSI_GET_HWRPB:
11620              -- Grabs a copy of the HWRPB; surely not used.
11621           */
11622         }
11623         return ret;
11624 #endif
11625 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11626     /* Alpha specific */
11627     case TARGET_NR_osf_setsysinfo:
11628         ret = -TARGET_EOPNOTSUPP;
11629         switch (arg1) {
11630           case TARGET_SSI_IEEE_FP_CONTROL:
11631             {
11632                 uint64_t swcr, fpcr;
11633 
11634                 if (get_user_u64(swcr, arg2)) {
11635                     return -TARGET_EFAULT;
11636                 }
11637 
11638                 /*
11639                  * The kernel calls swcr_update_status to update the
11640                  * status bits from the fpcr at every point that it
11641                  * could be queried.  Therefore, we store the status
11642                  * bits only in FPCR.
11643                  */
11644                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11645 
11646                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11647                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11648                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11649                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11650                 ret = 0;
11651             }
11652             break;
11653 
11654           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11655             {
11656                 uint64_t exc, fpcr, fex;
11657 
11658                 if (get_user_u64(exc, arg2)) {
11659                     return -TARGET_EFAULT;
11660                 }
11661                 exc &= SWCR_STATUS_MASK;
11662                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11663 
11664                 /* Old exceptions are not signaled.  */
11665                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11666                 fex = exc & ~fex;
11667                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11668                 fex &= (cpu_env)->swcr;
11669 
11670                 /* Update the hardware fpcr.  */
11671                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11672                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11673 
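                /*
                 * If any newly raised exception is also trap-enabled in the
                 * software completion word, deliver SIGFPE to the guest with
                 * the most specific si_code we can derive from it.
                 */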
11674                 if (fex) {
11675                     int si_code = TARGET_FPE_FLTUNK;
11676                     target_siginfo_t info;
11677 
11678                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11679                         si_code = TARGET_FPE_FLTUND;
11680                     }
11681                     if (fex & SWCR_TRAP_ENABLE_INE) {
11682                         si_code = TARGET_FPE_FLTRES;
11683                     }
11684                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11685                         si_code = TARGET_FPE_FLTUND;
11686                     }
11687                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11688                         si_code = TARGET_FPE_FLTOVF;
11689                     }
11690                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11691                         si_code = TARGET_FPE_FLTDIV;
11692                     }
11693                     if (fex & SWCR_TRAP_ENABLE_INV) {
11694                         si_code = TARGET_FPE_FLTINV;
11695                     }
11696 
11697                     info.si_signo = SIGFPE;
11698                     info.si_errno = 0;
11699                     info.si_code = si_code;
11700                     info._sifields._sigfault._addr = (cpu_env)->pc;
11701                     queue_signal(cpu_env, info.si_signo,
11702                                  QEMU_SI_FAULT, &info);
11703                 }
11704                 ret = 0;
11705             }
11706             break;
11707 
11708           /* case SSI_NVPAIRS:
11709              -- Used with SSIN_UACPROC to enable unaligned accesses.
11710              case SSI_IEEE_STATE_AT_SIGNAL:
11711              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11712              -- Not implemented in linux kernel
11713           */
11714         }
11715         return ret;
11716 #endif
11717 #ifdef TARGET_NR_osf_sigprocmask
11718     /* Alpha specific.  */
11719     case TARGET_NR_osf_sigprocmask:
11720         {
11721             abi_ulong mask;
11722             int how;
11723             sigset_t set, oldset;
11724 
11725             switch (arg1) {
11726             case TARGET_SIG_BLOCK:
11727                 how = SIG_BLOCK;
11728                 break;
11729             case TARGET_SIG_UNBLOCK:
11730                 how = SIG_UNBLOCK;
11731                 break;
11732             case TARGET_SIG_SETMASK:
11733                 how = SIG_SETMASK;
11734                 break;
11735             default:
11736                 return -TARGET_EINVAL;
11737             }
11738             mask = arg2;
11739             target_to_host_old_sigset(&set, &mask);
11740             ret = do_sigprocmask(how, &set, &oldset);
11741             if (!ret) {
11742                 host_to_target_old_sigset(&mask, &oldset);
11743                 ret = mask;
11744             }
11745         }
11746         return ret;
11747 #endif
11748 
11749 #ifdef TARGET_NR_getgid32
11750     case TARGET_NR_getgid32:
11751         return get_errno(getgid());
11752 #endif
11753 #ifdef TARGET_NR_geteuid32
11754     case TARGET_NR_geteuid32:
11755         return get_errno(geteuid());
11756 #endif
11757 #ifdef TARGET_NR_getegid32
11758     case TARGET_NR_getegid32:
11759         return get_errno(getegid());
11760 #endif
11761 #ifdef TARGET_NR_setreuid32
11762     case TARGET_NR_setreuid32:
11763         return get_errno(setreuid(arg1, arg2));
11764 #endif
11765 #ifdef TARGET_NR_setregid32
11766     case TARGET_NR_setregid32:
11767         return get_errno(setregid(arg1, arg2));
11768 #endif
11769 #ifdef TARGET_NR_getgroups32
11770     case TARGET_NR_getgroups32:
11771         {
11772             int gidsetsize = arg1;
11773             uint32_t *target_grouplist;
11774             gid_t *grouplist;
11775             int i;
11776 
11777             grouplist = alloca(gidsetsize * sizeof(gid_t));
11778             ret = get_errno(getgroups(gidsetsize, grouplist));
11779             if (gidsetsize == 0)
11780                 return ret;
11781             if (!is_error(ret)) {
11782                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11783                 if (!target_grouplist) {
11784                     return -TARGET_EFAULT;
11785                 }
11786                 for (i = 0; i < ret; i++)
11787                     target_grouplist[i] = tswap32(grouplist[i]);
11788                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11789             }
11790         }
11791         return ret;
11792 #endif
11793 #ifdef TARGET_NR_setgroups32
11794     case TARGET_NR_setgroups32:
11795         {
11796             int gidsetsize = arg1;
11797             uint32_t *target_grouplist;
11798             gid_t *grouplist;
11799             int i;
11800 
11801             grouplist = alloca(gidsetsize * sizeof(gid_t));
11802             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11803             if (!target_grouplist) {
11804                 return -TARGET_EFAULT;
11805             }
11806             for (i = 0; i < gidsetsize; i++)
11807                 grouplist[i] = tswap32(target_grouplist[i]);
11808             unlock_user(target_grouplist, arg2, 0);
11809             return get_errno(setgroups(gidsetsize, grouplist));
11810         }
11811 #endif
11812 #ifdef TARGET_NR_fchown32
11813     case TARGET_NR_fchown32:
11814         return get_errno(fchown(arg1, arg2, arg3));
11815 #endif
11816 #ifdef TARGET_NR_setresuid32
11817     case TARGET_NR_setresuid32:
11818         return get_errno(sys_setresuid(arg1, arg2, arg3));
11819 #endif
11820 #ifdef TARGET_NR_getresuid32
11821     case TARGET_NR_getresuid32:
11822         {
11823             uid_t ruid, euid, suid;
11824             ret = get_errno(getresuid(&ruid, &euid, &suid));
11825             if (!is_error(ret)) {
11826                 if (put_user_u32(ruid, arg1)
11827                     || put_user_u32(euid, arg2)
11828                     || put_user_u32(suid, arg3))
11829                     return -TARGET_EFAULT;
11830             }
11831         }
11832         return ret;
11833 #endif
11834 #ifdef TARGET_NR_setresgid32
11835     case TARGET_NR_setresgid32:
11836         return get_errno(sys_setresgid(arg1, arg2, arg3));
11837 #endif
11838 #ifdef TARGET_NR_getresgid32
11839     case TARGET_NR_getresgid32:
11840         {
11841             gid_t rgid, egid, sgid;
11842             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11843             if (!is_error(ret)) {
11844                 if (put_user_u32(rgid, arg1)
11845                     || put_user_u32(egid, arg2)
11846                     || put_user_u32(sgid, arg3))
11847                     return -TARGET_EFAULT;
11848             }
11849         }
11850         return ret;
11851 #endif
11852 #ifdef TARGET_NR_chown32
11853     case TARGET_NR_chown32:
11854         if (!(p = lock_user_string(arg1)))
11855             return -TARGET_EFAULT;
11856         ret = get_errno(chown(p, arg2, arg3));
11857         unlock_user(p, arg1, 0);
11858         return ret;
11859 #endif
11860 #ifdef TARGET_NR_setuid32
11861     case TARGET_NR_setuid32:
11862         return get_errno(sys_setuid(arg1));
11863 #endif
11864 #ifdef TARGET_NR_setgid32
11865     case TARGET_NR_setgid32:
11866         return get_errno(sys_setgid(arg1));
11867 #endif
11868 #ifdef TARGET_NR_setfsuid32
11869     case TARGET_NR_setfsuid32:
11870         return get_errno(setfsuid(arg1));
11871 #endif
11872 #ifdef TARGET_NR_setfsgid32
11873     case TARGET_NR_setfsgid32:
11874         return get_errno(setfsgid(arg1));
11875 #endif
11876 #ifdef TARGET_NR_mincore
11877     case TARGET_NR_mincore:
11878         {
11879             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11880             if (!a) {
11881                 return -TARGET_ENOMEM;
11882             }
11883             p = lock_user_string(arg3);
11884             if (!p) {
11885                 ret = -TARGET_EFAULT;
11886             } else {
11887                 ret = get_errno(mincore(a, arg2, p));
11888                 unlock_user(p, arg3, ret);
11889             }
11890             unlock_user(a, arg1, 0);
11891         }
11892         return ret;
11893 #endif
11894 #ifdef TARGET_NR_arm_fadvise64_64
11895     case TARGET_NR_arm_fadvise64_64:
11896         /* arm_fadvise64_64 looks like fadvise64_64 but
11897          * with different argument order: fd, advice, offset, len
11898          * rather than the usual fd, offset, len, advice.
11899          * Note that offset and len are both 64-bit so appear as
11900          * pairs of 32-bit registers.
11901          */
11902         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11903                             target_offset64(arg5, arg6), arg2);
11904         return -host_to_target_errno(ret);
11905 #endif
11906 
11907 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11908 
11909 #ifdef TARGET_NR_fadvise64_64
11910     case TARGET_NR_fadvise64_64:
11911 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11912         /* 6 args: fd, advice, offset (high, low), len (high, low) */
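        /*
         * Rotate the advice argument out of arg2 and place it after the
         * offset/len pairs, so the common posix_fadvise() call below can use
         * the usual fd, offset, len, advice order.
         */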
11913         ret = arg2;
11914         arg2 = arg3;
11915         arg3 = arg4;
11916         arg4 = arg5;
11917         arg5 = arg6;
11918         arg6 = ret;
11919 #else
11920         /* 6 args: fd, offset (high, low), len (high, low), advice */
11921         if (regpairs_aligned(cpu_env, num)) {
11922             /* offset is in (3,4), len in (5,6) and advice in 7 */
11923             arg2 = arg3;
11924             arg3 = arg4;
11925             arg4 = arg5;
11926             arg5 = arg6;
11927             arg6 = arg7;
11928         }
11929 #endif
11930         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11931                             target_offset64(arg4, arg5), arg6);
11932         return -host_to_target_errno(ret);
11933 #endif
11934 
11935 #ifdef TARGET_NR_fadvise64
11936     case TARGET_NR_fadvise64:
11937         /* 5 args: fd, offset (high, low), len, advice */
11938         if (regpairs_aligned(cpu_env, num)) {
11939             /* offset is in (3,4), len in 5 and advice in 6 */
11940             arg2 = arg3;
11941             arg3 = arg4;
11942             arg4 = arg5;
11943             arg5 = arg6;
11944         }
11945         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11946         return -host_to_target_errno(ret);
11947 #endif
11948 
11949 #else /* not a 32-bit ABI */
11950 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11951 #ifdef TARGET_NR_fadvise64_64
11952     case TARGET_NR_fadvise64_64:
11953 #endif
11954 #ifdef TARGET_NR_fadvise64
11955     case TARGET_NR_fadvise64:
11956 #endif
11957 #ifdef TARGET_S390X
11958         switch (arg4) {
11959         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11960         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11961         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11962         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11963         default: break;
11964         }
11965 #endif
11966         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11967 #endif
11968 #endif /* end of fadvise64/fadvise64_64 handling */
11969 
11970 #ifdef TARGET_NR_madvise
11971     case TARGET_NR_madvise:
11972         return target_madvise(arg1, arg2, arg3);
11973 #endif
11974 #ifdef TARGET_NR_fcntl64
11975     case TARGET_NR_fcntl64:
11976     {
11977         int cmd;
11978         struct flock64 fl;
11979         from_flock64_fn *copyfrom = copy_from_user_flock64;
11980         to_flock64_fn *copyto = copy_to_user_flock64;
11981 
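        /*
         * The old ARM OABI does not 8-byte-align 64-bit members the way EABI
         * does, so struct flock64 has a different layout there and needs its
         * own copy helpers.
         */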
11982 #ifdef TARGET_ARM
11983         if (!cpu_env->eabi) {
11984             copyfrom = copy_from_user_oabi_flock64;
11985             copyto = copy_to_user_oabi_flock64;
11986         }
11987 #endif
11988 
11989         cmd = target_to_host_fcntl_cmd(arg2);
11990         if (cmd == -TARGET_EINVAL) {
11991             return cmd;
11992         }
11993 
11994         switch (arg2) {
11995         case TARGET_F_GETLK64:
11996             ret = copyfrom(&fl, arg3);
11997             if (ret) {
11998                 break;
11999             }
12000             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12001             if (ret == 0) {
12002                 ret = copyto(arg3, &fl);
12003             }
12004             break;
12005 
12006         case TARGET_F_SETLK64:
12007         case TARGET_F_SETLKW64:
12008             ret = copyfrom(&fl, arg3);
12009             if (ret) {
12010                 break;
12011             }
12012             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12013             break;
12014         default:
12015             ret = do_fcntl(arg1, arg2, arg3);
12016             break;
12017         }
12018         return ret;
12019     }
12020 #endif
12021 #ifdef TARGET_NR_cacheflush
12022     case TARGET_NR_cacheflush:
12023         /* self-modifying code is handled automatically, so nothing needed */
12024         return 0;
12025 #endif
12026 #ifdef TARGET_NR_getpagesize
12027     case TARGET_NR_getpagesize:
12028         return TARGET_PAGE_SIZE;
12029 #endif
12030     case TARGET_NR_gettid:
12031         return get_errno(sys_gettid());
12032 #ifdef TARGET_NR_readahead
12033     case TARGET_NR_readahead:
12034 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12035         if (regpairs_aligned(cpu_env, num)) {
12036             arg2 = arg3;
12037             arg3 = arg4;
12038             arg4 = arg5;
12039         }
12040         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12041 #else
12042         ret = get_errno(readahead(arg1, arg2, arg3));
12043 #endif
12044         return ret;
12045 #endif
12046 #ifdef CONFIG_ATTR
12047 #ifdef TARGET_NR_setxattr
12048     case TARGET_NR_listxattr:
12049     case TARGET_NR_llistxattr:
12050     {
12051         void *p, *b = 0;
12052         if (arg2) {
12053             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12054             if (!b) {
12055                 return -TARGET_EFAULT;
12056             }
12057         }
12058         p = lock_user_string(arg1);
12059         if (p) {
12060             if (num == TARGET_NR_listxattr) {
12061                 ret = get_errno(listxattr(p, b, arg3));
12062             } else {
12063                 ret = get_errno(llistxattr(p, b, arg3));
12064             }
12065         } else {
12066             ret = -TARGET_EFAULT;
12067         }
12068         unlock_user(p, arg1, 0);
12069         unlock_user(b, arg2, arg3);
12070         return ret;
12071     }
12072     case TARGET_NR_flistxattr:
12073     {
12074         void *b = 0;
12075         if (arg2) {
12076             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12077             if (!b) {
12078                 return -TARGET_EFAULT;
12079             }
12080         }
12081         ret = get_errno(flistxattr(arg1, b, arg3));
12082         unlock_user(b, arg2, arg3);
12083         return ret;
12084     }
12085     case TARGET_NR_setxattr:
12086     case TARGET_NR_lsetxattr:
12087         {
12088             void *p, *n, *v = 0;
12089             if (arg3) {
12090                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12091                 if (!v) {
12092                     return -TARGET_EFAULT;
12093                 }
12094             }
12095             p = lock_user_string(arg1);
12096             n = lock_user_string(arg2);
12097             if (p && n) {
12098                 if (num == TARGET_NR_setxattr) {
12099                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12100                 } else {
12101                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12102                 }
12103             } else {
12104                 ret = -TARGET_EFAULT;
12105             }
12106             unlock_user(p, arg1, 0);
12107             unlock_user(n, arg2, 0);
12108             unlock_user(v, arg3, 0);
12109         }
12110         return ret;
12111     case TARGET_NR_fsetxattr:
12112         {
12113             void *n, *v = 0;
12114             if (arg3) {
12115                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12116                 if (!v) {
12117                     return -TARGET_EFAULT;
12118                 }
12119             }
12120             n = lock_user_string(arg2);
12121             if (n) {
12122                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12123             } else {
12124                 ret = -TARGET_EFAULT;
12125             }
12126             unlock_user(n, arg2, 0);
12127             unlock_user(v, arg3, 0);
12128         }
12129         return ret;
12130     case TARGET_NR_getxattr:
12131     case TARGET_NR_lgetxattr:
12132         {
12133             void *p, *n, *v = 0;
12134             if (arg3) {
12135                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12136                 if (!v) {
12137                     return -TARGET_EFAULT;
12138                 }
12139             }
12140             p = lock_user_string(arg1);
12141             n = lock_user_string(arg2);
12142             if (p && n) {
12143                 if (num == TARGET_NR_getxattr) {
12144                     ret = get_errno(getxattr(p, n, v, arg4));
12145                 } else {
12146                     ret = get_errno(lgetxattr(p, n, v, arg4));
12147                 }
12148             } else {
12149                 ret = -TARGET_EFAULT;
12150             }
12151             unlock_user(p, arg1, 0);
12152             unlock_user(n, arg2, 0);
12153             unlock_user(v, arg3, arg4);
12154         }
12155         return ret;
12156     case TARGET_NR_fgetxattr:
12157         {
12158             void *n, *v = 0;
12159             if (arg3) {
12160                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12161                 if (!v) {
12162                     return -TARGET_EFAULT;
12163                 }
12164             }
12165             n = lock_user_string(arg2);
12166             if (n) {
12167                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12168             } else {
12169                 ret = -TARGET_EFAULT;
12170             }
12171             unlock_user(n, arg2, 0);
12172             unlock_user(v, arg3, arg4);
12173         }
12174         return ret;
12175     case TARGET_NR_removexattr:
12176     case TARGET_NR_lremovexattr:
12177         {
12178             void *p, *n;
12179             p = lock_user_string(arg1);
12180             n = lock_user_string(arg2);
12181             if (p && n) {
12182                 if (num == TARGET_NR_removexattr) {
12183                     ret = get_errno(removexattr(p, n));
12184                 } else {
12185                     ret = get_errno(lremovexattr(p, n));
12186                 }
12187             } else {
12188                 ret = -TARGET_EFAULT;
12189             }
12190             unlock_user(p, arg1, 0);
12191             unlock_user(n, arg2, 0);
12192         }
12193         return ret;
12194     case TARGET_NR_fremovexattr:
12195         {
12196             void *n;
12197             n = lock_user_string(arg2);
12198             if (n) {
12199                 ret = get_errno(fremovexattr(arg1, n));
12200             } else {
12201                 ret = -TARGET_EFAULT;
12202             }
12203             unlock_user(n, arg2, 0);
12204         }
12205         return ret;
12206 #endif
12207 #endif /* CONFIG_ATTR */
12208 #ifdef TARGET_NR_set_thread_area
12209     case TARGET_NR_set_thread_area:
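    /*
     * Descriptive note: setting the TLS area is inherently target-specific.
     * MIPS and CRIS keep the thread pointer in a CPU register, 32-bit x86
     * installs a descriptor via do_set_thread_area(), and m68k stores the
     * value in the per-thread TaskState.
     */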
12210 #if defined(TARGET_MIPS)
12211       cpu_env->active_tc.CP0_UserLocal = arg1;
12212       return 0;
12213 #elif defined(TARGET_CRIS)
12214       if (arg1 & 0xff) {
12215           ret = -TARGET_EINVAL;
12216       } else {
12217           cpu_env->pregs[PR_PID] = arg1;
12218           ret = 0;
12219       }
12220       return ret;
12221 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12222       return do_set_thread_area(cpu_env, arg1);
12223 #elif defined(TARGET_M68K)
12224       {
12225           TaskState *ts = cpu->opaque;
12226           ts->tp_value = arg1;
12227           return 0;
12228       }
12229 #else
12230       return -TARGET_ENOSYS;
12231 #endif
12232 #endif
12233 #ifdef TARGET_NR_get_thread_area
12234     case TARGET_NR_get_thread_area:
12235 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12236         return do_get_thread_area(cpu_env, arg1);
12237 #elif defined(TARGET_M68K)
12238         {
12239             TaskState *ts = cpu->opaque;
12240             return ts->tp_value;
12241         }
12242 #else
12243         return -TARGET_ENOSYS;
12244 #endif
12245 #endif
12246 #ifdef TARGET_NR_getdomainname
12247     case TARGET_NR_getdomainname:
12248         return -TARGET_ENOSYS;
12249 #endif
12250 
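    /*
     * Descriptive note: the clock_* cases below come in pairs.  The classic
     * syscall uses the target's native struct timespec, while the *_time64
     * variant uses the 64-bit time_t layout that 32-bit guests need beyond
     * 2038; both are converted to a host struct timespec before the host
     * call.
     */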
12251 #ifdef TARGET_NR_clock_settime
12252     case TARGET_NR_clock_settime:
12253     {
12254         struct timespec ts;
12255 
12256         ret = target_to_host_timespec(&ts, arg2);
12257         if (!is_error(ret)) {
12258             ret = get_errno(clock_settime(arg1, &ts));
12259         }
12260         return ret;
12261     }
12262 #endif
12263 #ifdef TARGET_NR_clock_settime64
12264     case TARGET_NR_clock_settime64:
12265     {
12266         struct timespec ts;
12267 
12268         ret = target_to_host_timespec64(&ts, arg2);
12269         if (!is_error(ret)) {
12270             ret = get_errno(clock_settime(arg1, &ts));
12271         }
12272         return ret;
12273     }
12274 #endif
12275 #ifdef TARGET_NR_clock_gettime
12276     case TARGET_NR_clock_gettime:
12277     {
12278         struct timespec ts;
12279         ret = get_errno(clock_gettime(arg1, &ts));
12280         if (!is_error(ret)) {
12281             ret = host_to_target_timespec(arg2, &ts);
12282         }
12283         return ret;
12284     }
12285 #endif
12286 #ifdef TARGET_NR_clock_gettime64
12287     case TARGET_NR_clock_gettime64:
12288     {
12289         struct timespec ts;
12290         ret = get_errno(clock_gettime(arg1, &ts));
12291         if (!is_error(ret)) {
12292             ret = host_to_target_timespec64(arg2, &ts);
12293         }
12294         return ret;
12295     }
12296 #endif
12297 #ifdef TARGET_NR_clock_getres
12298     case TARGET_NR_clock_getres:
12299     {
12300         struct timespec ts;
12301         ret = get_errno(clock_getres(arg1, &ts));
12302         if (!is_error(ret)) {
12303             ret = host_to_target_timespec(arg2, &ts);
12304         }
12305         return ret;
12306     }
12307 #endif
12308 #ifdef TARGET_NR_clock_getres_time64
12309     case TARGET_NR_clock_getres_time64:
12310     {
12311         struct timespec ts;
12312         ret = get_errno(clock_getres(arg1, &ts));
12313         if (!is_error(ret)) {
12314             ret = host_to_target_timespec64(arg2, &ts);
12315         }
12316         return ret;
12317     }
12318 #endif
12319 #ifdef TARGET_NR_clock_nanosleep
12320     case TARGET_NR_clock_nanosleep:
12321     {
12322         struct timespec ts;
12323         if (target_to_host_timespec(&ts, arg3)) {
12324             return -TARGET_EFAULT;
12325         }
12326         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12327                                              &ts, arg4 ? &ts : NULL));
12328         /*
12329          * If the call is interrupted by a signal handler it fails with
12330          * -TARGET_EINTR; in that case, if arg4 is not NULL and arg2 is not
12331          * TIMER_ABSTIME, the remaining unslept time is reported in arg4.
12332          */
12333         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12334             host_to_target_timespec(arg4, &ts)) {
12335               return -TARGET_EFAULT;
12336         }
12337 
12338         return ret;
12339     }
12340 #endif
12341 #ifdef TARGET_NR_clock_nanosleep_time64
12342     case TARGET_NR_clock_nanosleep_time64:
12343     {
12344         struct timespec ts;
12345 
12346         if (target_to_host_timespec64(&ts, arg3)) {
12347             return -TARGET_EFAULT;
12348         }
12349 
12350         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12351                                              &ts, arg4 ? &ts : NULL));
12352 
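        /*
         * Same EINTR handling as clock_nanosleep above: only report the
         * remaining time for an interrupted relative sleep.
         */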
12353         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12354             host_to_target_timespec64(arg4, &ts)) {
12355             return -TARGET_EFAULT;
12356         }
12357         return ret;
12358     }
12359 #endif
12360 
12361 #if defined(TARGET_NR_set_tid_address)
12362     case TARGET_NR_set_tid_address:
12363     {
12364         TaskState *ts = cpu->opaque;
12365         ts->child_tidptr = arg1;
12366         /* Do not call the host set_tid_address(); record the pointer and return the caller's tid. */
12367         return get_errno(sys_gettid());
12368     }
12369 #endif
12370 
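    /*
     * Descriptive note: for tkill/tgkill the thread and thread-group ids are
     * host values already (guest threads are real host threads), so only the
     * signal number needs translating.
     */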
12371     case TARGET_NR_tkill:
12372         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12373 
12374     case TARGET_NR_tgkill:
12375         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12376                          target_to_host_signal(arg3)));
12377 
12378 #ifdef TARGET_NR_set_robust_list
12379     case TARGET_NR_set_robust_list:
12380     case TARGET_NR_get_robust_list:
12381         /* The ABI for supporting robust futexes has userspace pass
12382          * the kernel a pointer to a linked list which is updated by
12383          * userspace after the syscall; the list is walked by the kernel
12384          * when the thread exits. Since the linked list in QEMU guest
12385          * memory isn't a valid linked list for the host and we have
12386          * no way to reliably intercept the thread-death event, we can't
12387          * support these. Silently return ENOSYS so that guest userspace
12388          * falls back to a non-robust futex implementation (which should
12389          * be OK except in the corner case of the guest crashing while
12390          * holding a mutex that is shared with another process via
12391          * shared memory).
12392          */
12393         return -TARGET_ENOSYS;
12394 #endif
12395 
12396 #if defined(TARGET_NR_utimensat)
12397     case TARGET_NR_utimensat:
12398         {
12399             struct timespec *tsp, ts[2];
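            /*
             * Descriptive note: arg3, if non-zero, points at a pair of
             * timespecs (atime then mtime); a NULL times pointer asks the
             * host to set both to the current time.
             */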
12400             if (!arg3) {
12401                 tsp = NULL;
12402             } else {
12403                 if (target_to_host_timespec(ts, arg3)) {
12404                     return -TARGET_EFAULT;
12405                 }
12406                 if (target_to_host_timespec(ts + 1, arg3 +
12407                                             sizeof(struct target_timespec))) {
12408                     return -TARGET_EFAULT;
12409                 }
12410                 tsp = ts;
12411             }
12412             if (!arg2) {
12413                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12414             } else {
12415                 if (!(p = lock_user_string(arg2))) {
12416                     return -TARGET_EFAULT;
12417                 }
12418                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12419                 unlock_user(p, arg2, 0);
12420             }
12421         }
12422         return ret;
12423 #endif
12424 #ifdef TARGET_NR_utimensat_time64
12425     case TARGET_NR_utimensat_time64:
12426         {
12427             struct timespec *tsp, ts[2];
12428             if (!arg3) {
12429                 tsp = NULL;
12430             } else {
12431                 if (target_to_host_timespec64(ts, arg3)) {
12432                     return -TARGET_EFAULT;
12433                 }
12434                 if (target_to_host_timespec64(ts + 1, arg3 +
12435                                      sizeof(struct target__kernel_timespec))) {
12436                     return -TARGET_EFAULT;
12437                 }
12438                 tsp = ts;
12439             }
12440             if (!arg2) {
12441                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12442             } else {
12443                 p = lock_user_string(arg2);
12444                 if (!p) {
12445                     return -TARGET_EFAULT;
12446                 }
12447                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12448                 unlock_user(p, arg2, 0);
12449             }
12450         }
12451         return ret;
12452 #endif
12453 #ifdef TARGET_NR_futex
12454     case TARGET_NR_futex:
12455         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12456 #endif
12457 #ifdef TARGET_NR_futex_time64
12458     case TARGET_NR_futex_time64:
12459         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12460 #endif
12461 #ifdef CONFIG_INOTIFY
12462 #if defined(TARGET_NR_inotify_init)
12463     case TARGET_NR_inotify_init:
12464         ret = get_errno(inotify_init());
12465         if (ret >= 0) {
12466             fd_trans_register(ret, &target_inotify_trans);
12467         }
12468         return ret;
12469 #endif
12470 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12471     case TARGET_NR_inotify_init1:
12472         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12473                                           fcntl_flags_tbl)));
12474         if (ret >= 0) {
12475             fd_trans_register(ret, &target_inotify_trans);
12476         }
12477         return ret;
12478 #endif
12479 #if defined(TARGET_NR_inotify_add_watch)
12480     case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
12484         return ret;
12485 #endif
12486 #if defined(TARGET_NR_inotify_rm_watch)
12487     case TARGET_NR_inotify_rm_watch:
12488         return get_errno(inotify_rm_watch(arg1, arg2));
12489 #endif
12490 #endif
12491 
12492 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12493     case TARGET_NR_mq_open:
12494         {
12495             struct mq_attr posix_mq_attr;
12496             struct mq_attr *pposix_mq_attr;
12497             int host_flags;
12498 
12499             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12500             pposix_mq_attr = NULL;
12501             if (arg4) {
12502                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12503                     return -TARGET_EFAULT;
12504                 }
12505                 pposix_mq_attr = &posix_mq_attr;
12506             }
12507             p = lock_user_string(arg1 - 1);
12508             if (!p) {
12509                 return -TARGET_EFAULT;
12510             }
12511             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12512             unlock_user(p, arg1, 0);
12513         }
12514         return ret;
12515 
12516     case TARGET_NR_mq_unlink:
12517         p = lock_user_string(arg1 - 1);
12518         if (!p) {
12519             return -TARGET_EFAULT;
12520         }
12521         ret = get_errno(mq_unlink(p));
12522         unlock_user(p, arg1, 0);
12523         return ret;
12524 
12525 #ifdef TARGET_NR_mq_timedsend
12526     case TARGET_NR_mq_timedsend:
12527         {
12528             struct timespec ts;
12529 
12530             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12531             if (arg5 != 0) {
12532                 if (target_to_host_timespec(&ts, arg5)) {
12533                     return -TARGET_EFAULT;
12534                 }
12535                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12536                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12537                     return -TARGET_EFAULT;
12538                 }
12539             } else {
12540                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12541             }
12542             unlock_user(p, arg2, arg3);
12543         }
12544         return ret;
12545 #endif
12546 #ifdef TARGET_NR_mq_timedsend_time64
12547     case TARGET_NR_mq_timedsend_time64:
12548         {
12549             struct timespec ts;
12550 
12551             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12552             if (arg5 != 0) {
12553                 if (target_to_host_timespec64(&ts, arg5)) {
12554                     return -TARGET_EFAULT;
12555                 }
12556                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12557                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12558                     return -TARGET_EFAULT;
12559                 }
12560             } else {
12561                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12562             }
12563             unlock_user(p, arg2, arg3);
12564         }
12565         return ret;
12566 #endif
12567 
12568 #ifdef TARGET_NR_mq_timedreceive
12569     case TARGET_NR_mq_timedreceive:
12570         {
12571             struct timespec ts;
12572             unsigned int prio;
12573 
12574             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12575             if (arg5 != 0) {
12576                 if (target_to_host_timespec(&ts, arg5)) {
12577                     return -TARGET_EFAULT;
12578                 }
12579                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12580                                                      &prio, &ts));
12581                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12582                     return -TARGET_EFAULT;
12583                 }
12584             } else {
12585                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12586                                                      &prio, NULL));
12587             }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
12591         }
12592         return ret;
12593 #endif
12594 #ifdef TARGET_NR_mq_timedreceive_time64
12595     case TARGET_NR_mq_timedreceive_time64:
12596         {
12597             struct timespec ts;
12598             unsigned int prio;
12599 
12600             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12601             if (arg5 != 0) {
12602                 if (target_to_host_timespec64(&ts, arg5)) {
12603                     return -TARGET_EFAULT;
12604                 }
12605                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12606                                                      &prio, &ts));
12607                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12608                     return -TARGET_EFAULT;
12609                 }
12610             } else {
12611                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12612                                                      &prio, NULL));
12613             }
12614             unlock_user(p, arg2, arg3);
12615             if (arg4 != 0) {
12616                 put_user_u32(prio, arg4);
12617             }
12618         }
12619         return ret;
12620 #endif
12621 
12622     /* Not implemented for now... */
12623 /*     case TARGET_NR_mq_notify: */
12624 /*         break; */
12625 
12626     case TARGET_NR_mq_getsetattr:
12627         {
12628             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
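            /*
             * Descriptive note: arg2, if non-zero, supplies new attributes
             * to install; arg3, if non-zero, receives the attributes that
             * were in effect (the previous ones when arg2 is also given).
             */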
12629             ret = 0;
12630             if (arg2 != 0) {
                if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                    return -TARGET_EFAULT;
                }
12632                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12633                                            &posix_mq_attr_out));
12634             } else if (arg3 != 0) {
12635                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12636             }
12637             if (ret == 0 && arg3 != 0) {
12638                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12639             }
12640         }
12641         return ret;
12642 #endif
12643 
12644 #ifdef CONFIG_SPLICE
12645 #ifdef TARGET_NR_tee
12646     case TARGET_NR_tee:
12647         {
12648             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12649         }
12650         return ret;
12651 #endif
12652 #ifdef TARGET_NR_splice
12653     case TARGET_NR_splice:
12654         {
12655             loff_t loff_in, loff_out;
12656             loff_t *ploff_in = NULL, *ploff_out = NULL;
12657             if (arg2) {
12658                 if (get_user_u64(loff_in, arg2)) {
12659                     return -TARGET_EFAULT;
12660                 }
12661                 ploff_in = &loff_in;
12662             }
12663             if (arg4) {
12664                 if (get_user_u64(loff_out, arg4)) {
12665                     return -TARGET_EFAULT;
12666                 }
12667                 ploff_out = &loff_out;
12668             }
12669             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12670             if (arg2) {
12671                 if (put_user_u64(loff_in, arg2)) {
12672                     return -TARGET_EFAULT;
12673                 }
12674             }
12675             if (arg4) {
12676                 if (put_user_u64(loff_out, arg4)) {
12677                     return -TARGET_EFAULT;
12678                 }
12679             }
12680         }
12681         return ret;
12682 #endif
12683 #ifdef TARGET_NR_vmsplice
12684     case TARGET_NR_vmsplice:
12685         {
12686             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12687             if (vec != NULL) {
12688                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12689                 unlock_iovec(vec, arg2, arg3, 0);
12690             } else {
12691                 ret = -host_to_target_errno(errno);
12692             }
12693         }
12694         return ret;
12695 #endif
12696 #endif /* CONFIG_SPLICE */
12697 #ifdef CONFIG_EVENTFD
12698 #if defined(TARGET_NR_eventfd)
12699     case TARGET_NR_eventfd:
12700         ret = get_errno(eventfd(arg1, 0));
12701         if (ret >= 0) {
12702             fd_trans_register(ret, &target_eventfd_trans);
12703         }
12704         return ret;
12705 #endif
12706 #if defined(TARGET_NR_eventfd2)
12707     case TARGET_NR_eventfd2:
12708     {
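        /*
         * Descriptive note: only O_NONBLOCK and O_CLOEXEC need translating
         * here, since their numeric values can differ between guest and
         * host; any remaining bits (e.g. EFD_SEMAPHORE) pass through
         * unchanged.
         */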
12709         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12710         if (arg2 & TARGET_O_NONBLOCK) {
12711             host_flags |= O_NONBLOCK;
12712         }
12713         if (arg2 & TARGET_O_CLOEXEC) {
12714             host_flags |= O_CLOEXEC;
12715         }
12716         ret = get_errno(eventfd(arg1, host_flags));
12717         if (ret >= 0) {
12718             fd_trans_register(ret, &target_eventfd_trans);
12719         }
12720         return ret;
12721     }
12722 #endif
12723 #endif /* CONFIG_EVENTFD  */
12724 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12725     case TARGET_NR_fallocate:
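        /*
         * Descriptive note: on 32-bit ABIs (other than MIPS n32) the 64-bit
         * offset and length arrive split across register pairs and are
         * reassembled with target_offset64() before calling the host.
         */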
12726 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12727         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12728                                   target_offset64(arg5, arg6)));
12729 #else
12730         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12731 #endif
12732         return ret;
12733 #endif
12734 #if defined(CONFIG_SYNC_FILE_RANGE)
12735 #if defined(TARGET_NR_sync_file_range)
12736     case TARGET_NR_sync_file_range:
12737 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12738 #if defined(TARGET_MIPS)
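        /*
         * Descriptive note: the 32-bit MIPS calling convention aligns 64-bit
         * arguments to even register pairs, which leaves arg2 unused here;
         * the offsets therefore start at arg3 and the flags end up in arg7.
         */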
12739         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12740                                         target_offset64(arg5, arg6), arg7));
12741 #else
12742         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12743                                         target_offset64(arg4, arg5), arg6));
12744 #endif /* !TARGET_MIPS */
12745 #else
12746         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12747 #endif
12748         return ret;
12749 #endif
12750 #if defined(TARGET_NR_sync_file_range2) || \
12751     defined(TARGET_NR_arm_sync_file_range)
12752 #if defined(TARGET_NR_sync_file_range2)
12753     case TARGET_NR_sync_file_range2:
12754 #endif
12755 #if defined(TARGET_NR_arm_sync_file_range)
12756     case TARGET_NR_arm_sync_file_range:
12757 #endif
12758         /* This is like sync_file_range but the arguments are reordered */
12759 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12760         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12761                                         target_offset64(arg5, arg6), arg2));
12762 #else
12763         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12764 #endif
12765         return ret;
12766 #endif
12767 #endif
12768 #if defined(TARGET_NR_signalfd4)
12769     case TARGET_NR_signalfd4:
12770         return do_signalfd4(arg1, arg2, arg4);
12771 #endif
12772 #if defined(TARGET_NR_signalfd)
12773     case TARGET_NR_signalfd:
12774         return do_signalfd4(arg1, arg2, 0);
12775 #endif
12776 #if defined(CONFIG_EPOLL)
12777 #if defined(TARGET_NR_epoll_create)
12778     case TARGET_NR_epoll_create:
12779         return get_errno(epoll_create(arg1));
12780 #endif
12781 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12782     case TARGET_NR_epoll_create1:
12783         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12784 #endif
12785 #if defined(TARGET_NR_epoll_ctl)
12786     case TARGET_NR_epoll_ctl:
12787     {
12788         struct epoll_event ep;
12789         struct epoll_event *epp = 0;
12790         if (arg4) {
12791             if (arg2 != EPOLL_CTL_DEL) {
12792                 struct target_epoll_event *target_ep;
12793                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12794                     return -TARGET_EFAULT;
12795                 }
12796                 ep.events = tswap32(target_ep->events);
12797                 /*
12798                  * The epoll_data_t union is just opaque data to the kernel,
12799                  * so we transfer all 64 bits across and need not worry what
12800                  * actual data type it is.
12801                  */
12802                 ep.data.u64 = tswap64(target_ep->data.u64);
12803                 unlock_user_struct(target_ep, arg4, 0);
12804             }
12805             /*
12806              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12807              * non-null event pointer even though the argument is ignored,
12808              * so pass &ep whenever the guest supplied one.
12809              */
12810             epp = &ep;
12811         }
12812         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12813     }
12814 #endif
12815 
12816 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12817 #if defined(TARGET_NR_epoll_wait)
12818     case TARGET_NR_epoll_wait:
12819 #endif
12820 #if defined(TARGET_NR_epoll_pwait)
12821     case TARGET_NR_epoll_pwait:
12822 #endif
12823     {
12824         struct target_epoll_event *target_ep;
12825         struct epoll_event *ep;
12826         int epfd = arg1;
12827         int maxevents = arg3;
12828         int timeout = arg4;
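        /*
         * Descriptive note: events are gathered into a host-side array first
         * and then byte-swapped into the guest buffer, since struct
         * epoll_event and struct target_epoll_event may differ in layout and
         * endianness.
         */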
12829 
12830         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12831             return -TARGET_EINVAL;
12832         }
12833 
12834         target_ep = lock_user(VERIFY_WRITE, arg2,
12835                               maxevents * sizeof(struct target_epoll_event), 1);
12836         if (!target_ep) {
12837             return -TARGET_EFAULT;
12838         }
12839 
12840         ep = g_try_new(struct epoll_event, maxevents);
12841         if (!ep) {
12842             unlock_user(target_ep, arg2, 0);
12843             return -TARGET_ENOMEM;
12844         }
12845 
12846         switch (num) {
12847 #if defined(TARGET_NR_epoll_pwait)
12848         case TARGET_NR_epoll_pwait:
12849         {
12850             sigset_t *set = NULL;
12851 
12852             if (arg5) {
12853                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12854                 if (ret != 0) {
12855                     break;
12856                 }
12857             }
12858 
12859             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12860                                              set, SIGSET_T_SIZE));
12861 
12862             if (set) {
12863                 finish_sigsuspend_mask(ret);
12864             }
12865             break;
12866         }
12867 #endif
12868 #if defined(TARGET_NR_epoll_wait)
12869         case TARGET_NR_epoll_wait:
12870             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12871                                              NULL, 0));
12872             break;
12873 #endif
12874         default:
12875             ret = -TARGET_ENOSYS;
12876         }
12877         if (!is_error(ret)) {
12878             int i;
12879             for (i = 0; i < ret; i++) {
12880                 target_ep[i].events = tswap32(ep[i].events);
12881                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12882             }
12883             unlock_user(target_ep, arg2,
12884                         ret * sizeof(struct target_epoll_event));
12885         } else {
12886             unlock_user(target_ep, arg2, 0);
12887         }
12888         g_free(ep);
12889         return ret;
12890     }
12891 #endif
12892 #endif
12893 #ifdef TARGET_NR_prlimit64
12894     case TARGET_NR_prlimit64:
12895     {
12896         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12897         struct target_rlimit64 *target_rnew, *target_rold;
12898         struct host_rlimit64 rnew, rold, *rnewp = 0;
12899         int resource = target_to_host_resource(arg2);
12900 
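        /*
         * Descriptive note: a new limit for RLIMIT_AS, RLIMIT_DATA or
         * RLIMIT_STACK is not forwarded to the host (rnewp stays NULL),
         * presumably because it would constrain QEMU itself rather than the
         * emulated guest; only reading back the old limit is honoured for
         * those resources.
         */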
12901         if (arg3 && (resource != RLIMIT_AS &&
12902                      resource != RLIMIT_DATA &&
12903                      resource != RLIMIT_STACK)) {
12904             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12905                 return -TARGET_EFAULT;
12906             }
12907             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12908             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12909             unlock_user_struct(target_rnew, arg3, 0);
12910             rnewp = &rnew;
12911         }
12912 
12913         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12914         if (!is_error(ret) && arg4) {
12915             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12916                 return -TARGET_EFAULT;
12917             }
12918             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12919             target_rold->rlim_max = tswap64(rold.rlim_max);
12920             unlock_user_struct(target_rold, arg4, 1);
12921         }
12922         return ret;
12923     }
12924 #endif
12925 #ifdef TARGET_NR_gethostname
12926     case TARGET_NR_gethostname:
12927     {
12928         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12929         if (name) {
12930             ret = get_errno(gethostname(name, arg2));
12931             unlock_user(name, arg1, arg2);
12932         } else {
12933             ret = -TARGET_EFAULT;
12934         }
12935         return ret;
12936     }
12937 #endif
12938 #ifdef TARGET_NR_atomic_cmpxchg_32
12939     case TARGET_NR_atomic_cmpxchg_32:
12940     {
12941         /* should use start_exclusive from main.c */
12942         abi_ulong mem_value;
12943         if (get_user_u32(mem_value, arg6)) {
12944             target_siginfo_t info;
12945             info.si_signo = SIGSEGV;
12946             info.si_errno = 0;
12947             info.si_code = TARGET_SEGV_MAPERR;
12948             info._sifields._sigfault._addr = arg6;
12949             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12950             return 0xdeadbeef;
12951         }
12952         if (mem_value == arg2) {
12953             put_user_u32(arg1, arg6);
12954         }
12955         return mem_value;
12956     }
12957 #endif
12958 #ifdef TARGET_NR_atomic_barrier
12959     case TARGET_NR_atomic_barrier:
12960         /* Like the kernel implementation and the qemu arm barrier,
12961            treat this as a no-op. */
12962         return 0;
12963 #endif
12964 
12965 #ifdef TARGET_NR_timer_create
12966     case TARGET_NR_timer_create:
12967     {
12968         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
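        /*
         * Descriptive note: host timers live in the g_posix_timers[] slot
         * table; the id handed back to the guest encodes the slot index
         * together with TIMER_MAGIC so that get_timer_id() can reject stale
         * or foreign ids later on.
         */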
12969 
12970         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12971 
12972         int clkid = arg1;
12973         int timer_index = next_free_host_timer();
12974 
12975         if (timer_index < 0) {
12976             ret = -TARGET_EAGAIN;
12977         } else {
12978             timer_t *phtimer = g_posix_timers + timer_index;
12979 
12980             if (arg2) {
12981                 phost_sevp = &host_sevp;
12982                 ret = target_to_host_sigevent(phost_sevp, arg2);
12983                 if (ret != 0) {
12984                     free_host_timer_slot(timer_index);
12985                     return ret;
12986                 }
12987             }
12988 
12989             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12990             if (ret) {
12991                 free_host_timer_slot(timer_index);
12992             } else {
12993                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12994                     timer_delete(*phtimer);
12995                     free_host_timer_slot(timer_index);
12996                     return -TARGET_EFAULT;
12997                 }
12998             }
12999         }
13000         return ret;
13001     }
13002 #endif
13003 
13004 #ifdef TARGET_NR_timer_settime
13005     case TARGET_NR_timer_settime:
13006     {
13007         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13008          * struct itimerspec * old_value */
13009         target_timer_t timerid = get_timer_id(arg1);
13010 
13011         if (timerid < 0) {
13012             ret = timerid;
13013         } else if (arg3 == 0) {
13014             ret = -TARGET_EINVAL;
13015         } else {
13016             timer_t htimer = g_posix_timers[timerid];
13017             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13018 
13019             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13020                 return -TARGET_EFAULT;
13021             }
13022             ret = get_errno(
13023                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13024             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13025                 return -TARGET_EFAULT;
13026             }
13027         }
13028         return ret;
13029     }
13030 #endif
13031 
13032 #ifdef TARGET_NR_timer_settime64
13033     case TARGET_NR_timer_settime64:
13034     {
13035         target_timer_t timerid = get_timer_id(arg1);
13036 
13037         if (timerid < 0) {
13038             ret = timerid;
13039         } else if (arg3 == 0) {
13040             ret = -TARGET_EINVAL;
13041         } else {
13042             timer_t htimer = g_posix_timers[timerid];
13043             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13044 
13045             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13046                 return -TARGET_EFAULT;
13047             }
13048             ret = get_errno(
13049                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13050             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13051                 return -TARGET_EFAULT;
13052             }
13053         }
13054         return ret;
13055     }
13056 #endif
13057 
13058 #ifdef TARGET_NR_timer_gettime
13059     case TARGET_NR_timer_gettime:
13060     {
13061         /* args: timer_t timerid, struct itimerspec *curr_value */
13062         target_timer_t timerid = get_timer_id(arg1);
13063 
13064         if (timerid < 0) {
13065             ret = timerid;
13066         } else if (!arg2) {
13067             ret = -TARGET_EFAULT;
13068         } else {
13069             timer_t htimer = g_posix_timers[timerid];
13070             struct itimerspec hspec;
13071             ret = get_errno(timer_gettime(htimer, &hspec));
13072 
13073             if (host_to_target_itimerspec(arg2, &hspec)) {
13074                 ret = -TARGET_EFAULT;
13075             }
13076         }
13077         return ret;
13078     }
13079 #endif
13080 
13081 #ifdef TARGET_NR_timer_gettime64
13082     case TARGET_NR_timer_gettime64:
13083     {
13084         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13085         target_timer_t timerid = get_timer_id(arg1);
13086 
13087         if (timerid < 0) {
13088             ret = timerid;
13089         } else if (!arg2) {
13090             ret = -TARGET_EFAULT;
13091         } else {
13092             timer_t htimer = g_posix_timers[timerid];
13093             struct itimerspec hspec;
13094             ret = get_errno(timer_gettime(htimer, &hspec));
13095 
13096             if (host_to_target_itimerspec64(arg2, &hspec)) {
13097                 ret = -TARGET_EFAULT;
13098             }
13099         }
13100         return ret;
13101     }
13102 #endif
13103 
13104 #ifdef TARGET_NR_timer_getoverrun
13105     case TARGET_NR_timer_getoverrun:
13106     {
13107         /* args: timer_t timerid */
13108         target_timer_t timerid = get_timer_id(arg1);
13109 
13110         if (timerid < 0) {
13111             ret = timerid;
13112         } else {
13113             timer_t htimer = g_posix_timers[timerid];
13114             ret = get_errno(timer_getoverrun(htimer));
13115         }
13116         return ret;
13117     }
13118 #endif
13119 
13120 #ifdef TARGET_NR_timer_delete
13121     case TARGET_NR_timer_delete:
13122     {
13123         /* args: timer_t timerid */
13124         target_timer_t timerid = get_timer_id(arg1);
13125 
13126         if (timerid < 0) {
13127             ret = timerid;
13128         } else {
13129             timer_t htimer = g_posix_timers[timerid];
13130             ret = get_errno(timer_delete(htimer));
13131             free_host_timer_slot(timerid);
13132         }
13133         return ret;
13134     }
13135 #endif
13136 
13137 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13138     case TARGET_NR_timerfd_create:
13139         return get_errno(timerfd_create(arg1,
13140                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13141 #endif
13142 
13143 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13144     case TARGET_NR_timerfd_gettime:
13145         {
13146             struct itimerspec its_curr;
13147 
13148             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13149 
13150             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13151                 return -TARGET_EFAULT;
13152             }
13153         }
13154         return ret;
13155 #endif
13156 
13157 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13158     case TARGET_NR_timerfd_gettime64:
13159         {
13160             struct itimerspec its_curr;
13161 
13162             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13163 
13164             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13165                 return -TARGET_EFAULT;
13166             }
13167         }
13168         return ret;
13169 #endif
13170 
13171 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13172     case TARGET_NR_timerfd_settime:
13173         {
13174             struct itimerspec its_new, its_old, *p_new;
13175 
13176             if (arg3) {
13177                 if (target_to_host_itimerspec(&its_new, arg3)) {
13178                     return -TARGET_EFAULT;
13179                 }
13180                 p_new = &its_new;
13181             } else {
13182                 p_new = NULL;
13183             }
13184 
13185             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13186 
13187             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13188                 return -TARGET_EFAULT;
13189             }
13190         }
13191         return ret;
13192 #endif
13193 
13194 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13195     case TARGET_NR_timerfd_settime64:
13196         {
13197             struct itimerspec its_new, its_old, *p_new;
13198 
13199             if (arg3) {
13200                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13201                     return -TARGET_EFAULT;
13202                 }
13203                 p_new = &its_new;
13204             } else {
13205                 p_new = NULL;
13206             }
13207 
13208             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13209 
13210             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13211                 return -TARGET_EFAULT;
13212             }
13213         }
13214         return ret;
13215 #endif
13216 
13217 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13218     case TARGET_NR_ioprio_get:
13219         return get_errno(ioprio_get(arg1, arg2));
13220 #endif
13221 
13222 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13223     case TARGET_NR_ioprio_set:
13224         return get_errno(ioprio_set(arg1, arg2, arg3));
13225 #endif
13226 
13227 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13228     case TARGET_NR_setns:
13229         return get_errno(setns(arg1, arg2));
13230 #endif
13231 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13232     case TARGET_NR_unshare:
13233         return get_errno(unshare(arg1));
13234 #endif
13235 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13236     case TARGET_NR_kcmp:
13237         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13238 #endif
13239 #ifdef TARGET_NR_swapcontext
13240     case TARGET_NR_swapcontext:
13241         /* PowerPC specific.  */
13242         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13243 #endif
13244 #ifdef TARGET_NR_memfd_create
13245     case TARGET_NR_memfd_create:
13246         p = lock_user_string(arg1);
13247         if (!p) {
13248             return -TARGET_EFAULT;
13249         }
13250         ret = get_errno(memfd_create(p, arg2));
13251         fd_trans_unregister(ret);
13252         unlock_user(p, arg1, 0);
13253         return ret;
13254 #endif
13255 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13256     case TARGET_NR_membarrier:
13257         return get_errno(membarrier(arg1, arg2));
13258 #endif
13259 
13260 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13261     case TARGET_NR_copy_file_range:
13262         {
13263             loff_t inoff, outoff;
13264             loff_t *pinoff = NULL, *poutoff = NULL;
13265 
13266             if (arg2) {
13267                 if (get_user_u64(inoff, arg2)) {
13268                     return -TARGET_EFAULT;
13269                 }
13270                 pinoff = &inoff;
13271             }
13272             if (arg4) {
13273                 if (get_user_u64(outoff, arg4)) {
13274                     return -TARGET_EFAULT;
13275                 }
13276                 poutoff = &outoff;
13277             }
13278             /* Do not sign-extend the count parameter. */
13279             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13280                                                  (abi_ulong)arg5, arg6));
13281             if (!is_error(ret) && ret > 0) {
13282                 if (arg2) {
13283                     if (put_user_u64(inoff, arg2)) {
13284                         return -TARGET_EFAULT;
13285                     }
13286                 }
13287                 if (arg4) {
13288                     if (put_user_u64(outoff, arg4)) {
13289                         return -TARGET_EFAULT;
13290                     }
13291                 }
13292             }
13293         }
13294         return ret;
13295 #endif
13296 
13297 #if defined(TARGET_NR_pivot_root)
13298     case TARGET_NR_pivot_root:
13299         {
13300             void *p2;
13301             p = lock_user_string(arg1); /* new_root */
13302             p2 = lock_user_string(arg2); /* put_old */
13303             if (!p || !p2) {
13304                 ret = -TARGET_EFAULT;
13305             } else {
13306                 ret = get_errno(pivot_root(p, p2));
13307             }
13308             unlock_user(p2, arg2, 0);
13309             unlock_user(p, arg1, 0);
13310         }
13311         return ret;
13312 #endif
13313 
13314     default:
13315         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13316         return -TARGET_ENOSYS;
13317     }
13318     return ret;
13319 }
13320 
13321 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13322                     abi_long arg2, abi_long arg3, abi_long arg4,
13323                     abi_long arg5, abi_long arg6, abi_long arg7,
13324                     abi_long arg8)
13325 {
13326     CPUState *cpu = env_cpu(cpu_env);
13327     abi_long ret;
13328 
13329 #ifdef DEBUG_ERESTARTSYS
13330     /* Debug-only code for exercising the syscall-restart code paths
13331      * in the per-architecture cpu main loops: restart every syscall
13332      * the guest makes once before letting it through.
13333      */
13334     {
13335         static bool flag;
13336         flag = !flag;
13337         if (flag) {
13338             return -QEMU_ERESTARTSYS;
13339         }
13340     }
13341 #endif
13342 
13343     record_syscall_start(cpu, num, arg1,
13344                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13345 
13346     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13347         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13348     }
13349 
13350     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13351                       arg5, arg6, arg7, arg8);
13352 
13353     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13354         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13355                           arg3, arg4, arg5, arg6);
13356     }
13357 
13358     record_syscall_return(cpu, num, ret);
13359     return ret;
13360 }
13361