xref: /openbmc/qemu/linux-user/syscall.c (revision 156e1f67)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 
99 #ifdef HAVE_SYS_MOUNT_FSCONFIG
100 /*
101  * glibc >= 2.36 linux/mount.h conflicts with sys/mount.h,
102  * which in turn prevents use of linux/fs.h. So we have to
103  * define the constants ourselves for now.
104  */
105 #define FS_IOC_GETFLAGS                _IOR('f', 1, long)
106 #define FS_IOC_SETFLAGS                _IOW('f', 2, long)
107 #define FS_IOC_GETVERSION              _IOR('v', 1, long)
108 #define FS_IOC_SETVERSION              _IOW('v', 2, long)
109 #define FS_IOC_FIEMAP                  _IOWR('f', 11, struct fiemap)
110 #define FS_IOC32_GETFLAGS              _IOR('f', 1, int)
111 #define FS_IOC32_SETFLAGS              _IOW('f', 2, int)
112 #define FS_IOC32_GETVERSION            _IOR('v', 1, int)
113 #define FS_IOC32_SETVERSION            _IOW('v', 2, int)
114 
115 #define BLKGETSIZE64 _IOR(0x12,114,size_t)
116 #define BLKDISCARD _IO(0x12,119)
117 #define BLKIOMIN _IO(0x12,120)
118 #define BLKIOOPT _IO(0x12,121)
119 #define BLKALIGNOFF _IO(0x12,122)
120 #define BLKPBSZGET _IO(0x12,123)
121 #define BLKDISCARDZEROES _IO(0x12,124)
122 #define BLKSECDISCARD _IO(0x12,125)
123 #define BLKROTATIONAL _IO(0x12,126)
124 #define BLKZEROOUT _IO(0x12,127)
125 
126 #define FIBMAP     _IO(0x00,1)
127 #define FIGETBSZ   _IO(0x00,2)
128 
129 struct file_clone_range {
130         __s64 src_fd;
131         __u64 src_offset;
132         __u64 src_length;
133         __u64 dest_offset;
134 };
135 
136 #define FICLONE         _IOW(0x94, 9, int)
137 #define FICLONERANGE    _IOW(0x94, 13, struct file_clone_range)
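/*
 * For illustration: these hand-rolled values use the standard ioctl
 * encoding, so they are bit-for-bit identical to the linux/fs.h
 * definitions they stand in for.  FS_IOC_GETFLAGS above, for example,
 * is _IOR('f', 1, long): direction = read, type = 'f', nr = 1 and
 * size = sizeof(long), packed into a single request number.
 */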
138 
139 #else
140 #include <linux/fs.h>
141 #endif
142 #include <linux/fd.h>
143 #if defined(CONFIG_FIEMAP)
144 #include <linux/fiemap.h>
145 #endif
146 #include <linux/fb.h>
147 #if defined(CONFIG_USBFS)
148 #include <linux/usbdevice_fs.h>
149 #include <linux/usb/ch9.h>
150 #endif
151 #include <linux/vt.h>
152 #include <linux/dm-ioctl.h>
153 #include <linux/reboot.h>
154 #include <linux/route.h>
155 #include <linux/filter.h>
156 #include <linux/blkpg.h>
157 #include <netpacket/packet.h>
158 #include <linux/netlink.h>
159 #include <linux/if_alg.h>
160 #include <linux/rtc.h>
161 #include <sound/asound.h>
162 #ifdef HAVE_BTRFS_H
163 #include <linux/btrfs.h>
164 #endif
165 #ifdef HAVE_DRM_H
166 #include <libdrm/drm.h>
167 #include <libdrm/i915_drm.h>
168 #endif
169 #include "linux_loop.h"
170 #include "uname.h"
171 
172 #include "qemu.h"
173 #include "user-internals.h"
174 #include "strace.h"
175 #include "signal-common.h"
176 #include "loader.h"
177 #include "user-mmap.h"
178 #include "user/safe-syscall.h"
179 #include "qemu/guest-random.h"
180 #include "qemu/selfmap.h"
181 #include "user/syscall-trace.h"
182 #include "special-errno.h"
183 #include "qapi/error.h"
184 #include "fd-trans.h"
185 #include "tcg/tcg.h"
186 #include "cpu_loop-common.h"
187 
188 #ifndef CLONE_IO
189 #define CLONE_IO                0x80000000      /* Clone io context */
190 #endif
191 
192 /* We can't directly call the host clone syscall, because this will
193  * badly confuse libc (breaking mutexes, for example). So we must
194  * divide clone flags into:
195  *  * flag combinations that look like pthread_create()
196  *  * flag combinations that look like fork()
197  *  * flags we can implement within QEMU itself
198  *  * flags we can't support and will return an error for
199  */
200 /* For thread creation, all these flags must be present; for
201  * fork, none must be present.
202  */
203 #define CLONE_THREAD_FLAGS                              \
204     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
205      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
206 
207 /* These flags are ignored:
208  * CLONE_DETACHED is now ignored by the kernel;
209  * CLONE_IO is just an optimisation hint to the I/O scheduler
210  */
211 #define CLONE_IGNORED_FLAGS                     \
212     (CLONE_DETACHED | CLONE_IO)
213 
214 /* Flags for fork which we can implement within QEMU itself */
215 #define CLONE_OPTIONAL_FORK_FLAGS               \
216     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
217      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
218 
219 /* Flags for thread creation which we can implement within QEMU itself */
220 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
221     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
222      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
223 
224 #define CLONE_INVALID_FORK_FLAGS                                        \
225     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
226 
227 #define CLONE_INVALID_THREAD_FLAGS                                      \
228     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
229        CLONE_IGNORED_FLAGS))
230 
231 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
232  * have almost all been allocated. We cannot support any of
233  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
234  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
235  * The checks against the invalid thread masks above will catch these.
236  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
237  */
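/*
 * Illustrative example (not part of the original source): glibc's
 * pthread_create() typically calls clone() with
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *   CLONE_CHILD_CLEARTID
 * which contains every bit of CLONE_THREAD_FLAGS and nothing beyond the
 * union of CSIGNAL, CLONE_THREAD_FLAGS, CLONE_OPTIONAL_THREAD_FLAGS and
 * CLONE_IGNORED_FLAGS, so the CLONE_INVALID_THREAD_FLAGS check accepts
 * it as thread creation.  A plain fork() passes only SIGCHLD (within
 * CSIGNAL), which likewise passes the CLONE_INVALID_FORK_FLAGS check.
 */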
238 
239 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
240  * once. This exercises the codepaths for restart.
241  */
242 //#define DEBUG_ERESTARTSYS
243 
244 //#include <linux/msdos_fs.h>
245 #define VFAT_IOCTL_READDIR_BOTH \
246     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
247 #define VFAT_IOCTL_READDIR_SHORT \
248     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
249 
250 #undef _syscall0
251 #undef _syscall1
252 #undef _syscall2
253 #undef _syscall3
254 #undef _syscall4
255 #undef _syscall5
256 #undef _syscall6
257 
258 #define _syscall0(type,name)		\
259 static type name (void)			\
260 {					\
261 	return syscall(__NR_##name);	\
262 }
263 
264 #define _syscall1(type,name,type1,arg1)		\
265 static type name (type1 arg1)			\
266 {						\
267 	return syscall(__NR_##name, arg1);	\
268 }
269 
270 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
271 static type name (type1 arg1,type2 arg2)		\
272 {							\
273 	return syscall(__NR_##name, arg1, arg2);	\
274 }
275 
276 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
277 static type name (type1 arg1,type2 arg2,type3 arg3)		\
278 {								\
279 	return syscall(__NR_##name, arg1, arg2, arg3);		\
280 }
281 
282 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
283 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
284 {										\
285 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
286 }
287 
288 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
289 		  type5,arg5)							\
290 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
291 {										\
292 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
293 }
294 
295 
296 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
297 		  type5,arg5,type6,arg6)					\
298 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
299                   type6 arg6)							\
300 {										\
301 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
302 }
303 
304 
305 #define __NR_sys_uname __NR_uname
306 #define __NR_sys_getcwd1 __NR_getcwd
307 #define __NR_sys_getdents __NR_getdents
308 #define __NR_sys_getdents64 __NR_getdents64
309 #define __NR_sys_getpriority __NR_getpriority
310 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
311 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
312 #define __NR_sys_syslog __NR_syslog
313 #if defined(__NR_futex)
314 # define __NR_sys_futex __NR_futex
315 #endif
316 #if defined(__NR_futex_time64)
317 # define __NR_sys_futex_time64 __NR_futex_time64
318 #endif
319 #define __NR_sys_statx __NR_statx
320 
321 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
322 #define __NR__llseek __NR_lseek
323 #endif
324 
325 /* Newer kernel ports have llseek() instead of _llseek() */
326 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
327 #define TARGET_NR__llseek TARGET_NR_llseek
328 #endif
329 
330 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
331 #ifndef TARGET_O_NONBLOCK_MASK
332 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
333 #endif
334 
335 #define __NR_sys_gettid __NR_gettid
336 _syscall0(int, sys_gettid)
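/*
 * Illustration: the invocation above expands (roughly) to
 *
 *   static int sys_gettid(void)
 *   {
 *       return syscall(__NR_sys_gettid);
 *   }
 *
 * and, because __NR_sys_gettid was just #defined to __NR_gettid, it is a
 * thin wrapper around the host gettid syscall.  The remaining _syscallN
 * uses below work the same way, only with more arguments.
 */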
337 
338 /* For the 64-bit guest on 32-bit host case we must emulate
339  * getdents using getdents64, because otherwise the host
340  * might hand us back more dirent records than we can fit
341  * into the guest buffer after structure format conversion.
342  * In all other cases we emulate getdents with getdents, if the host has it.
343  */
344 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
345 #define EMULATE_GETDENTS_WITH_GETDENTS
346 #endif
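/*
 * For illustration: a 64-bit guest (say aarch64) running on a 32-bit
 * host has TARGET_ABI_BITS > HOST_LONG_BITS, so the macro above stays
 * undefined and TARGET_NR_getdents is serviced via sys_getdents64()
 * plus a per-record conversion back into the guest's dirent layout, as
 * described in the comment above.
 */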
347 
348 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
349 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
350 #endif
351 #if (defined(TARGET_NR_getdents) && \
352       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
353     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
354 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
355 #endif
356 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
357 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
358           loff_t *, res, uint, wh);
359 #endif
360 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
361 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
362           siginfo_t *, uinfo)
363 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
364 #ifdef __NR_exit_group
365 _syscall1(int,exit_group,int,error_code)
366 #endif
367 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
368 #define __NR_sys_close_range __NR_close_range
369 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
370 #ifndef CLOSE_RANGE_CLOEXEC
371 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
372 #endif
373 #endif
374 #if defined(__NR_futex)
375 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
376           const struct timespec *,timeout,int *,uaddr2,int,val3)
377 #endif
378 #if defined(__NR_futex_time64)
379 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
380           const struct timespec *,timeout,int *,uaddr2,int,val3)
381 #endif
382 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
383 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
384 #endif
385 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
386 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
387                              unsigned int, flags);
388 #endif
389 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
390 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
391 #endif
392 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
393 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
394           unsigned long *, user_mask_ptr);
395 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
396 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
397           unsigned long *, user_mask_ptr);
398 /* sched_attr is not defined in glibc */
399 struct sched_attr {
400     uint32_t size;
401     uint32_t sched_policy;
402     uint64_t sched_flags;
403     int32_t sched_nice;
404     uint32_t sched_priority;
405     uint64_t sched_runtime;
406     uint64_t sched_deadline;
407     uint64_t sched_period;
408     uint32_t sched_util_min;
409     uint32_t sched_util_max;
410 };
411 #define __NR_sys_sched_getattr __NR_sched_getattr
412 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
413           unsigned int, size, unsigned int, flags);
414 #define __NR_sys_sched_setattr __NR_sched_setattr
415 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
416           unsigned int, flags);
417 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
418 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
419 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
420 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
421           const struct sched_param *, param);
422 #define __NR_sys_sched_getparam __NR_sched_getparam
423 _syscall2(int, sys_sched_getparam, pid_t, pid,
424           struct sched_param *, param);
425 #define __NR_sys_sched_setparam __NR_sched_setparam
426 _syscall2(int, sys_sched_setparam, pid_t, pid,
427           const struct sched_param *, param);
428 #define __NR_sys_getcpu __NR_getcpu
429 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
430 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
431           void *, arg);
432 _syscall2(int, capget, struct __user_cap_header_struct *, header,
433           struct __user_cap_data_struct *, data);
434 _syscall2(int, capset, struct __user_cap_header_struct *, header,
435           struct __user_cap_data_struct *, data);
436 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
437 _syscall2(int, ioprio_get, int, which, int, who)
438 #endif
439 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
440 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
441 #endif
442 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
443 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
444 #endif
445 
446 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
447 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
448           unsigned long, idx1, unsigned long, idx2)
449 #endif
450 
451 /*
452  * It is assumed that struct statx is architecture independent.
453  */
454 #if defined(TARGET_NR_statx) && defined(__NR_statx)
455 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
456           unsigned int, mask, struct target_statx *, statxbuf)
457 #endif
458 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
459 _syscall2(int, membarrier, int, cmd, int, flags)
460 #endif
461 
462 static const bitmask_transtbl fcntl_flags_tbl[] = {
463   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
464   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
465   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
466   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
467   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
468   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
469   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
470   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
471   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
472   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
473   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
474   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
475   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
476 #if defined(O_DIRECT)
477   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
478 #endif
479 #if defined(O_NOATIME)
480   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
481 #endif
482 #if defined(O_CLOEXEC)
483   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
484 #endif
485 #if defined(O_PATH)
486   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
487 #endif
488 #if defined(O_TMPFILE)
489   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
490 #endif
491   /* Don't terminate the list prematurely on 64-bit host+guest.  */
492 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
493   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
494 #endif
495   { 0, 0, 0, 0 }
496 };
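/*
 * Illustration of how the table is consumed (by bitmask helpers defined
 * elsewhere in this file): each row pairs a (mask, value) on the guest
 * side with the matching (mask, value) on the host side, so a call such
 * as
 *
 *   host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *
 * can translate e.g. a guest O_NONBLOCK -- whose numeric value differs
 * between ABIs such as mips and x86 -- into the host's O_NONBLOCK
 * before the flags reach openat() or fcntl().
 */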
497 
498 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
499 
500 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
501 #if defined(__NR_utimensat)
502 #define __NR_sys_utimensat __NR_utimensat
503 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
504           const struct timespec *,tsp,int,flags)
505 #else
506 static int sys_utimensat(int dirfd, const char *pathname,
507                          const struct timespec times[2], int flags)
508 {
509     errno = ENOSYS;
510     return -1;
511 }
512 #endif
513 #endif /* TARGET_NR_utimensat */
514 
515 #ifdef TARGET_NR_renameat2
516 #if defined(__NR_renameat2)
517 #define __NR_sys_renameat2 __NR_renameat2
518 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
519           const char *, new, unsigned int, flags)
520 #else
521 static int sys_renameat2(int oldfd, const char *old,
522                          int newfd, const char *new, int flags)
523 {
524     if (flags == 0) {
525         return renameat(oldfd, old, newfd, new);
526     }
527     errno = ENOSYS;
528     return -1;
529 }
530 #endif
531 #endif /* TARGET_NR_renameat2 */
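/*
 * Illustration: on a host without __NR_renameat2 the fallback above can
 * only honour flags == 0, so a guest requesting e.g. RENAME_NOREPLACE
 * sees -TARGET_ENOSYS (the ENOSYS set here is translated by get_errno()
 * at the call site), while an ordinary rename still goes through
 * renameat().
 */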
532 
533 #ifdef CONFIG_INOTIFY
534 #include <sys/inotify.h>
535 #else
536 /* Userspace can usually survive runtime without inotify */
537 #undef TARGET_NR_inotify_init
538 #undef TARGET_NR_inotify_init1
539 #undef TARGET_NR_inotify_add_watch
540 #undef TARGET_NR_inotify_rm_watch
541 #endif /* CONFIG_INOTIFY  */
542 
543 #if defined(TARGET_NR_prlimit64)
544 #ifndef __NR_prlimit64
545 # define __NR_prlimit64 -1
546 #endif
547 #define __NR_sys_prlimit64 __NR_prlimit64
548 /* The glibc rlimit structure may not be that used by the underlying syscall */
549 struct host_rlimit64 {
550     uint64_t rlim_cur;
551     uint64_t rlim_max;
552 };
553 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
554           const struct host_rlimit64 *, new_limit,
555           struct host_rlimit64 *, old_limit)
556 #endif
557 
558 
559 #if defined(TARGET_NR_timer_create)
560 /* Maximum of 32 active POSIX timers allowed at any one time. */
561 #define GUEST_TIMER_MAX 32
562 static timer_t g_posix_timers[GUEST_TIMER_MAX];
563 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
564 
565 static inline int next_free_host_timer(void)
566 {
567     int k;
568     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
569         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
570             return k;
571         }
572     }
573     return -1;
574 }
575 
576 static inline void free_host_timer_slot(int id)
577 {
578     qatomic_store_release(g_posix_timer_allocated + id, 0);
579 }
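/*
 * Illustration of the slot allocator above: if two guest threads create
 * timers concurrently, each scans g_posix_timer_allocated[];
 * qatomic_xchg() stores 1 and returns the previous value, so exactly
 * one caller observes 0 for a given slot and claims it, while the other
 * moves on to the next index (or returns -1 once all 32 slots are
 * busy).  free_host_timer_slot() hands a slot back with a release store
 * when the guest timer goes away.
 */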
580 #endif
581 
582 static inline int host_to_target_errno(int host_errno)
583 {
584     switch (host_errno) {
585 #define E(X)  case X: return TARGET_##X;
586 #include "errnos.c.inc"
587 #undef E
588     default:
589         return host_errno;
590     }
591 }
592 
593 static inline int target_to_host_errno(int target_errno)
594 {
595     switch (target_errno) {
596 #define E(X)  case TARGET_##X: return X;
597 #include "errnos.c.inc"
598 #undef E
599     default:
600         return target_errno;
601     }
602 }
603 
604 abi_long get_errno(abi_long ret)
605 {
606     if (ret == -1)
607         return -host_to_target_errno(errno);
608     else
609         return ret;
610 }
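/*
 * Typical use, for illustration: syscall implementations wrap the host
 * call as in (roughly)
 *
 *   ret = get_errno(sys_getcwd1(buf, size));
 *
 * so a host failure of -1 with, say, errno == ERANGE becomes the
 * guest-visible -TARGET_ERANGE, while non-error results pass through
 * unchanged.
 */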
611 
612 const char *target_strerror(int err)
613 {
614     if (err == QEMU_ERESTARTSYS) {
615         return "To be restarted";
616     }
617     if (err == QEMU_ESIGRETURN) {
618         return "Successful exit from sigreturn";
619     }
620 
621     return strerror(target_to_host_errno(err));
622 }
623 
624 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
625 {
626     int i;
627     uint8_t b;
628     if (usize <= ksize) {
629         return 1;
630     }
631     for (i = ksize; i < usize; i++) {
632         if (get_user_u8(b, addr + i)) {
633             return -TARGET_EFAULT;
634         }
635         if (b != 0) {
636             return 0;
637         }
638     }
639     return 1;
640 }
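/*
 * Worked example (illustrative numbers): for an extensible struct such
 * as sched_attr the guest may pass a usize larger than the ksize QEMU
 * knows about.  With ksize = 48 and usize = 56 the loop reads the 8
 * trailing guest bytes: an all-zero tail returns 1 (accept), any
 * non-zero byte returns 0 (the caller should reject the call), and an
 * unreadable byte returns -TARGET_EFAULT.
 */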
641 
642 #define safe_syscall0(type, name) \
643 static type safe_##name(void) \
644 { \
645     return safe_syscall(__NR_##name); \
646 }
647 
648 #define safe_syscall1(type, name, type1, arg1) \
649 static type safe_##name(type1 arg1) \
650 { \
651     return safe_syscall(__NR_##name, arg1); \
652 }
653 
654 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
655 static type safe_##name(type1 arg1, type2 arg2) \
656 { \
657     return safe_syscall(__NR_##name, arg1, arg2); \
658 }
659 
660 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
661 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
662 { \
663     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
664 }
665 
666 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
667     type4, arg4) \
668 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
669 { \
670     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
671 }
672 
673 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
674     type4, arg4, type5, arg5) \
675 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
676     type5 arg5) \
677 { \
678     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
679 }
680 
681 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
682     type4, arg4, type5, arg5, type6, arg6) \
683 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
684     type5 arg5, type6 arg6) \
685 { \
686     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
687 }
688 
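/*
 * Illustration (see user/safe-syscall.h for the precise contract): the
 * safe_syscallN() invocations below generate safe_read(), safe_write()
 * and friends, which behave like the corresponding libc calls except
 * that a guest signal arriving before the host syscall starts makes
 * them fail with errno set to QEMU_ERESTARTSYS, so the emulated syscall
 * can be restarted once the signal has been delivered.  Syscall cases
 * later in this file therefore use, e.g.,
 *
 *   ret = get_errno(safe_read(fd, host_buf, count));
 *
 * rather than calling read() directly.
 */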
689 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
690 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
691 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
692               int, flags, mode_t, mode)
693 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
694 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
695               struct rusage *, rusage)
696 #endif
697 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
698               int, options, struct rusage *, rusage)
699 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
700 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
701     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
702 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
703               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
704 #endif
705 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
706 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
707               struct timespec *, tsp, const sigset_t *, sigmask,
708               size_t, sigsetsize)
709 #endif
710 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
711               int, maxevents, int, timeout, const sigset_t *, sigmask,
712               size_t, sigsetsize)
713 #if defined(__NR_futex)
714 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
715               const struct timespec *,timeout,int *,uaddr2,int,val3)
716 #endif
717 #if defined(__NR_futex_time64)
718 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
719               const struct timespec *,timeout,int *,uaddr2,int,val3)
720 #endif
721 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
722 safe_syscall2(int, kill, pid_t, pid, int, sig)
723 safe_syscall2(int, tkill, int, tid, int, sig)
724 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
725 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
726 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
727 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
728               unsigned long, pos_l, unsigned long, pos_h)
729 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
730               unsigned long, pos_l, unsigned long, pos_h)
731 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
732               socklen_t, addrlen)
733 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
734               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
735 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
736               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
737 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
738 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
739 safe_syscall2(int, flock, int, fd, int, operation)
740 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
741 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
742               const struct timespec *, uts, size_t, sigsetsize)
743 #endif
744 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
745               int, flags)
746 #if defined(TARGET_NR_nanosleep)
747 safe_syscall2(int, nanosleep, const struct timespec *, req,
748               struct timespec *, rem)
749 #endif
750 #if defined(TARGET_NR_clock_nanosleep) || \
751     defined(TARGET_NR_clock_nanosleep_time64)
752 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
753               const struct timespec *, req, struct timespec *, rem)
754 #endif
755 #ifdef __NR_ipc
756 #ifdef __s390x__
757 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
758               void *, ptr)
759 #else
760 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
761               void *, ptr, long, fifth)
762 #endif
763 #endif
764 #ifdef __NR_msgsnd
765 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
766               int, flags)
767 #endif
768 #ifdef __NR_msgrcv
769 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
770               long, msgtype, int, flags)
771 #endif
772 #ifdef __NR_semtimedop
773 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
774               unsigned, nsops, const struct timespec *, timeout)
775 #endif
776 #if defined(TARGET_NR_mq_timedsend) || \
777     defined(TARGET_NR_mq_timedsend_time64)
778 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
779               size_t, len, unsigned, prio, const struct timespec *, timeout)
780 #endif
781 #if defined(TARGET_NR_mq_timedreceive) || \
782     defined(TARGET_NR_mq_timedreceive_time64)
783 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
784               size_t, len, unsigned *, prio, const struct timespec *, timeout)
785 #endif
786 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
787 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
788               int, outfd, loff_t *, poutoff, size_t, length,
789               unsigned int, flags)
790 #endif
791 
792 /* We do ioctl like this rather than via safe_syscall3 to preserve the
793  * "third argument might be integer or pointer or not present" behaviour of
794  * the libc function.
795  */
796 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
797 /* Similarly for fcntl. Note that callers must always:
798  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
799  *  use the flock64 struct rather than unsuffixed flock
800  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
801  */
802 #ifdef __NR_fcntl64
803 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
804 #else
805 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
806 #endif
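/*
 * Illustration of the rule in the comment above: callers lock a region
 * with something like
 *
 *   struct flock64 fl64 = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *   ret = get_errno(safe_fcntl(fd, F_SETLK64, &fl64));
 *
 * and never with F_SETLK/struct flock, so the same code path sees
 * 64-bit file offsets on 32-bit and 64-bit hosts alike.
 */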
807 
808 static inline int host_to_target_sock_type(int host_type)
809 {
810     int target_type;
811 
812     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
813     case SOCK_DGRAM:
814         target_type = TARGET_SOCK_DGRAM;
815         break;
816     case SOCK_STREAM:
817         target_type = TARGET_SOCK_STREAM;
818         break;
819     default:
820         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
821         break;
822     }
823 
824 #if defined(SOCK_CLOEXEC)
825     if (host_type & SOCK_CLOEXEC) {
826         target_type |= TARGET_SOCK_CLOEXEC;
827     }
828 #endif
829 
830 #if defined(SOCK_NONBLOCK)
831     if (host_type & SOCK_NONBLOCK) {
832         target_type |= TARGET_SOCK_NONBLOCK;
833     }
834 #endif
835 
836     return target_type;
837 }
838 
839 static abi_ulong target_brk;
840 static abi_ulong target_original_brk;
841 static abi_ulong brk_page;
842 
843 void target_set_brk(abi_ulong new_brk)
844 {
845     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
846     brk_page = HOST_PAGE_ALIGN(target_brk);
847 }
848 
849 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
850 #define DEBUGF_BRK(message, args...)
851 
852 /* do_brk() must return target values and target errnos. */
853 abi_long do_brk(abi_ulong new_brk)
854 {
855     abi_long mapped_addr;
856     abi_ulong new_alloc_size;
857 
858     /* brk pointers are always untagged */
859 
860     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
861 
862     if (!new_brk) {
863         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
864         return target_brk;
865     }
866     if (new_brk < target_original_brk) {
867         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
868                    target_brk);
869         return target_brk;
870     }
871 
872     /* If the new brk is less than the highest page reserved to the
873      * target heap allocation, set it and we're almost done...  */
874     if (new_brk <= brk_page) {
875         /* Heap contents are initialized to zero, as for anonymous
876          * mapped pages.  */
877         if (new_brk > target_brk) {
878             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
879         }
880         target_brk = new_brk;
881         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
882         return target_brk;
883     }
884 
885     /* We need to allocate more memory after the brk... Note that
886      * we don't use MAP_FIXED because that will map over the top of
887      * any existing mapping (like the one with the host libc or qemu
888      * itself); instead we treat "mapped but at wrong address" as
889      * a failure and unmap again.
890      */
891     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
892     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
893                                         PROT_READ|PROT_WRITE,
894                                         MAP_ANON|MAP_PRIVATE, 0, 0));
895 
896     if (mapped_addr == brk_page) {
897         /* Heap contents are initialized to zero, as for anonymous
898          * mapped pages.  Technically the new pages are already
899          * initialized to zero since they *are* anonymous mapped
900          * pages; however, we have to take care with the contents that
901          * come from the remaining part of the previous page: it may
902          * contain garbage data due to a previous heap usage (grown
903          * then shrunken).  */
904         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
905 
906         target_brk = new_brk;
907         brk_page = HOST_PAGE_ALIGN(target_brk);
908         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
909             target_brk);
910         return target_brk;
911     } else if (mapped_addr != -1) {
912         /* Mapped but at wrong address, meaning there wasn't actually
913          * enough space for this brk.
914          */
915         target_munmap(mapped_addr, new_alloc_size);
916         mapped_addr = -1;
917         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
918     }
919     else {
920         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
921     }
922 
923 #if defined(TARGET_ALPHA)
924     /* We (partially) emulate OSF/1 on Alpha, which requires we
925        return a proper errno, not an unchanged brk value.  */
926     return -TARGET_ENOMEM;
927 #endif
928     /* For everything else, return the previous break. */
929     return target_brk;
930 }
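/*
 * Worked example with made-up numbers: assume 4 KiB host pages,
 * target_brk == 0x500100 and brk_page == 0x501000.  A guest
 * brk(0x500800) stays below brk_page, so only 0x500100..0x5007ff is
 * zeroed and target_brk moves.  A guest brk(0x503000) needs two more
 * pages: they are mmap'd at brk_page, the tail of the old page is
 * zeroed, and target_brk/brk_page both advance; if the kernel can only
 * place the mapping elsewhere it is unmapped again and the old break
 * (or -TARGET_ENOMEM on Alpha) is returned.
 */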
931 
932 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
933     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
934 static inline abi_long copy_from_user_fdset(fd_set *fds,
935                                             abi_ulong target_fds_addr,
936                                             int n)
937 {
938     int i, nw, j, k;
939     abi_ulong b, *target_fds;
940 
941     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
942     if (!(target_fds = lock_user(VERIFY_READ,
943                                  target_fds_addr,
944                                  sizeof(abi_ulong) * nw,
945                                  1)))
946         return -TARGET_EFAULT;
947 
948     FD_ZERO(fds);
949     k = 0;
950     for (i = 0; i < nw; i++) {
951         /* grab the abi_ulong */
952         __get_user(b, &target_fds[i]);
953         for (j = 0; j < TARGET_ABI_BITS; j++) {
954             /* check the bit inside the abi_ulong */
955             if ((b >> j) & 1)
956                 FD_SET(k, fds);
957             k++;
958         }
959     }
960 
961     unlock_user(target_fds, target_fds_addr, 0);
962 
963     return 0;
964 }
965 
966 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
967                                                  abi_ulong target_fds_addr,
968                                                  int n)
969 {
970     if (target_fds_addr) {
971         if (copy_from_user_fdset(fds, target_fds_addr, n))
972             return -TARGET_EFAULT;
973         *fds_ptr = fds;
974     } else {
975         *fds_ptr = NULL;
976     }
977     return 0;
978 }
979 
980 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
981                                           const fd_set *fds,
982                                           int n)
983 {
984     int i, nw, j, k;
985     abi_long v;
986     abi_ulong *target_fds;
987 
988     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
989     if (!(target_fds = lock_user(VERIFY_WRITE,
990                                  target_fds_addr,
991                                  sizeof(abi_ulong) * nw,
992                                  0)))
993         return -TARGET_EFAULT;
994 
995     k = 0;
996     for (i = 0; i < nw; i++) {
997         v = 0;
998         for (j = 0; j < TARGET_ABI_BITS; j++) {
999             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1000             k++;
1001         }
1002         __put_user(v, &target_fds[i]);
1003     }
1004 
1005     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1006 
1007     return 0;
1008 }
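/*
 * Illustration of the repacking done by the two helpers above: with
 * TARGET_ABI_BITS == 32 and n == 10, nw == 1, so a single guest
 * abi_ulong of 0x00000209 (bits 0, 3 and 9 set) marks guest fds 0, 3
 * and 9 in the host fd_set, and the reverse direction rebuilds the same
 * word from FD_ISSET() once the host call returns.
 */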
1009 #endif
1010 
1011 #if defined(__alpha__)
1012 #define HOST_HZ 1024
1013 #else
1014 #define HOST_HZ 100
1015 #endif
1016 
1017 static inline abi_long host_to_target_clock_t(long ticks)
1018 {
1019 #if HOST_HZ == TARGET_HZ
1020     return ticks;
1021 #else
1022     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1023 #endif
1024 }
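/*
 * Example: an Alpha host (HOST_HZ == 1024) reporting 2048 ticks to a
 * 100 Hz target yields (2048 * 100) / 1024 == 200 guest clock ticks;
 * when host and target tick rates match, the value passes through
 * untouched.
 */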
1025 
1026 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1027                                              const struct rusage *rusage)
1028 {
1029     struct target_rusage *target_rusage;
1030 
1031     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1032         return -TARGET_EFAULT;
1033     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1034     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1035     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1036     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1037     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1038     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1039     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1040     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1041     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1042     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1043     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1044     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1045     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1046     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1047     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1048     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1049     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1050     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1051     unlock_user_struct(target_rusage, target_addr, 1);
1052 
1053     return 0;
1054 }
1055 
1056 #ifdef TARGET_NR_setrlimit
1057 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1058 {
1059     abi_ulong target_rlim_swap;
1060     rlim_t result;
1061 
1062     target_rlim_swap = tswapal(target_rlim);
1063     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1064         return RLIM_INFINITY;
1065 
1066     result = target_rlim_swap;
1067     if (target_rlim_swap != (rlim_t)result)
1068         return RLIM_INFINITY;
1069 
1070     return result;
1071 }
1072 #endif
1073 
1074 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1075 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1076 {
1077     abi_ulong target_rlim_swap;
1078     abi_ulong result;
1079 
1080     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1081         target_rlim_swap = TARGET_RLIM_INFINITY;
1082     else
1083         target_rlim_swap = rlim;
1084     result = tswapal(target_rlim_swap);
1085 
1086     return result;
1087 }
1088 #endif
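/*
 * Illustration of the saturation in the two helpers above: a 64-bit
 * host reporting an rlim_cur of 1 TiB to a 32-bit guest cannot fit it
 * in an abi_ulong, so the guest sees TARGET_RLIM_INFINITY instead; in
 * the other direction TARGET_RLIM_INFINITY (or any value too large for
 * the host rlim_t) becomes RLIM_INFINITY.
 */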
1089 
1090 static inline int target_to_host_resource(int code)
1091 {
1092     switch (code) {
1093     case TARGET_RLIMIT_AS:
1094         return RLIMIT_AS;
1095     case TARGET_RLIMIT_CORE:
1096         return RLIMIT_CORE;
1097     case TARGET_RLIMIT_CPU:
1098         return RLIMIT_CPU;
1099     case TARGET_RLIMIT_DATA:
1100         return RLIMIT_DATA;
1101     case TARGET_RLIMIT_FSIZE:
1102         return RLIMIT_FSIZE;
1103     case TARGET_RLIMIT_LOCKS:
1104         return RLIMIT_LOCKS;
1105     case TARGET_RLIMIT_MEMLOCK:
1106         return RLIMIT_MEMLOCK;
1107     case TARGET_RLIMIT_MSGQUEUE:
1108         return RLIMIT_MSGQUEUE;
1109     case TARGET_RLIMIT_NICE:
1110         return RLIMIT_NICE;
1111     case TARGET_RLIMIT_NOFILE:
1112         return RLIMIT_NOFILE;
1113     case TARGET_RLIMIT_NPROC:
1114         return RLIMIT_NPROC;
1115     case TARGET_RLIMIT_RSS:
1116         return RLIMIT_RSS;
1117     case TARGET_RLIMIT_RTPRIO:
1118         return RLIMIT_RTPRIO;
1119 #ifdef RLIMIT_RTTIME
1120     case TARGET_RLIMIT_RTTIME:
1121         return RLIMIT_RTTIME;
1122 #endif
1123     case TARGET_RLIMIT_SIGPENDING:
1124         return RLIMIT_SIGPENDING;
1125     case TARGET_RLIMIT_STACK:
1126         return RLIMIT_STACK;
1127     default:
1128         return code;
1129     }
1130 }
1131 
1132 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1133                                               abi_ulong target_tv_addr)
1134 {
1135     struct target_timeval *target_tv;
1136 
1137     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1138         return -TARGET_EFAULT;
1139     }
1140 
1141     __get_user(tv->tv_sec, &target_tv->tv_sec);
1142     __get_user(tv->tv_usec, &target_tv->tv_usec);
1143 
1144     unlock_user_struct(target_tv, target_tv_addr, 0);
1145 
1146     return 0;
1147 }
1148 
1149 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1150                                             const struct timeval *tv)
1151 {
1152     struct target_timeval *target_tv;
1153 
1154     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1155         return -TARGET_EFAULT;
1156     }
1157 
1158     __put_user(tv->tv_sec, &target_tv->tv_sec);
1159     __put_user(tv->tv_usec, &target_tv->tv_usec);
1160 
1161     unlock_user_struct(target_tv, target_tv_addr, 1);
1162 
1163     return 0;
1164 }
1165 
1166 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1167 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1168                                                 abi_ulong target_tv_addr)
1169 {
1170     struct target__kernel_sock_timeval *target_tv;
1171 
1172     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1173         return -TARGET_EFAULT;
1174     }
1175 
1176     __get_user(tv->tv_sec, &target_tv->tv_sec);
1177     __get_user(tv->tv_usec, &target_tv->tv_usec);
1178 
1179     unlock_user_struct(target_tv, target_tv_addr, 0);
1180 
1181     return 0;
1182 }
1183 #endif
1184 
1185 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1186                                               const struct timeval *tv)
1187 {
1188     struct target__kernel_sock_timeval *target_tv;
1189 
1190     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1191         return -TARGET_EFAULT;
1192     }
1193 
1194     __put_user(tv->tv_sec, &target_tv->tv_sec);
1195     __put_user(tv->tv_usec, &target_tv->tv_usec);
1196 
1197     unlock_user_struct(target_tv, target_tv_addr, 1);
1198 
1199     return 0;
1200 }
1201 
1202 #if defined(TARGET_NR_futex) || \
1203     defined(TARGET_NR_rt_sigtimedwait) || \
1204     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1205     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1206     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1207     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1208     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1209     defined(TARGET_NR_timer_settime) || \
1210     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1211 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1212                                                abi_ulong target_addr)
1213 {
1214     struct target_timespec *target_ts;
1215 
1216     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1217         return -TARGET_EFAULT;
1218     }
1219     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1220     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1221     unlock_user_struct(target_ts, target_addr, 0);
1222     return 0;
1223 }
1224 #endif
1225 
1226 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1227     defined(TARGET_NR_timer_settime64) || \
1228     defined(TARGET_NR_mq_timedsend_time64) || \
1229     defined(TARGET_NR_mq_timedreceive_time64) || \
1230     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1231     defined(TARGET_NR_clock_nanosleep_time64) || \
1232     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1233     defined(TARGET_NR_utimensat) || \
1234     defined(TARGET_NR_utimensat_time64) || \
1235     defined(TARGET_NR_semtimedop_time64) || \
1236     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1237 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1238                                                  abi_ulong target_addr)
1239 {
1240     struct target__kernel_timespec *target_ts;
1241 
1242     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1243         return -TARGET_EFAULT;
1244     }
1245     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1246     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1247     /* in 32bit mode, this drops the padding */
1248     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1249     unlock_user_struct(target_ts, target_addr, 0);
1250     return 0;
1251 }
1252 #endif
1253 
1254 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1255                                                struct timespec *host_ts)
1256 {
1257     struct target_timespec *target_ts;
1258 
1259     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1260         return -TARGET_EFAULT;
1261     }
1262     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1263     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1264     unlock_user_struct(target_ts, target_addr, 1);
1265     return 0;
1266 }
1267 
1268 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1269                                                  struct timespec *host_ts)
1270 {
1271     struct target__kernel_timespec *target_ts;
1272 
1273     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1274         return -TARGET_EFAULT;
1275     }
1276     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1277     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1278     unlock_user_struct(target_ts, target_addr, 1);
1279     return 0;
1280 }
1281 
1282 #if defined(TARGET_NR_gettimeofday)
1283 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1284                                              struct timezone *tz)
1285 {
1286     struct target_timezone *target_tz;
1287 
1288     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1289         return -TARGET_EFAULT;
1290     }
1291 
1292     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1293     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1294 
1295     unlock_user_struct(target_tz, target_tz_addr, 1);
1296 
1297     return 0;
1298 }
1299 #endif
1300 
1301 #if defined(TARGET_NR_settimeofday)
1302 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1303                                                abi_ulong target_tz_addr)
1304 {
1305     struct target_timezone *target_tz;
1306 
1307     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1308         return -TARGET_EFAULT;
1309     }
1310 
1311     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1312     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1313 
1314     unlock_user_struct(target_tz, target_tz_addr, 0);
1315 
1316     return 0;
1317 }
1318 #endif
1319 
1320 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1321 #include <mqueue.h>
1322 
1323 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1324                                               abi_ulong target_mq_attr_addr)
1325 {
1326     struct target_mq_attr *target_mq_attr;
1327 
1328     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1329                           target_mq_attr_addr, 1))
1330         return -TARGET_EFAULT;
1331 
1332     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1333     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1334     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1335     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1336 
1337     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1338 
1339     return 0;
1340 }
1341 
1342 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1343                                             const struct mq_attr *attr)
1344 {
1345     struct target_mq_attr *target_mq_attr;
1346 
1347     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1348                           target_mq_attr_addr, 0))
1349         return -TARGET_EFAULT;
1350 
1351     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1352     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1353     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1354     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1355 
1356     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1357 
1358     return 0;
1359 }
1360 #endif
1361 
1362 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1363 /* do_select() must return target values and target errnos. */
1364 static abi_long do_select(int n,
1365                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1366                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1367 {
1368     fd_set rfds, wfds, efds;
1369     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1370     struct timeval tv;
1371     struct timespec ts, *ts_ptr;
1372     abi_long ret;
1373 
1374     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1375     if (ret) {
1376         return ret;
1377     }
1378     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1379     if (ret) {
1380         return ret;
1381     }
1382     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1383     if (ret) {
1384         return ret;
1385     }
1386 
1387     if (target_tv_addr) {
1388         if (copy_from_user_timeval(&tv, target_tv_addr))
1389             return -TARGET_EFAULT;
1390         ts.tv_sec = tv.tv_sec;
1391         ts.tv_nsec = tv.tv_usec * 1000;
1392         ts_ptr = &ts;
1393     } else {
1394         ts_ptr = NULL;
1395     }
1396 
1397     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1398                                   ts_ptr, NULL));
1399 
1400     if (!is_error(ret)) {
1401         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1402             return -TARGET_EFAULT;
1403         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1404             return -TARGET_EFAULT;
1405         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1406             return -TARGET_EFAULT;
1407 
1408         if (target_tv_addr) {
1409             tv.tv_sec = ts.tv_sec;
1410             tv.tv_usec = ts.tv_nsec / 1000;
1411             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1412                 return -TARGET_EFAULT;
1413             }
1414         }
1415     }
1416 
1417     return ret;
1418 }
1419 
1420 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1421 static abi_long do_old_select(abi_ulong arg1)
1422 {
1423     struct target_sel_arg_struct *sel;
1424     abi_ulong inp, outp, exp, tvp;
1425     long nsel;
1426 
1427     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1428         return -TARGET_EFAULT;
1429     }
1430 
1431     nsel = tswapal(sel->n);
1432     inp = tswapal(sel->inp);
1433     outp = tswapal(sel->outp);
1434     exp = tswapal(sel->exp);
1435     tvp = tswapal(sel->tvp);
1436 
1437     unlock_user_struct(sel, arg1, 0);
1438 
1439     return do_select(nsel, inp, outp, exp, tvp);
1440 }
1441 #endif
1442 #endif
1443 
1444 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1445 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1446                             abi_long arg4, abi_long arg5, abi_long arg6,
1447                             bool time64)
1448 {
1449     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1450     fd_set rfds, wfds, efds;
1451     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1452     struct timespec ts, *ts_ptr;
1453     abi_long ret;
1454 
1455     /*
1456      * The 6th arg is actually two args smashed together,
1457      * so we cannot use the C library.
1458      */
1459     struct {
1460         sigset_t *set;
1461         size_t size;
1462     } sig, *sig_ptr;
1463 
1464     abi_ulong arg_sigset, arg_sigsize, *arg7;
1465 
1466     n = arg1;
1467     rfd_addr = arg2;
1468     wfd_addr = arg3;
1469     efd_addr = arg4;
1470     ts_addr = arg5;
1471 
1472     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1473     if (ret) {
1474         return ret;
1475     }
1476     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1477     if (ret) {
1478         return ret;
1479     }
1480     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1481     if (ret) {
1482         return ret;
1483     }
1484 
1485     /*
1486      * This takes a timespec, and not a timeval, so we cannot
1487      * use the do_select() helper ...
1488      */
1489     if (ts_addr) {
1490         if (time64) {
1491             if (target_to_host_timespec64(&ts, ts_addr)) {
1492                 return -TARGET_EFAULT;
1493             }
1494         } else {
1495             if (target_to_host_timespec(&ts, ts_addr)) {
1496                 return -TARGET_EFAULT;
1497             }
1498         }
1499         ts_ptr = &ts;
1500     } else {
1501         ts_ptr = NULL;
1502     }
1503 
1504     /* Extract the two packed args for the sigset */
1505     sig_ptr = NULL;
1506     if (arg6) {
1507         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1508         if (!arg7) {
1509             return -TARGET_EFAULT;
1510         }
1511         arg_sigset = tswapal(arg7[0]);
1512         arg_sigsize = tswapal(arg7[1]);
1513         unlock_user(arg7, arg6, 0);
1514 
1515         if (arg_sigset) {
1516             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1517             if (ret != 0) {
1518                 return ret;
1519             }
1520             sig_ptr = &sig;
1521             sig.size = SIGSET_T_SIZE;
1522         }
1523     }
1524 
1525     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1526                                   ts_ptr, sig_ptr));
1527 
1528     if (sig_ptr) {
1529         finish_sigsuspend_mask(ret);
1530     }
1531 
1532     if (!is_error(ret)) {
1533         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1534             return -TARGET_EFAULT;
1535         }
1536         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1537             return -TARGET_EFAULT;
1538         }
1539         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1540             return -TARGET_EFAULT;
1541         }
1542         if (time64) {
1543             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1544                 return -TARGET_EFAULT;
1545             }
1546         } else {
1547             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1548                 return -TARGET_EFAULT;
1549             }
1550         }
1551     }
1552     return ret;
1553 }
1554 #endif
1555 
1556 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1557     defined(TARGET_NR_ppoll_time64)
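/* Emulate poll(), ppoll() and ppoll_time64(). For ppoll the timeout is a
 * timespec and an optional signal mask is applied around the wait; for
 * plain poll the millisecond timeout is converted to a timespec first.
 */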
1558 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1559                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1560 {
1561     struct target_pollfd *target_pfd;
1562     unsigned int nfds = arg2;
1563     struct pollfd *pfd;
1564     unsigned int i;
1565     abi_long ret;
1566 
1567     pfd = NULL;
1568     target_pfd = NULL;
1569     if (nfds) {
1570         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1571             return -TARGET_EINVAL;
1572         }
1573         target_pfd = lock_user(VERIFY_WRITE, arg1,
1574                                sizeof(struct target_pollfd) * nfds, 1);
1575         if (!target_pfd) {
1576             return -TARGET_EFAULT;
1577         }
1578 
1579         pfd = alloca(sizeof(struct pollfd) * nfds);
1580         for (i = 0; i < nfds; i++) {
1581             pfd[i].fd = tswap32(target_pfd[i].fd);
1582             pfd[i].events = tswap16(target_pfd[i].events);
1583         }
1584     }
1585     if (ppoll) {
1586         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1587         sigset_t *set = NULL;
1588 
1589         if (arg3) {
1590             if (time64) {
1591                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1592                     unlock_user(target_pfd, arg1, 0);
1593                     return -TARGET_EFAULT;
1594                 }
1595             } else {
1596                 if (target_to_host_timespec(timeout_ts, arg3)) {
1597                     unlock_user(target_pfd, arg1, 0);
1598                     return -TARGET_EFAULT;
1599                 }
1600             }
1601         } else {
1602             timeout_ts = NULL;
1603         }
1604 
1605         if (arg4) {
1606             ret = process_sigsuspend_mask(&set, arg4, arg5);
1607             if (ret != 0) {
1608                 unlock_user(target_pfd, arg1, 0);
1609                 return ret;
1610             }
1611         }
1612 
1613         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1614                                    set, SIGSET_T_SIZE));
1615 
1616         if (set) {
1617             finish_sigsuspend_mask(ret);
1618         }
1619         if (!is_error(ret) && arg3) {
1620             if (time64) {
1621                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1622                     return -TARGET_EFAULT;
1623                 }
1624             } else {
1625                 if (host_to_target_timespec(arg3, timeout_ts)) {
1626                     return -TARGET_EFAULT;
1627                 }
1628             }
1629         }
1630     } else {
1631         struct timespec ts, *pts;
1632 
1633         if (arg3 >= 0) {
1634             /* Convert ms to secs, ns */
1635             ts.tv_sec = arg3 / 1000;
1636             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1637             pts = &ts;
1638         } else {
1639             /* -ve poll() timeout means "infinite" */
1640             pts = NULL;
1641         }
1642         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1643     }
1644 
1645     if (!is_error(ret)) {
1646         for (i = 0; i < nfds; i++) {
1647             target_pfd[i].revents = tswap16(pfd[i].revents);
1648         }
1649     }
1650     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1651     return ret;
1652 }
1653 #endif
1654 
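/* Emulate pipe() and pipe2(). Some targets return the second descriptor
 * in a CPU register instead of through the user buffer (see below).
 */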
1655 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1656                         int flags, int is_pipe2)
1657 {
1658     int host_pipe[2];
1659     abi_long ret;
1660     ret = pipe2(host_pipe, flags);
1661 
1662     if (is_error(ret))
1663         return get_errno(ret);
1664 
1665     /* Several targets have special calling conventions for the original
1666        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1667     if (!is_pipe2) {
1668 #if defined(TARGET_ALPHA)
1669         cpu_env->ir[IR_A4] = host_pipe[1];
1670         return host_pipe[0];
1671 #elif defined(TARGET_MIPS)
1672         cpu_env->active_tc.gpr[3] = host_pipe[1];
1673         return host_pipe[0];
1674 #elif defined(TARGET_SH4)
1675         cpu_env->gregs[1] = host_pipe[1];
1676         return host_pipe[0];
1677 #elif defined(TARGET_SPARC)
1678         cpu_env->regwptr[1] = host_pipe[1];
1679         return host_pipe[0];
1680 #endif
1681     }
1682 
1683     if (put_user_s32(host_pipe[0], pipedes)
1684         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1685         return -TARGET_EFAULT;
1686     return get_errno(ret);
1687 }
1688 
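/* Convert a target struct ip_mreq or ip_mreqn at target_addr into the
 * host ip_mreqn pointed to by mreqn.
 */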
1689 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1690                                               abi_ulong target_addr,
1691                                               socklen_t len)
1692 {
1693     struct target_ip_mreqn *target_smreqn;
1694 
1695     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1696     if (!target_smreqn)
1697         return -TARGET_EFAULT;
1698     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1699     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1700     if (len == sizeof(struct target_ip_mreqn))
1701         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1702     unlock_user(target_smreqn, target_addr, 0);
1703 
1704     return 0;
1705 }
1706 
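/* Copy a sockaddr of the given length from the guest into *addr,
 * byte-swapping the address family and any family-specific fields.
 */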
1707 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1708                                                abi_ulong target_addr,
1709                                                socklen_t len)
1710 {
1711     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1712     sa_family_t sa_family;
1713     struct target_sockaddr *target_saddr;
1714 
1715     if (fd_trans_target_to_host_addr(fd)) {
1716         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1717     }
1718 
1719     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1720     if (!target_saddr)
1721         return -TARGET_EFAULT;
1722 
1723     sa_family = tswap16(target_saddr->sa_family);
1724 
1725     /* Oops. The caller might send an incomplete sun_path; sun_path
1726      * must be terminated by \0 (see the manual page), but
1727      * unfortunately it is quite common to specify the sockaddr_un
1728      * length as "strlen(x->sun_path)" when it should be
1729      * "strlen(...) + 1". We'll fix that here if needed.
1730      * The Linux kernel applies a similar fixup.
1731      */
1732 
1733     if (sa_family == AF_UNIX) {
1734         if (len < unix_maxlen && len > 0) {
1735             char *cp = (char *)target_saddr;
1736 
1737             if (cp[len - 1] && !cp[len])
1738                 len++;
1739         }
1740         if (len > unix_maxlen)
1741             len = unix_maxlen;
1742     }
1743 
1744     memcpy(addr, target_saddr, len);
1745     addr->sa_family = sa_family;
1746     if (sa_family == AF_NETLINK) {
1747         struct sockaddr_nl *nladdr;
1748 
1749         nladdr = (struct sockaddr_nl *)addr;
1750         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1751         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1752     } else if (sa_family == AF_PACKET) {
1753         struct target_sockaddr_ll *lladdr;
1754 
1755         lladdr = (struct target_sockaddr_ll *)addr;
1756         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1757         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1758     }
1759     unlock_user(target_saddr, target_addr, 0);
1760 
1761     return 0;
1762 }
1763 
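/* Copy a host sockaddr back out to the guest, byte-swapping the address
 * family and family-specific fields as needed.
 */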
1764 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1765                                                struct sockaddr *addr,
1766                                                socklen_t len)
1767 {
1768     struct target_sockaddr *target_saddr;
1769 
1770     if (len == 0) {
1771         return 0;
1772     }
1773     assert(addr);
1774 
1775     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1776     if (!target_saddr)
1777         return -TARGET_EFAULT;
1778     memcpy(target_saddr, addr, len);
1779     if (len >= offsetof(struct target_sockaddr, sa_family) +
1780         sizeof(target_saddr->sa_family)) {
1781         target_saddr->sa_family = tswap16(addr->sa_family);
1782     }
1783     if (addr->sa_family == AF_NETLINK &&
1784         len >= sizeof(struct target_sockaddr_nl)) {
1785         struct target_sockaddr_nl *target_nl =
1786                (struct target_sockaddr_nl *)target_saddr;
1787         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1788         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1789     } else if (addr->sa_family == AF_PACKET) {
1790         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1791         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1792         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1793     } else if (addr->sa_family == AF_INET6 &&
1794                len >= sizeof(struct target_sockaddr_in6)) {
1795         struct target_sockaddr_in6 *target_in6 =
1796                (struct target_sockaddr_in6 *)target_saddr;
1797         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1798     }
1799     unlock_user(target_saddr, target_addr, len);
1800 
1801     return 0;
1802 }
1803 
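/* Convert the ancillary data (control messages) of a guest msghdr into
 * the host control buffer already set up in *msgh.
 */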
1804 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1805                                            struct target_msghdr *target_msgh)
1806 {
1807     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1808     abi_long msg_controllen;
1809     abi_ulong target_cmsg_addr;
1810     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1811     socklen_t space = 0;
1812 
1813     msg_controllen = tswapal(target_msgh->msg_controllen);
1814     if (msg_controllen < sizeof (struct target_cmsghdr))
1815         goto the_end;
1816     target_cmsg_addr = tswapal(target_msgh->msg_control);
1817     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1818     target_cmsg_start = target_cmsg;
1819     if (!target_cmsg)
1820         return -TARGET_EFAULT;
1821 
1822     while (cmsg && target_cmsg) {
1823         void *data = CMSG_DATA(cmsg);
1824         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1825 
1826         int len = tswapal(target_cmsg->cmsg_len)
1827             - sizeof(struct target_cmsghdr);
1828 
1829         space += CMSG_SPACE(len);
1830         if (space > msgh->msg_controllen) {
1831             space -= CMSG_SPACE(len);
1832             /* This is a QEMU bug, since we allocated the payload
1833              * area ourselves (unlike overflow in host-to-target
1834              * conversion, which is just the guest giving us a buffer
1835              * that's too small). It can't happen for the payload types
1836              * we currently support; if it becomes an issue in future
1837              * we would need to improve our allocation strategy to
1838              * something more intelligent than "twice the size of the
1839              * target buffer we're reading from".
1840              */
1841             qemu_log_mask(LOG_UNIMP,
1842                           ("Unsupported ancillary data %d/%d: "
1843                            "unhandled msg size\n"),
1844                           tswap32(target_cmsg->cmsg_level),
1845                           tswap32(target_cmsg->cmsg_type));
1846             break;
1847         }
1848 
1849         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1850             cmsg->cmsg_level = SOL_SOCKET;
1851         } else {
1852             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1853         }
1854         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1855         cmsg->cmsg_len = CMSG_LEN(len);
1856 
1857         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1858             int *fd = (int *)data;
1859             int *target_fd = (int *)target_data;
1860             int i, numfds = len / sizeof(int);
1861 
1862             for (i = 0; i < numfds; i++) {
1863                 __get_user(fd[i], target_fd + i);
1864             }
1865         } else if (cmsg->cmsg_level == SOL_SOCKET
1866                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1867             struct ucred *cred = (struct ucred *)data;
1868             struct target_ucred *target_cred =
1869                 (struct target_ucred *)target_data;
1870 
1871             __get_user(cred->pid, &target_cred->pid);
1872             __get_user(cred->uid, &target_cred->uid);
1873             __get_user(cred->gid, &target_cred->gid);
1874         } else {
1875             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1876                           cmsg->cmsg_level, cmsg->cmsg_type);
1877             memcpy(data, target_data, len);
1878         }
1879 
1880         cmsg = CMSG_NXTHDR(msgh, cmsg);
1881         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1882                                          target_cmsg_start);
1883     }
1884     unlock_user(target_cmsg, target_cmsg_addr, 0);
1885  the_end:
1886     msgh->msg_controllen = space;
1887     return 0;
1888 }
1889 
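/* Convert host ancillary data back into the guest control buffer,
 * truncating and setting MSG_CTRUNC if the guest buffer is too small.
 */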
1890 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1891                                            struct msghdr *msgh)
1892 {
1893     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1894     abi_long msg_controllen;
1895     abi_ulong target_cmsg_addr;
1896     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1897     socklen_t space = 0;
1898 
1899     msg_controllen = tswapal(target_msgh->msg_controllen);
1900     if (msg_controllen < sizeof (struct target_cmsghdr))
1901         goto the_end;
1902     target_cmsg_addr = tswapal(target_msgh->msg_control);
1903     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1904     target_cmsg_start = target_cmsg;
1905     if (!target_cmsg)
1906         return -TARGET_EFAULT;
1907 
1908     while (cmsg && target_cmsg) {
1909         void *data = CMSG_DATA(cmsg);
1910         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1911 
1912         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1913         int tgt_len, tgt_space;
1914 
1915         /* We never copy a half-header but may copy half-data;
1916          * this is Linux's behaviour in put_cmsg(). Note that
1917          * truncation here is a guest problem (which we report
1918          * to the guest via the CTRUNC bit), unlike truncation
1919          * in target_to_host_cmsg, which is a QEMU bug.
1920          */
1921         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1922             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1923             break;
1924         }
1925 
1926         if (cmsg->cmsg_level == SOL_SOCKET) {
1927             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1928         } else {
1929             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1930         }
1931         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1932 
1933         /* Payload types which need a different size of payload on
1934          * the target must adjust tgt_len here.
1935          */
1936         tgt_len = len;
1937         switch (cmsg->cmsg_level) {
1938         case SOL_SOCKET:
1939             switch (cmsg->cmsg_type) {
1940             case SO_TIMESTAMP:
1941                 tgt_len = sizeof(struct target_timeval);
1942                 break;
1943             default:
1944                 break;
1945             }
1946             break;
1947         default:
1948             break;
1949         }
1950 
1951         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1952             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1953             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1954         }
1955 
1956         /* We must now copy-and-convert len bytes of payload
1957          * into tgt_len bytes of destination space. Bear in mind
1958          * that in both source and destination we may be dealing
1959          * with a truncated value!
1960          */
1961         switch (cmsg->cmsg_level) {
1962         case SOL_SOCKET:
1963             switch (cmsg->cmsg_type) {
1964             case SCM_RIGHTS:
1965             {
1966                 int *fd = (int *)data;
1967                 int *target_fd = (int *)target_data;
1968                 int i, numfds = tgt_len / sizeof(int);
1969 
1970                 for (i = 0; i < numfds; i++) {
1971                     __put_user(fd[i], target_fd + i);
1972                 }
1973                 break;
1974             }
1975             case SO_TIMESTAMP:
1976             {
1977                 struct timeval *tv = (struct timeval *)data;
1978                 struct target_timeval *target_tv =
1979                     (struct target_timeval *)target_data;
1980 
1981                 if (len != sizeof(struct timeval) ||
1982                     tgt_len != sizeof(struct target_timeval)) {
1983                     goto unimplemented;
1984                 }
1985 
1986                 /* copy struct timeval to target */
1987                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1988                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1989                 break;
1990             }
1991             case SCM_CREDENTIALS:
1992             {
1993                 struct ucred *cred = (struct ucred *)data;
1994                 struct target_ucred *target_cred =
1995                     (struct target_ucred *)target_data;
1996 
1997                 __put_user(cred->pid, &target_cred->pid);
1998                 __put_user(cred->uid, &target_cred->uid);
1999                 __put_user(cred->gid, &target_cred->gid);
2000                 break;
2001             }
2002             default:
2003                 goto unimplemented;
2004             }
2005             break;
2006 
2007         case SOL_IP:
2008             switch (cmsg->cmsg_type) {
2009             case IP_TTL:
2010             {
2011                 uint32_t *v = (uint32_t *)data;
2012                 uint32_t *t_int = (uint32_t *)target_data;
2013 
2014                 if (len != sizeof(uint32_t) ||
2015                     tgt_len != sizeof(uint32_t)) {
2016                     goto unimplemented;
2017                 }
2018                 __put_user(*v, t_int);
2019                 break;
2020             }
2021             case IP_RECVERR:
2022             {
2023                 struct errhdr_t {
2024                    struct sock_extended_err ee;
2025                    struct sockaddr_in offender;
2026                 };
2027                 struct errhdr_t *errh = (struct errhdr_t *)data;
2028                 struct errhdr_t *target_errh =
2029                     (struct errhdr_t *)target_data;
2030 
2031                 if (len != sizeof(struct errhdr_t) ||
2032                     tgt_len != sizeof(struct errhdr_t)) {
2033                     goto unimplemented;
2034                 }
2035                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2036                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2037                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2038                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2039                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2040                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2041                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2042                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2043                     (void *) &errh->offender, sizeof(errh->offender));
2044                 break;
2045             }
2046             default:
2047                 goto unimplemented;
2048             }
2049             break;
2050 
2051         case SOL_IPV6:
2052             switch (cmsg->cmsg_type) {
2053             case IPV6_HOPLIMIT:
2054             {
2055                 uint32_t *v = (uint32_t *)data;
2056                 uint32_t *t_int = (uint32_t *)target_data;
2057 
2058                 if (len != sizeof(uint32_t) ||
2059                     tgt_len != sizeof(uint32_t)) {
2060                     goto unimplemented;
2061                 }
2062                 __put_user(*v, t_int);
2063                 break;
2064             }
2065             case IPV6_RECVERR:
2066             {
2067                 struct errhdr6_t {
2068                    struct sock_extended_err ee;
2069                    struct sockaddr_in6 offender;
2070                 };
2071                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2072                 struct errhdr6_t *target_errh =
2073                     (struct errhdr6_t *)target_data;
2074 
2075                 if (len != sizeof(struct errhdr6_t) ||
2076                     tgt_len != sizeof(struct errhdr6_t)) {
2077                     goto unimplemented;
2078                 }
2079                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2080                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2081                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2082                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2083                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2084                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2085                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2086                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2087                     (void *) &errh->offender, sizeof(errh->offender));
2088                 break;
2089             }
2090             default:
2091                 goto unimplemented;
2092             }
2093             break;
2094 
2095         default:
2096         unimplemented:
2097             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2098                           cmsg->cmsg_level, cmsg->cmsg_type);
2099             memcpy(target_data, data, MIN(len, tgt_len));
2100             if (tgt_len > len) {
2101                 memset(target_data + len, 0, tgt_len - len);
2102             }
2103         }
2104 
2105         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2106         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2107         if (msg_controllen < tgt_space) {
2108             tgt_space = msg_controllen;
2109         }
2110         msg_controllen -= tgt_space;
2111         space += tgt_space;
2112         cmsg = CMSG_NXTHDR(msgh, cmsg);
2113         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2114                                          target_cmsg_start);
2115     }
2116     unlock_user(target_cmsg, target_cmsg_addr, space);
2117  the_end:
2118     target_msgh->msg_controllen = tswapal(space);
2119     return 0;
2120 }
2121 
2122 /* do_setsockopt() must return target values and target errnos. */
2123 static abi_long do_setsockopt(int sockfd, int level, int optname,
2124                               abi_ulong optval_addr, socklen_t optlen)
2125 {
2126     abi_long ret;
2127     int val;
2128     struct ip_mreqn *ip_mreq;
2129     struct ip_mreq_source *ip_mreq_source;
2130 
2131     switch(level) {
2132     case SOL_TCP:
2133     case SOL_UDP:
2134         /* TCP and UDP options all take an 'int' value.  */
2135         if (optlen < sizeof(uint32_t))
2136             return -TARGET_EINVAL;
2137 
2138         if (get_user_u32(val, optval_addr))
2139             return -TARGET_EFAULT;
2140         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2141         break;
2142     case SOL_IP:
2143         switch(optname) {
2144         case IP_TOS:
2145         case IP_TTL:
2146         case IP_HDRINCL:
2147         case IP_ROUTER_ALERT:
2148         case IP_RECVOPTS:
2149         case IP_RETOPTS:
2150         case IP_PKTINFO:
2151         case IP_MTU_DISCOVER:
2152         case IP_RECVERR:
2153         case IP_RECVTTL:
2154         case IP_RECVTOS:
2155 #ifdef IP_FREEBIND
2156         case IP_FREEBIND:
2157 #endif
2158         case IP_MULTICAST_TTL:
2159         case IP_MULTICAST_LOOP:
2160             val = 0;
2161             if (optlen >= sizeof(uint32_t)) {
2162                 if (get_user_u32(val, optval_addr))
2163                     return -TARGET_EFAULT;
2164             } else if (optlen >= 1) {
2165                 if (get_user_u8(val, optval_addr))
2166                     return -TARGET_EFAULT;
2167             }
2168             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2169             break;
2170         case IP_ADD_MEMBERSHIP:
2171         case IP_DROP_MEMBERSHIP:
2172             if (optlen < sizeof (struct target_ip_mreq) ||
2173                 optlen > sizeof (struct target_ip_mreqn))
2174                 return -TARGET_EINVAL;
2175 
2176             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2177             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2178             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2179             break;
2180 
2181         case IP_BLOCK_SOURCE:
2182         case IP_UNBLOCK_SOURCE:
2183         case IP_ADD_SOURCE_MEMBERSHIP:
2184         case IP_DROP_SOURCE_MEMBERSHIP:
2185             if (optlen != sizeof (struct target_ip_mreq_source))
2186                 return -TARGET_EINVAL;
2187 
2188             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2189             if (!ip_mreq_source) {
2190                 return -TARGET_EFAULT;
2191             }
2192             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2193             unlock_user(ip_mreq_source, optval_addr, 0);
2194             break;
2195 
2196         default:
2197             goto unimplemented;
2198         }
2199         break;
2200     case SOL_IPV6:
2201         switch (optname) {
2202         case IPV6_MTU_DISCOVER:
2203         case IPV6_MTU:
2204         case IPV6_V6ONLY:
2205         case IPV6_RECVPKTINFO:
2206         case IPV6_UNICAST_HOPS:
2207         case IPV6_MULTICAST_HOPS:
2208         case IPV6_MULTICAST_LOOP:
2209         case IPV6_RECVERR:
2210         case IPV6_RECVHOPLIMIT:
2211         case IPV6_2292HOPLIMIT:
2212         case IPV6_CHECKSUM:
2213         case IPV6_ADDRFORM:
2214         case IPV6_2292PKTINFO:
2215         case IPV6_RECVTCLASS:
2216         case IPV6_RECVRTHDR:
2217         case IPV6_2292RTHDR:
2218         case IPV6_RECVHOPOPTS:
2219         case IPV6_2292HOPOPTS:
2220         case IPV6_RECVDSTOPTS:
2221         case IPV6_2292DSTOPTS:
2222         case IPV6_TCLASS:
2223         case IPV6_ADDR_PREFERENCES:
2224 #ifdef IPV6_RECVPATHMTU
2225         case IPV6_RECVPATHMTU:
2226 #endif
2227 #ifdef IPV6_TRANSPARENT
2228         case IPV6_TRANSPARENT:
2229 #endif
2230 #ifdef IPV6_FREEBIND
2231         case IPV6_FREEBIND:
2232 #endif
2233 #ifdef IPV6_RECVORIGDSTADDR
2234         case IPV6_RECVORIGDSTADDR:
2235 #endif
2236             val = 0;
2237             if (optlen < sizeof(uint32_t)) {
2238                 return -TARGET_EINVAL;
2239             }
2240             if (get_user_u32(val, optval_addr)) {
2241                 return -TARGET_EFAULT;
2242             }
2243             ret = get_errno(setsockopt(sockfd, level, optname,
2244                                        &val, sizeof(val)));
2245             break;
2246         case IPV6_PKTINFO:
2247         {
2248             struct in6_pktinfo pki;
2249 
2250             if (optlen < sizeof(pki)) {
2251                 return -TARGET_EINVAL;
2252             }
2253 
2254             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2255                 return -TARGET_EFAULT;
2256             }
2257 
2258             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2259 
2260             ret = get_errno(setsockopt(sockfd, level, optname,
2261                                        &pki, sizeof(pki)));
2262             break;
2263         }
2264         case IPV6_ADD_MEMBERSHIP:
2265         case IPV6_DROP_MEMBERSHIP:
2266         {
2267             struct ipv6_mreq ipv6mreq;
2268 
2269             if (optlen < sizeof(ipv6mreq)) {
2270                 return -TARGET_EINVAL;
2271             }
2272 
2273             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2274                 return -TARGET_EFAULT;
2275             }
2276 
2277             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2278 
2279             ret = get_errno(setsockopt(sockfd, level, optname,
2280                                        &ipv6mreq, sizeof(ipv6mreq)));
2281             break;
2282         }
2283         default:
2284             goto unimplemented;
2285         }
2286         break;
2287     case SOL_ICMPV6:
2288         switch (optname) {
2289         case ICMPV6_FILTER:
2290         {
2291             struct icmp6_filter icmp6f;
2292 
2293             if (optlen > sizeof(icmp6f)) {
2294                 optlen = sizeof(icmp6f);
2295             }
2296 
2297             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2298                 return -TARGET_EFAULT;
2299             }
2300 
2301             for (val = 0; val < 8; val++) {
2302                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2303             }
2304 
2305             ret = get_errno(setsockopt(sockfd, level, optname,
2306                                        &icmp6f, optlen));
2307             break;
2308         }
2309         default:
2310             goto unimplemented;
2311         }
2312         break;
2313     case SOL_RAW:
2314         switch (optname) {
2315         case ICMP_FILTER:
2316         case IPV6_CHECKSUM:
2317             /* these take a u32 value */
2318             if (optlen < sizeof(uint32_t)) {
2319                 return -TARGET_EINVAL;
2320             }
2321 
2322             if (get_user_u32(val, optval_addr)) {
2323                 return -TARGET_EFAULT;
2324             }
2325             ret = get_errno(setsockopt(sockfd, level, optname,
2326                                        &val, sizeof(val)));
2327             break;
2328 
2329         default:
2330             goto unimplemented;
2331         }
2332         break;
2333 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2334     case SOL_ALG:
2335         switch (optname) {
2336         case ALG_SET_KEY:
2337         {
2338             char *alg_key = g_malloc(optlen);
2339 
2340             if (!alg_key) {
2341                 return -TARGET_ENOMEM;
2342             }
2343             if (copy_from_user(alg_key, optval_addr, optlen)) {
2344                 g_free(alg_key);
2345                 return -TARGET_EFAULT;
2346             }
2347             ret = get_errno(setsockopt(sockfd, level, optname,
2348                                        alg_key, optlen));
2349             g_free(alg_key);
2350             break;
2351         }
2352         case ALG_SET_AEAD_AUTHSIZE:
2353         {
2354             ret = get_errno(setsockopt(sockfd, level, optname,
2355                                        NULL, optlen));
2356             break;
2357         }
2358         default:
2359             goto unimplemented;
2360         }
2361         break;
2362 #endif
2363     case TARGET_SOL_SOCKET:
2364         switch (optname) {
2365         case TARGET_SO_RCVTIMEO:
2366         {
2367                 struct timeval tv;
2368 
2369                 optname = SO_RCVTIMEO;
2370 
2371 set_timeout:
2372                 if (optlen != sizeof(struct target_timeval)) {
2373                     return -TARGET_EINVAL;
2374                 }
2375 
2376                 if (copy_from_user_timeval(&tv, optval_addr)) {
2377                     return -TARGET_EFAULT;
2378                 }
2379 
2380                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2381                                 &tv, sizeof(tv)));
2382                 return ret;
2383         }
2384         case TARGET_SO_SNDTIMEO:
2385                 optname = SO_SNDTIMEO;
2386                 goto set_timeout;
2387         case TARGET_SO_ATTACH_FILTER:
2388         {
2389                 struct target_sock_fprog *tfprog;
2390                 struct target_sock_filter *tfilter;
2391                 struct sock_fprog fprog;
2392                 struct sock_filter *filter;
2393                 int i;
2394 
2395                 if (optlen != sizeof(*tfprog)) {
2396                     return -TARGET_EINVAL;
2397                 }
2398                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2399                     return -TARGET_EFAULT;
2400                 }
2401                 if (!lock_user_struct(VERIFY_READ, tfilter,
2402                                       tswapal(tfprog->filter), 0)) {
2403                     unlock_user_struct(tfprog, optval_addr, 1);
2404                     return -TARGET_EFAULT;
2405                 }
2406 
2407                 fprog.len = tswap16(tfprog->len);
2408                 filter = g_try_new(struct sock_filter, fprog.len);
2409                 if (filter == NULL) {
2410                     unlock_user_struct(tfilter, tfprog->filter, 1);
2411                     unlock_user_struct(tfprog, optval_addr, 1);
2412                     return -TARGET_ENOMEM;
2413                 }
2414                 for (i = 0; i < fprog.len; i++) {
2415                     filter[i].code = tswap16(tfilter[i].code);
2416                     filter[i].jt = tfilter[i].jt;
2417                     filter[i].jf = tfilter[i].jf;
2418                     filter[i].k = tswap32(tfilter[i].k);
2419                 }
2420                 fprog.filter = filter;
2421 
2422                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2423                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2424                 g_free(filter);
2425 
2426                 unlock_user_struct(tfilter, tfprog->filter, 1);
2427                 unlock_user_struct(tfprog, optval_addr, 1);
2428                 return ret;
2429         }
2430         case TARGET_SO_BINDTODEVICE:
2431         {
2432                 char *dev_ifname, *addr_ifname;
2433 
2434                 if (optlen > IFNAMSIZ - 1) {
2435                     optlen = IFNAMSIZ - 1;
2436                 }
2437                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2438                 if (!dev_ifname) {
2439                     return -TARGET_EFAULT;
2440                 }
2441                 optname = SO_BINDTODEVICE;
2442                 addr_ifname = alloca(IFNAMSIZ);
2443                 memcpy(addr_ifname, dev_ifname, optlen);
2444                 addr_ifname[optlen] = 0;
2445                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2446                                            addr_ifname, optlen));
2447                 unlock_user(dev_ifname, optval_addr, 0);
2448                 return ret;
2449         }
2450         case TARGET_SO_LINGER:
2451         {
2452                 struct linger lg;
2453                 struct target_linger *tlg;
2454 
2455                 if (optlen != sizeof(struct target_linger)) {
2456                     return -TARGET_EINVAL;
2457                 }
2458                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2459                     return -TARGET_EFAULT;
2460                 }
2461                 __get_user(lg.l_onoff, &tlg->l_onoff);
2462                 __get_user(lg.l_linger, &tlg->l_linger);
2463                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2464                                 &lg, sizeof(lg)));
2465                 unlock_user_struct(tlg, optval_addr, 0);
2466                 return ret;
2467         }
2468         /* Options with 'int' argument.  */
2469         case TARGET_SO_DEBUG:
2470                 optname = SO_DEBUG;
2471                 break;
2472         case TARGET_SO_REUSEADDR:
2473                 optname = SO_REUSEADDR;
2474                 break;
2475 #ifdef SO_REUSEPORT
2476         case TARGET_SO_REUSEPORT:
2477                 optname = SO_REUSEPORT;
2478                 break;
2479 #endif
2480         case TARGET_SO_TYPE:
2481                 optname = SO_TYPE;
2482                 break;
2483         case TARGET_SO_ERROR:
2484                 optname = SO_ERROR;
2485                 break;
2486         case TARGET_SO_DONTROUTE:
2487                 optname = SO_DONTROUTE;
2488                 break;
2489         case TARGET_SO_BROADCAST:
2490                 optname = SO_BROADCAST;
2491                 break;
2492         case TARGET_SO_SNDBUF:
2493                 optname = SO_SNDBUF;
2494                 break;
2495         case TARGET_SO_SNDBUFFORCE:
2496                 optname = SO_SNDBUFFORCE;
2497                 break;
2498         case TARGET_SO_RCVBUF:
2499                 optname = SO_RCVBUF;
2500                 break;
2501         case TARGET_SO_RCVBUFFORCE:
2502                 optname = SO_RCVBUFFORCE;
2503                 break;
2504         case TARGET_SO_KEEPALIVE:
2505                 optname = SO_KEEPALIVE;
2506                 break;
2507         case TARGET_SO_OOBINLINE:
2508                 optname = SO_OOBINLINE;
2509                 break;
2510         case TARGET_SO_NO_CHECK:
2511                 optname = SO_NO_CHECK;
2512                 break;
2513         case TARGET_SO_PRIORITY:
2514                 optname = SO_PRIORITY;
2515                 break;
2516 #ifdef SO_BSDCOMPAT
2517         case TARGET_SO_BSDCOMPAT:
2518                 optname = SO_BSDCOMPAT;
2519                 break;
2520 #endif
2521         case TARGET_SO_PASSCRED:
2522                 optname = SO_PASSCRED;
2523                 break;
2524         case TARGET_SO_PASSSEC:
2525                 optname = SO_PASSSEC;
2526                 break;
2527         case TARGET_SO_TIMESTAMP:
2528                 optname = SO_TIMESTAMP;
2529                 break;
2530         case TARGET_SO_RCVLOWAT:
2531                 optname = SO_RCVLOWAT;
2532                 break;
2533         default:
2534             goto unimplemented;
2535         }
2536         if (optlen < sizeof(uint32_t))
2537             return -TARGET_EINVAL;
2538 
2539         if (get_user_u32(val, optval_addr))
2540             return -TARGET_EFAULT;
2541         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2542         break;
2543 #ifdef SOL_NETLINK
2544     case SOL_NETLINK:
2545         switch (optname) {
2546         case NETLINK_PKTINFO:
2547         case NETLINK_ADD_MEMBERSHIP:
2548         case NETLINK_DROP_MEMBERSHIP:
2549         case NETLINK_BROADCAST_ERROR:
2550         case NETLINK_NO_ENOBUFS:
2551 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2552         case NETLINK_LISTEN_ALL_NSID:
2553         case NETLINK_CAP_ACK:
2554 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2555 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2556         case NETLINK_EXT_ACK:
2557 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2558 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2559         case NETLINK_GET_STRICT_CHK:
2560 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2561             break;
2562         default:
2563             goto unimplemented;
2564         }
2565         val = 0;
2566         if (optlen < sizeof(uint32_t)) {
2567             return -TARGET_EINVAL;
2568         }
2569         if (get_user_u32(val, optval_addr)) {
2570             return -TARGET_EFAULT;
2571         }
2572         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2573                                    sizeof(val)));
2574         break;
2575 #endif /* SOL_NETLINK */
2576     default:
2577     unimplemented:
2578         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2579                       level, optname);
2580         ret = -TARGET_ENOPROTOOPT;
2581     }
2582     return ret;
2583 }
2584 
2585 /* do_getsockopt() must return target values and target errnos. */
2586 static abi_long do_getsockopt(int sockfd, int level, int optname,
2587                               abi_ulong optval_addr, abi_ulong optlen)
2588 {
2589     abi_long ret;
2590     int len, val;
2591     socklen_t lv;
2592 
2593     switch(level) {
2594     case TARGET_SOL_SOCKET:
2595         level = SOL_SOCKET;
2596         switch (optname) {
2597         /* These don't just return a single integer */
2598         case TARGET_SO_PEERNAME:
2599             goto unimplemented;
2600         case TARGET_SO_RCVTIMEO: {
2601             struct timeval tv;
2602             socklen_t tvlen;
2603 
2604             optname = SO_RCVTIMEO;
2605 
2606 get_timeout:
2607             if (get_user_u32(len, optlen)) {
2608                 return -TARGET_EFAULT;
2609             }
2610             if (len < 0) {
2611                 return -TARGET_EINVAL;
2612             }
2613 
2614             tvlen = sizeof(tv);
2615             ret = get_errno(getsockopt(sockfd, level, optname,
2616                                        &tv, &tvlen));
2617             if (ret < 0) {
2618                 return ret;
2619             }
2620             if (len > sizeof(struct target_timeval)) {
2621                 len = sizeof(struct target_timeval);
2622             }
2623             if (copy_to_user_timeval(optval_addr, &tv)) {
2624                 return -TARGET_EFAULT;
2625             }
2626             if (put_user_u32(len, optlen)) {
2627                 return -TARGET_EFAULT;
2628             }
2629             break;
2630         }
2631         case TARGET_SO_SNDTIMEO:
2632             optname = SO_SNDTIMEO;
2633             goto get_timeout;
2634         case TARGET_SO_PEERCRED: {
2635             struct ucred cr;
2636             socklen_t crlen;
2637             struct target_ucred *tcr;
2638 
2639             if (get_user_u32(len, optlen)) {
2640                 return -TARGET_EFAULT;
2641             }
2642             if (len < 0) {
2643                 return -TARGET_EINVAL;
2644             }
2645 
2646             crlen = sizeof(cr);
2647             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2648                                        &cr, &crlen));
2649             if (ret < 0) {
2650                 return ret;
2651             }
2652             if (len > crlen) {
2653                 len = crlen;
2654             }
2655             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2656                 return -TARGET_EFAULT;
2657             }
2658             __put_user(cr.pid, &tcr->pid);
2659             __put_user(cr.uid, &tcr->uid);
2660             __put_user(cr.gid, &tcr->gid);
2661             unlock_user_struct(tcr, optval_addr, 1);
2662             if (put_user_u32(len, optlen)) {
2663                 return -TARGET_EFAULT;
2664             }
2665             break;
2666         }
2667         case TARGET_SO_PEERSEC: {
2668             char *name;
2669 
2670             if (get_user_u32(len, optlen)) {
2671                 return -TARGET_EFAULT;
2672             }
2673             if (len < 0) {
2674                 return -TARGET_EINVAL;
2675             }
2676             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2677             if (!name) {
2678                 return -TARGET_EFAULT;
2679             }
2680             lv = len;
2681             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2682                                        name, &lv));
2683             if (put_user_u32(lv, optlen)) {
2684                 ret = -TARGET_EFAULT;
2685             }
2686             unlock_user(name, optval_addr, lv);
2687             break;
2688         }
2689         case TARGET_SO_LINGER:
2690         {
2691             struct linger lg;
2692             socklen_t lglen;
2693             struct target_linger *tlg;
2694 
2695             if (get_user_u32(len, optlen)) {
2696                 return -TARGET_EFAULT;
2697             }
2698             if (len < 0) {
2699                 return -TARGET_EINVAL;
2700             }
2701 
2702             lglen = sizeof(lg);
2703             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2704                                        &lg, &lglen));
2705             if (ret < 0) {
2706                 return ret;
2707             }
2708             if (len > lglen) {
2709                 len = lglen;
2710             }
2711             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2712                 return -TARGET_EFAULT;
2713             }
2714             __put_user(lg.l_onoff, &tlg->l_onoff);
2715             __put_user(lg.l_linger, &tlg->l_linger);
2716             unlock_user_struct(tlg, optval_addr, 1);
2717             if (put_user_u32(len, optlen)) {
2718                 return -TARGET_EFAULT;
2719             }
2720             break;
2721         }
2722         /* Options with 'int' argument.  */
2723         case TARGET_SO_DEBUG:
2724             optname = SO_DEBUG;
2725             goto int_case;
2726         case TARGET_SO_REUSEADDR:
2727             optname = SO_REUSEADDR;
2728             goto int_case;
2729 #ifdef SO_REUSEPORT
2730         case TARGET_SO_REUSEPORT:
2731             optname = SO_REUSEPORT;
2732             goto int_case;
2733 #endif
2734         case TARGET_SO_TYPE:
2735             optname = SO_TYPE;
2736             goto int_case;
2737         case TARGET_SO_ERROR:
2738             optname = SO_ERROR;
2739             goto int_case;
2740         case TARGET_SO_DONTROUTE:
2741             optname = SO_DONTROUTE;
2742             goto int_case;
2743         case TARGET_SO_BROADCAST:
2744             optname = SO_BROADCAST;
2745             goto int_case;
2746         case TARGET_SO_SNDBUF:
2747             optname = SO_SNDBUF;
2748             goto int_case;
2749         case TARGET_SO_RCVBUF:
2750             optname = SO_RCVBUF;
2751             goto int_case;
2752         case TARGET_SO_KEEPALIVE:
2753             optname = SO_KEEPALIVE;
2754             goto int_case;
2755         case TARGET_SO_OOBINLINE:
2756             optname = SO_OOBINLINE;
2757             goto int_case;
2758         case TARGET_SO_NO_CHECK:
2759             optname = SO_NO_CHECK;
2760             goto int_case;
2761         case TARGET_SO_PRIORITY:
2762             optname = SO_PRIORITY;
2763             goto int_case;
2764 #ifdef SO_BSDCOMPAT
2765         case TARGET_SO_BSDCOMPAT:
2766             optname = SO_BSDCOMPAT;
2767             goto int_case;
2768 #endif
2769         case TARGET_SO_PASSCRED:
2770             optname = SO_PASSCRED;
2771             goto int_case;
2772         case TARGET_SO_TIMESTAMP:
2773             optname = SO_TIMESTAMP;
2774             goto int_case;
2775         case TARGET_SO_RCVLOWAT:
2776             optname = SO_RCVLOWAT;
2777             goto int_case;
2778         case TARGET_SO_ACCEPTCONN:
2779             optname = SO_ACCEPTCONN;
2780             goto int_case;
2781         case TARGET_SO_PROTOCOL:
2782             optname = SO_PROTOCOL;
2783             goto int_case;
2784         case TARGET_SO_DOMAIN:
2785             optname = SO_DOMAIN;
2786             goto int_case;
2787         default:
2788             goto int_case;
2789         }
2790         break;
2791     case SOL_TCP:
2792     case SOL_UDP:
2793         /* TCP and UDP options all take an 'int' value.  */
2794     int_case:
2795         if (get_user_u32(len, optlen))
2796             return -TARGET_EFAULT;
2797         if (len < 0)
2798             return -TARGET_EINVAL;
2799         lv = sizeof(lv);
2800         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2801         if (ret < 0)
2802             return ret;
2803         if (optname == SO_TYPE) {
2804             val = host_to_target_sock_type(val);
2805         }
2806         if (len > lv)
2807             len = lv;
2808         if (len == 4) {
2809             if (put_user_u32(val, optval_addr))
2810                 return -TARGET_EFAULT;
2811         } else {
2812             if (put_user_u8(val, optval_addr))
2813                 return -TARGET_EFAULT;
2814         }
2815         if (put_user_u32(len, optlen))
2816             return -TARGET_EFAULT;
2817         break;
2818     case SOL_IP:
2819         switch(optname) {
2820         case IP_TOS:
2821         case IP_TTL:
2822         case IP_HDRINCL:
2823         case IP_ROUTER_ALERT:
2824         case IP_RECVOPTS:
2825         case IP_RETOPTS:
2826         case IP_PKTINFO:
2827         case IP_MTU_DISCOVER:
2828         case IP_RECVERR:
2829         case IP_RECVTOS:
2830 #ifdef IP_FREEBIND
2831         case IP_FREEBIND:
2832 #endif
2833         case IP_MULTICAST_TTL:
2834         case IP_MULTICAST_LOOP:
2835             if (get_user_u32(len, optlen))
2836                 return -TARGET_EFAULT;
2837             if (len < 0)
2838                 return -TARGET_EINVAL;
2839             lv = sizeof(lv);
2840             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2841             if (ret < 0)
2842                 return ret;
2843             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2844                 len = 1;
2845                 if (put_user_u32(len, optlen)
2846                     || put_user_u8(val, optval_addr))
2847                     return -TARGET_EFAULT;
2848             } else {
2849                 if (len > sizeof(int))
2850                     len = sizeof(int);
2851                 if (put_user_u32(len, optlen)
2852                     || put_user_u32(val, optval_addr))
2853                     return -TARGET_EFAULT;
2854             }
2855             break;
2856         default:
2857             ret = -TARGET_ENOPROTOOPT;
2858             break;
2859         }
2860         break;
2861     case SOL_IPV6:
2862         switch (optname) {
2863         case IPV6_MTU_DISCOVER:
2864         case IPV6_MTU:
2865         case IPV6_V6ONLY:
2866         case IPV6_RECVPKTINFO:
2867         case IPV6_UNICAST_HOPS:
2868         case IPV6_MULTICAST_HOPS:
2869         case IPV6_MULTICAST_LOOP:
2870         case IPV6_RECVERR:
2871         case IPV6_RECVHOPLIMIT:
2872         case IPV6_2292HOPLIMIT:
2873         case IPV6_CHECKSUM:
2874         case IPV6_ADDRFORM:
2875         case IPV6_2292PKTINFO:
2876         case IPV6_RECVTCLASS:
2877         case IPV6_RECVRTHDR:
2878         case IPV6_2292RTHDR:
2879         case IPV6_RECVHOPOPTS:
2880         case IPV6_2292HOPOPTS:
2881         case IPV6_RECVDSTOPTS:
2882         case IPV6_2292DSTOPTS:
2883         case IPV6_TCLASS:
2884         case IPV6_ADDR_PREFERENCES:
2885 #ifdef IPV6_RECVPATHMTU
2886         case IPV6_RECVPATHMTU:
2887 #endif
2888 #ifdef IPV6_TRANSPARENT
2889         case IPV6_TRANSPARENT:
2890 #endif
2891 #ifdef IPV6_FREEBIND
2892         case IPV6_FREEBIND:
2893 #endif
2894 #ifdef IPV6_RECVORIGDSTADDR
2895         case IPV6_RECVORIGDSTADDR:
2896 #endif
2897             if (get_user_u32(len, optlen))
2898                 return -TARGET_EFAULT;
2899             if (len < 0)
2900                 return -TARGET_EINVAL;
2901             lv = sizeof(lv);
2902             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2903             if (ret < 0)
2904                 return ret;
2905             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2906                 len = 1;
2907                 if (put_user_u32(len, optlen)
2908                     || put_user_u8(val, optval_addr))
2909                     return -TARGET_EFAULT;
2910             } else {
2911                 if (len > sizeof(int))
2912                     len = sizeof(int);
2913                 if (put_user_u32(len, optlen)
2914                     || put_user_u32(val, optval_addr))
2915                     return -TARGET_EFAULT;
2916             }
2917             break;
2918         default:
2919             ret = -TARGET_ENOPROTOOPT;
2920             break;
2921         }
2922         break;
2923 #ifdef SOL_NETLINK
2924     case SOL_NETLINK:
2925         switch (optname) {
2926         case NETLINK_PKTINFO:
2927         case NETLINK_BROADCAST_ERROR:
2928         case NETLINK_NO_ENOBUFS:
2929 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2930         case NETLINK_LISTEN_ALL_NSID:
2931         case NETLINK_CAP_ACK:
2932 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2933 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2934         case NETLINK_EXT_ACK:
2935 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2936 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2937         case NETLINK_GET_STRICT_CHK:
2938 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2939             if (get_user_u32(len, optlen)) {
2940                 return -TARGET_EFAULT;
2941             }
2942             if (len != sizeof(val)) {
2943                 return -TARGET_EINVAL;
2944             }
2945             lv = len;
2946             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2947             if (ret < 0) {
2948                 return ret;
2949             }
2950             if (put_user_u32(lv, optlen)
2951                 || put_user_u32(val, optval_addr)) {
2952                 return -TARGET_EFAULT;
2953             }
2954             break;
2955 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2956         case NETLINK_LIST_MEMBERSHIPS:
2957         {
2958             uint32_t *results;
2959             int i;
2960             if (get_user_u32(len, optlen)) {
2961                 return -TARGET_EFAULT;
2962             }
2963             if (len < 0) {
2964                 return -TARGET_EINVAL;
2965             }
2966             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2967             if (!results && len > 0) {
2968                 return -TARGET_EFAULT;
2969             }
2970             lv = len;
2971             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2972             if (ret < 0) {
2973                 unlock_user(results, optval_addr, 0);
2974                 return ret;
2975             }
2976             /* Swap host endianness to target endianness. */
2977             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2978                 results[i] = tswap32(results[i]);
2979             }
2980             if (put_user_u32(lv, optlen)) {
2981                 return -TARGET_EFAULT;
2982             }
2983             unlock_user(results, optval_addr, 0);
2984             break;
2985         }
2986 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2987         default:
2988             goto unimplemented;
2989         }
2990         break;
2991 #endif /* SOL_NETLINK */
2992     default:
2993     unimplemented:
2994         qemu_log_mask(LOG_UNIMP,
2995                       "getsockopt level=%d optname=%d not yet supported\n",
2996                       level, optname);
2997         ret = -TARGET_EOPNOTSUPP;
2998         break;
2999     }
3000     return ret;
3001 }
3002 
3003 /* Convert target low/high pair representing file offset into the host
3004  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3005  * as the kernel doesn't handle them either.
3006  */
3007 static void target_to_host_low_high(abi_ulong tlow,
3008                                     abi_ulong thigh,
3009                                     unsigned long *hlow,
3010                                     unsigned long *hhigh)
3011 {
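    /*
     * Shift in two halves so that each shift count stays strictly below
     * the width of the 64-bit type; a single shift by 64 would be
     * undefined behaviour when TARGET_LONG_BITS or HOST_LONG_BITS is 64.
     */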
3012     uint64_t off = tlow |
3013         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3014         TARGET_LONG_BITS / 2;
3015 
3016     *hlow = off;
3017     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3018 }
3019 
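/* Build a host iovec array from the guest iovec array at target_addr,
 * locking each guest buffer. Returns NULL with errno set on failure; a
 * bad buffer after the first is degraded to a zero-length entry so the
 * guest sees a partial transfer rather than EFAULT.
 */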
3020 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3021                                 abi_ulong count, int copy)
3022 {
3023     struct target_iovec *target_vec;
3024     struct iovec *vec;
3025     abi_ulong total_len, max_len;
3026     int i;
3027     int err = 0;
3028     bool bad_address = false;
3029 
3030     if (count == 0) {
3031         errno = 0;
3032         return NULL;
3033     }
3034     if (count > IOV_MAX) {
3035         errno = EINVAL;
3036         return NULL;
3037     }
3038 
3039     vec = g_try_new0(struct iovec, count);
3040     if (vec == NULL) {
3041         errno = ENOMEM;
3042         return NULL;
3043     }
3044 
3045     target_vec = lock_user(VERIFY_READ, target_addr,
3046                            count * sizeof(struct target_iovec), 1);
3047     if (target_vec == NULL) {
3048         err = EFAULT;
3049         goto fail2;
3050     }
3051 
3052     /* ??? If host page size > target page size, this will result in a
3053        value larger than what we can actually support.  */
3054     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3055     total_len = 0;
3056 
3057     for (i = 0; i < count; i++) {
3058         abi_ulong base = tswapal(target_vec[i].iov_base);
3059         abi_long len = tswapal(target_vec[i].iov_len);
3060 
3061         if (len < 0) {
3062             err = EINVAL;
3063             goto fail;
3064         } else if (len == 0) {
3065             /* Zero length pointer is ignored.  */
3066             vec[i].iov_base = 0;
3067         } else {
3068             vec[i].iov_base = lock_user(type, base, len, copy);
3069             /* If the first buffer pointer is bad, this is a fault.  But
3070              * subsequent bad buffers will result in a partial write; this
3071              * is realized by filling the vector with null pointers and
3072              * zero lengths. */
3073             if (!vec[i].iov_base) {
3074                 if (i == 0) {
3075                     err = EFAULT;
3076                     goto fail;
3077                 } else {
3078                     bad_address = true;
3079                 }
3080             }
3081             if (bad_address) {
3082                 len = 0;
3083             }
3084             if (len > max_len - total_len) {
3085                 len = max_len - total_len;
3086             }
3087         }
3088         vec[i].iov_len = len;
3089         total_len += len;
3090     }
3091 
3092     unlock_user(target_vec, target_addr, 0);
3093     return vec;
3094 
3095  fail:
3096     while (--i >= 0) {
3097         if (tswapal(target_vec[i].iov_len) > 0) {
3098             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3099         }
3100     }
3101     unlock_user(target_vec, target_addr, 0);
3102  fail2:
3103     g_free(vec);
3104     errno = err;
3105     return NULL;
3106 }
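
/*
 * Behavioural sketch (added for clarity; not part of the original source),
 * for a hypothetical guest writev() with three buffers where only the
 * second one points at unmapped guest memory:
 *
 *     iov[0]: valid, len 10  ->  locked, len 10
 *     iov[1]: bad pointer    ->  base NULL, len forced to 0
 *     iov[2]: valid, len 5   ->  len forced to 0 (bad_address stays set)
 *
 * The host writev() therefore transfers only the first 10 bytes, giving the
 * partial-write behaviour described in the comment above; a bad pointer in
 * iov[0] instead fails the whole call with EFAULT.
 */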
3107 
3108 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3109                          abi_ulong count, int copy)
3110 {
3111     struct target_iovec *target_vec;
3112     int i;
3113 
3114     target_vec = lock_user(VERIFY_READ, target_addr,
3115                            count * sizeof(struct target_iovec), 1);
3116     if (target_vec) {
3117         for (i = 0; i < count; i++) {
3118             abi_ulong base = tswapal(target_vec[i].iov_base);
3119             abi_long len = tswapal(target_vec[i].iov_len);
3120             if (len < 0) {
3121                 break;
3122             }
3123             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3124         }
3125         unlock_user(target_vec, target_addr, 0);
3126     }
3127 
3128     g_free(vec);
3129 }
3130 
3131 static inline int target_to_host_sock_type(int *type)
3132 {
3133     int host_type = 0;
3134     int target_type = *type;
3135 
3136     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3137     case TARGET_SOCK_DGRAM:
3138         host_type = SOCK_DGRAM;
3139         break;
3140     case TARGET_SOCK_STREAM:
3141         host_type = SOCK_STREAM;
3142         break;
3143     default:
3144         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3145         break;
3146     }
3147     if (target_type & TARGET_SOCK_CLOEXEC) {
3148 #if defined(SOCK_CLOEXEC)
3149         host_type |= SOCK_CLOEXEC;
3150 #else
3151         return -TARGET_EINVAL;
3152 #endif
3153     }
3154     if (target_type & TARGET_SOCK_NONBLOCK) {
3155 #if defined(SOCK_NONBLOCK)
3156         host_type |= SOCK_NONBLOCK;
3157 #elif !defined(O_NONBLOCK)
3158         return -TARGET_EINVAL;
3159 #endif
3160     }
3161     *type = host_type;
3162     return 0;
3163 }
3164 
3165 /* Try to emulate socket type flags after socket creation.  */
3166 static int sock_flags_fixup(int fd, int target_type)
3167 {
3168 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3169     if (target_type & TARGET_SOCK_NONBLOCK) {
3170         int flags = fcntl(fd, F_GETFL);
3171         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3172             close(fd);
3173             return -TARGET_EINVAL;
3174         }
3175     }
3176 #endif
3177     return fd;
3178 }
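
/*
 * Sketch (added for clarity; not part of the original source): a guest
 * asking for SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC gets the host's
 * SOCK_STREAM with the host SOCK_NONBLOCK/SOCK_CLOEXEC flags OR'd in where
 * they exist; on hosts that lack SOCK_NONBLOCK but have O_NONBLOCK, the
 * flag is instead emulated after socket creation by sock_flags_fixup()
 * above, via fcntl(F_SETFL, O_NONBLOCK | flags).
 */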
3179 
3180 /* do_socket() Must return target values and target errnos. */
3181 static abi_long do_socket(int domain, int type, int protocol)
3182 {
3183     int target_type = type;
3184     int ret;
3185 
3186     ret = target_to_host_sock_type(&type);
3187     if (ret) {
3188         return ret;
3189     }
3190 
3191     if (domain == PF_NETLINK && !(
3192 #ifdef CONFIG_RTNETLINK
3193          protocol == NETLINK_ROUTE ||
3194 #endif
3195          protocol == NETLINK_KOBJECT_UEVENT ||
3196          protocol == NETLINK_AUDIT)) {
3197         return -TARGET_EPROTONOSUPPORT;
3198     }
3199 
3200     if (domain == AF_PACKET ||
3201         (domain == AF_INET && type == SOCK_PACKET)) {
3202         protocol = tswap16(protocol);
3203     }
3204 
3205     ret = get_errno(socket(domain, type, protocol));
3206     if (ret >= 0) {
3207         ret = sock_flags_fixup(ret, target_type);
3208         if (type == SOCK_PACKET) {
3209             /* Handle an obsolete case:
3210              * if the socket type is SOCK_PACKET, bind by name.
3211              */
3212             fd_trans_register(ret, &target_packet_trans);
3213         } else if (domain == PF_NETLINK) {
3214             switch (protocol) {
3215 #ifdef CONFIG_RTNETLINK
3216             case NETLINK_ROUTE:
3217                 fd_trans_register(ret, &target_netlink_route_trans);
3218                 break;
3219 #endif
3220             case NETLINK_KOBJECT_UEVENT:
3221                 /* nothing to do: messages are strings */
3222                 break;
3223             case NETLINK_AUDIT:
3224                 fd_trans_register(ret, &target_netlink_audit_trans);
3225                 break;
3226             default:
3227                 g_assert_not_reached();
3228             }
3229         }
3230     }
3231     return ret;
3232 }
3233 
3234 /* do_bind() Must return target values and target errnos. */
3235 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3236                         socklen_t addrlen)
3237 {
3238     void *addr;
3239     abi_long ret;
3240 
3241     if ((int)addrlen < 0) {
3242         return -TARGET_EINVAL;
3243     }
3244 
3245     addr = alloca(addrlen+1);
3246 
3247     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3248     if (ret)
3249         return ret;
3250 
3251     return get_errno(bind(sockfd, addr, addrlen));
3252 }
3253 
3254 /* do_connect() Must return target values and target errnos. */
3255 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3256                            socklen_t addrlen)
3257 {
3258     void *addr;
3259     abi_long ret;
3260 
3261     if ((int)addrlen < 0) {
3262         return -TARGET_EINVAL;
3263     }
3264 
3265     addr = alloca(addrlen+1);
3266 
3267     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3268     if (ret)
3269         return ret;
3270 
3271     return get_errno(safe_connect(sockfd, addr, addrlen));
3272 }
3273 
3274 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3275 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3276                                       int flags, int send)
3277 {
3278     abi_long ret, len;
3279     struct msghdr msg;
3280     abi_ulong count;
3281     struct iovec *vec;
3282     abi_ulong target_vec;
3283 
3284     if (msgp->msg_name) {
3285         msg.msg_namelen = tswap32(msgp->msg_namelen);
3286         msg.msg_name = alloca(msg.msg_namelen+1);
3287         ret = target_to_host_sockaddr(fd, msg.msg_name,
3288                                       tswapal(msgp->msg_name),
3289                                       msg.msg_namelen);
3290         if (ret == -TARGET_EFAULT) {
3291             /* For connected sockets msg_name and msg_namelen must
3292              * be ignored, so returning EFAULT immediately is wrong.
3293              * Instead, pass a bad msg_name to the host kernel, and
3294              * let it decide whether to return EFAULT or not.
3295              */
3296             msg.msg_name = (void *)-1;
3297         } else if (ret) {
3298             goto out2;
3299         }
3300     } else {
3301         msg.msg_name = NULL;
3302         msg.msg_namelen = 0;
3303     }
3304     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3305     msg.msg_control = alloca(msg.msg_controllen);
3306     memset(msg.msg_control, 0, msg.msg_controllen);
3307 
3308     msg.msg_flags = tswap32(msgp->msg_flags);
3309 
3310     count = tswapal(msgp->msg_iovlen);
3311     target_vec = tswapal(msgp->msg_iov);
3312 
3313     if (count > IOV_MAX) {
3314         /* sendmsg/recvmsg return a different errno for this condition than
3315          * readv/writev, so we must catch it here before lock_iovec() does.
3316          */
3317         ret = -TARGET_EMSGSIZE;
3318         goto out2;
3319     }
3320 
3321     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3322                      target_vec, count, send);
3323     if (vec == NULL) {
3324         ret = -host_to_target_errno(errno);
3325         goto out2;
3326     }
3327     msg.msg_iovlen = count;
3328     msg.msg_iov = vec;
3329 
3330     if (send) {
3331         if (fd_trans_target_to_host_data(fd)) {
3332             void *host_msg;
3333 
3334             host_msg = g_malloc(msg.msg_iov->iov_len);
3335             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3336             ret = fd_trans_target_to_host_data(fd)(host_msg,
3337                                                    msg.msg_iov->iov_len);
3338             if (ret >= 0) {
3339                 msg.msg_iov->iov_base = host_msg;
3340                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3341             }
3342             g_free(host_msg);
3343         } else {
3344             ret = target_to_host_cmsg(&msg, msgp);
3345             if (ret == 0) {
3346                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3347             }
3348         }
3349     } else {
3350         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3351         if (!is_error(ret)) {
3352             len = ret;
3353             if (fd_trans_host_to_target_data(fd)) {
3354                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3355                                                MIN(msg.msg_iov->iov_len, len));
3356             }
3357             if (!is_error(ret)) {
3358                 ret = host_to_target_cmsg(msgp, &msg);
3359             }
3360             if (!is_error(ret)) {
3361                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3362                 msgp->msg_flags = tswap32(msg.msg_flags);
3363                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3364                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3365                                     msg.msg_name, msg.msg_namelen);
3366                     if (ret) {
3367                         goto out;
3368                     }
3369                 }
3370 
3371                 ret = len;
3372             }
3373         }
3374     }
3375 
3376 out:
3377     unlock_iovec(vec, target_vec, count, !send);
3378 out2:
3379     return ret;
3380 }
3381 
3382 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3383                                int flags, int send)
3384 {
3385     abi_long ret;
3386     struct target_msghdr *msgp;
3387 
3388     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3389                           msgp,
3390                           target_msg,
3391                           send ? 1 : 0)) {
3392         return -TARGET_EFAULT;
3393     }
3394     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3395     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3396     return ret;
3397 }
3398 
3399 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3400  * so it might not have this *mmsg-specific flag either.
3401  */
3402 #ifndef MSG_WAITFORONE
3403 #define MSG_WAITFORONE 0x10000
3404 #endif
3405 
3406 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3407                                 unsigned int vlen, unsigned int flags,
3408                                 int send)
3409 {
3410     struct target_mmsghdr *mmsgp;
3411     abi_long ret = 0;
3412     int i;
3413 
3414     if (vlen > UIO_MAXIOV) {
3415         vlen = UIO_MAXIOV;
3416     }
3417 
3418     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3419     if (!mmsgp) {
3420         return -TARGET_EFAULT;
3421     }
3422 
3423     for (i = 0; i < vlen; i++) {
3424         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3425         if (is_error(ret)) {
3426             break;
3427         }
3428         mmsgp[i].msg_len = tswap32(ret);
3429         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3430         if (flags & MSG_WAITFORONE) {
3431             flags |= MSG_DONTWAIT;
3432         }
3433     }
3434 
3435     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3436 
3437     /* Return the number of datagrams sent or received if we handled
3438      * any at all; otherwise return the error.
3439      */
3440     if (i) {
3441         return i;
3442     }
3443     return ret;
3444 }
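
/*
 * Behavioural note (added for clarity; not part of the original source):
 * for a guest recvmmsg() with vlen == 4 and MSG_WAITFORONE set, the loop
 * above waits (at most) for the first datagram, then ORs in MSG_DONTWAIT;
 * if nothing else is queued, the next safe_recvmsg() fails with EAGAIN,
 * the loop breaks, and the call returns 1 -- the number of datagrams
 * actually received -- rather than the error.
 */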
3445 
3446 /* do_accept4() Must return target values and target errnos. */
3447 static abi_long do_accept4(int fd, abi_ulong target_addr,
3448                            abi_ulong target_addrlen_addr, int flags)
3449 {
3450     socklen_t addrlen, ret_addrlen;
3451     void *addr;
3452     abi_long ret;
3453     int host_flags;
3454 
3455     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3456 
3457     if (target_addr == 0) {
3458         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3459     }
3460 
3461     /* Linux returns EFAULT if the addrlen pointer is invalid */
3462     if (get_user_u32(addrlen, target_addrlen_addr))
3463         return -TARGET_EFAULT;
3464 
3465     if ((int)addrlen < 0) {
3466         return -TARGET_EINVAL;
3467     }
3468 
3469     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3470         return -TARGET_EFAULT;
3471     }
3472 
3473     addr = alloca(addrlen);
3474 
3475     ret_addrlen = addrlen;
3476     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3477     if (!is_error(ret)) {
3478         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3479         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3480             ret = -TARGET_EFAULT;
3481         }
3482     }
3483     return ret;
3484 }
3485 
3486 /* do_getpeername() Must return target values and target errnos. */
3487 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3488                                abi_ulong target_addrlen_addr)
3489 {
3490     socklen_t addrlen, ret_addrlen;
3491     void *addr;
3492     abi_long ret;
3493 
3494     if (get_user_u32(addrlen, target_addrlen_addr))
3495         return -TARGET_EFAULT;
3496 
3497     if ((int)addrlen < 0) {
3498         return -TARGET_EINVAL;
3499     }
3500 
3501     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3502         return -TARGET_EFAULT;
3503     }
3504 
3505     addr = alloca(addrlen);
3506 
3507     ret_addrlen = addrlen;
3508     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3509     if (!is_error(ret)) {
3510         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3511         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3512             ret = -TARGET_EFAULT;
3513         }
3514     }
3515     return ret;
3516 }
3517 
3518 /* do_getsockname() Must return target values and target errnos. */
3519 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3520                                abi_ulong target_addrlen_addr)
3521 {
3522     socklen_t addrlen, ret_addrlen;
3523     void *addr;
3524     abi_long ret;
3525 
3526     if (get_user_u32(addrlen, target_addrlen_addr))
3527         return -TARGET_EFAULT;
3528 
3529     if ((int)addrlen < 0) {
3530         return -TARGET_EINVAL;
3531     }
3532 
3533     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3534         return -TARGET_EFAULT;
3535     }
3536 
3537     addr = alloca(addrlen);
3538 
3539     ret_addrlen = addrlen;
3540     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3541     if (!is_error(ret)) {
3542         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3543         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3544             ret = -TARGET_EFAULT;
3545         }
3546     }
3547     return ret;
3548 }
3549 
3550 /* do_socketpair() Must return target values and target errnos. */
3551 static abi_long do_socketpair(int domain, int type, int protocol,
3552                               abi_ulong target_tab_addr)
3553 {
3554     int tab[2];
3555     abi_long ret;
3556 
3557     target_to_host_sock_type(&type);
3558 
3559     ret = get_errno(socketpair(domain, type, protocol, tab));
3560     if (!is_error(ret)) {
3561         if (put_user_s32(tab[0], target_tab_addr)
3562             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3563             ret = -TARGET_EFAULT;
3564     }
3565     return ret;
3566 }
3567 
3568 /* do_sendto() Must return target values and target errnos. */
3569 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3570                           abi_ulong target_addr, socklen_t addrlen)
3571 {
3572     void *addr;
3573     void *host_msg;
3574     void *copy_msg = NULL;
3575     abi_long ret;
3576 
3577     if ((int)addrlen < 0) {
3578         return -TARGET_EINVAL;
3579     }
3580 
3581     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3582     if (!host_msg)
3583         return -TARGET_EFAULT;
3584     if (fd_trans_target_to_host_data(fd)) {
3585         copy_msg = host_msg;
3586         host_msg = g_malloc(len);
3587         memcpy(host_msg, copy_msg, len);
3588         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3589         if (ret < 0) {
3590             goto fail;
3591         }
3592     }
3593     if (target_addr) {
3594         addr = alloca(addrlen+1);
3595         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3596         if (ret) {
3597             goto fail;
3598         }
3599         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3600     } else {
3601         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3602     }
3603 fail:
3604     if (copy_msg) {
3605         g_free(host_msg);
3606         host_msg = copy_msg;
3607     }
3608     unlock_user(host_msg, msg, 0);
3609     return ret;
3610 }
3611 
3612 /* do_recvfrom() Must return target values and target errnos. */
3613 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3614                             abi_ulong target_addr,
3615                             abi_ulong target_addrlen)
3616 {
3617     socklen_t addrlen, ret_addrlen;
3618     void *addr;
3619     void *host_msg;
3620     abi_long ret;
3621 
3622     if (!msg) {
3623         host_msg = NULL;
3624     } else {
3625         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3626         if (!host_msg) {
3627             return -TARGET_EFAULT;
3628         }
3629     }
3630     if (target_addr) {
3631         if (get_user_u32(addrlen, target_addrlen)) {
3632             ret = -TARGET_EFAULT;
3633             goto fail;
3634         }
3635         if ((int)addrlen < 0) {
3636             ret = -TARGET_EINVAL;
3637             goto fail;
3638         }
3639         addr = alloca(addrlen);
3640         ret_addrlen = addrlen;
3641         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3642                                       addr, &ret_addrlen));
3643     } else {
3644         addr = NULL; /* To keep compiler quiet.  */
3645         addrlen = 0; /* To keep compiler quiet.  */
3646         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3647     }
3648     if (!is_error(ret)) {
3649         if (fd_trans_host_to_target_data(fd)) {
3650             abi_long trans;
3651             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3652             if (is_error(trans)) {
3653                 ret = trans;
3654                 goto fail;
3655             }
3656         }
3657         if (target_addr) {
3658             host_to_target_sockaddr(target_addr, addr,
3659                                     MIN(addrlen, ret_addrlen));
3660             if (put_user_u32(ret_addrlen, target_addrlen)) {
3661                 ret = -TARGET_EFAULT;
3662                 goto fail;
3663             }
3664         }
3665         unlock_user(host_msg, msg, len);
3666     } else {
3667 fail:
3668         unlock_user(host_msg, msg, 0);
3669     }
3670     return ret;
3671 }
3672 
3673 #ifdef TARGET_NR_socketcall
3674 /* do_socketcall() must return target values and target errnos. */
3675 static abi_long do_socketcall(int num, abi_ulong vptr)
3676 {
3677     static const unsigned nargs[] = { /* number of arguments per operation */
3678         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3679         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3680         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3681         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3682         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3683         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3684         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3685         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3686         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3687         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3688         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3689         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3690         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3691         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3692         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3693         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3694         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3695         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3696         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3697         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3698     };
3699     abi_long a[6]; /* max 6 args */
3700     unsigned i;
3701 
3702     /* check the range of the first argument num */
3703     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3704     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3705         return -TARGET_EINVAL;
3706     }
3707     /* ensure we have space for args */
3708     if (nargs[num] > ARRAY_SIZE(a)) {
3709         return -TARGET_EINVAL;
3710     }
3711     /* collect the arguments in a[] according to nargs[] */
3712     for (i = 0; i < nargs[num]; ++i) {
3713         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3714             return -TARGET_EFAULT;
3715         }
3716     }
3717     /* now that we have the args, invoke the appropriate underlying function */
3718     switch (num) {
3719     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3720         return do_socket(a[0], a[1], a[2]);
3721     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3722         return do_bind(a[0], a[1], a[2]);
3723     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3724         return do_connect(a[0], a[1], a[2]);
3725     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3726         return get_errno(listen(a[0], a[1]));
3727     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3728         return do_accept4(a[0], a[1], a[2], 0);
3729     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3730         return do_getsockname(a[0], a[1], a[2]);
3731     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3732         return do_getpeername(a[0], a[1], a[2]);
3733     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3734         return do_socketpair(a[0], a[1], a[2], a[3]);
3735     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3736         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3737     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3738         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3739     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3740         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3741     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3742         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3743     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3744         return get_errno(shutdown(a[0], a[1]));
3745     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3746         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3747     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3748         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3749     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3750         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3751     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3752         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3753     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3754         return do_accept4(a[0], a[1], a[2], a[3]);
3755     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3756         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3757     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3758         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3759     default:
3760         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3761         return -TARGET_EINVAL;
3762     }
3763 }
3764 #endif
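
/*
 * Dispatch sketch (added for clarity; not part of the original source):
 * on guests that multiplex socket calls through socketcall(2), a guest
 * connect(sockfd, addr, addrlen) arrives as
 * socketcall(TARGET_SYS_CONNECT, argp) with argp pointing at three
 * abi_longs {sockfd, addr, addrlen}; do_socketcall() looks up
 * nargs[TARGET_SYS_CONNECT] == 3, fetches the three words with
 * get_user_ual() and hands them to do_connect(a[0], a[1], a[2]).
 */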
3765 
3766 #define N_SHM_REGIONS	32
3767 
3768 static struct shm_region {
3769     abi_ulong start;
3770     abi_ulong size;
3771     bool in_use;
3772 } shm_regions[N_SHM_REGIONS];
3773 
3774 #ifndef TARGET_SEMID64_DS
3775 /* asm-generic version of this struct */
3776 struct target_semid64_ds
3777 {
3778   struct target_ipc_perm sem_perm;
3779   abi_ulong sem_otime;
3780 #if TARGET_ABI_BITS == 32
3781   abi_ulong __unused1;
3782 #endif
3783   abi_ulong sem_ctime;
3784 #if TARGET_ABI_BITS == 32
3785   abi_ulong __unused2;
3786 #endif
3787   abi_ulong sem_nsems;
3788   abi_ulong __unused3;
3789   abi_ulong __unused4;
3790 };
3791 #endif
3792 
3793 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3794                                                abi_ulong target_addr)
3795 {
3796     struct target_ipc_perm *target_ip;
3797     struct target_semid64_ds *target_sd;
3798 
3799     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3800         return -TARGET_EFAULT;
3801     target_ip = &(target_sd->sem_perm);
3802     host_ip->__key = tswap32(target_ip->__key);
3803     host_ip->uid = tswap32(target_ip->uid);
3804     host_ip->gid = tswap32(target_ip->gid);
3805     host_ip->cuid = tswap32(target_ip->cuid);
3806     host_ip->cgid = tswap32(target_ip->cgid);
3807 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3808     host_ip->mode = tswap32(target_ip->mode);
3809 #else
3810     host_ip->mode = tswap16(target_ip->mode);
3811 #endif
3812 #if defined(TARGET_PPC)
3813     host_ip->__seq = tswap32(target_ip->__seq);
3814 #else
3815     host_ip->__seq = tswap16(target_ip->__seq);
3816 #endif
3817     unlock_user_struct(target_sd, target_addr, 0);
3818     return 0;
3819 }
3820 
3821 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3822                                                struct ipc_perm *host_ip)
3823 {
3824     struct target_ipc_perm *target_ip;
3825     struct target_semid64_ds *target_sd;
3826 
3827     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3828         return -TARGET_EFAULT;
3829     target_ip = &(target_sd->sem_perm);
3830     target_ip->__key = tswap32(host_ip->__key);
3831     target_ip->uid = tswap32(host_ip->uid);
3832     target_ip->gid = tswap32(host_ip->gid);
3833     target_ip->cuid = tswap32(host_ip->cuid);
3834     target_ip->cgid = tswap32(host_ip->cgid);
3835 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3836     target_ip->mode = tswap32(host_ip->mode);
3837 #else
3838     target_ip->mode = tswap16(host_ip->mode);
3839 #endif
3840 #if defined(TARGET_PPC)
3841     target_ip->__seq = tswap32(host_ip->__seq);
3842 #else
3843     target_ip->__seq = tswap16(host_ip->__seq);
3844 #endif
3845     unlock_user_struct(target_sd, target_addr, 1);
3846     return 0;
3847 }
3848 
3849 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3850                                                abi_ulong target_addr)
3851 {
3852     struct target_semid64_ds *target_sd;
3853 
3854     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3855         return -TARGET_EFAULT;
3856     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3857         return -TARGET_EFAULT;
3858     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3859     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3860     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3861     unlock_user_struct(target_sd, target_addr, 0);
3862     return 0;
3863 }
3864 
3865 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3866                                                struct semid_ds *host_sd)
3867 {
3868     struct target_semid64_ds *target_sd;
3869 
3870     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3871         return -TARGET_EFAULT;
3872     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3873         return -TARGET_EFAULT;
3874     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3875     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3876     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3877     unlock_user_struct(target_sd, target_addr, 1);
3878     return 0;
3879 }
3880 
3881 struct target_seminfo {
3882     int semmap;
3883     int semmni;
3884     int semmns;
3885     int semmnu;
3886     int semmsl;
3887     int semopm;
3888     int semume;
3889     int semusz;
3890     int semvmx;
3891     int semaem;
3892 };
3893 
3894 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3895                                               struct seminfo *host_seminfo)
3896 {
3897     struct target_seminfo *target_seminfo;
3898     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3899         return -TARGET_EFAULT;
3900     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3901     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3902     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3903     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3904     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3905     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3906     __put_user(host_seminfo->semume, &target_seminfo->semume);
3907     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3908     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3909     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3910     unlock_user_struct(target_seminfo, target_addr, 1);
3911     return 0;
3912 }
3913 
3914 union semun {
3915 	int val;
3916 	struct semid_ds *buf;
3917 	unsigned short *array;
3918 	struct seminfo *__buf;
3919 };
3920 
3921 union target_semun {
3922 	int val;
3923 	abi_ulong buf;
3924 	abi_ulong array;
3925 	abi_ulong __buf;
3926 };
3927 
3928 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3929                                                abi_ulong target_addr)
3930 {
3931     int nsems;
3932     unsigned short *array;
3933     union semun semun;
3934     struct semid_ds semid_ds;
3935     int i, ret;
3936 
3937     semun.buf = &semid_ds;
3938 
3939     ret = semctl(semid, 0, IPC_STAT, semun);
3940     if (ret == -1)
3941         return get_errno(ret);
3942 
3943     nsems = semid_ds.sem_nsems;
3944 
3945     *host_array = g_try_new(unsigned short, nsems);
3946     if (!*host_array) {
3947         return -TARGET_ENOMEM;
3948     }
3949     array = lock_user(VERIFY_READ, target_addr,
3950                       nsems*sizeof(unsigned short), 1);
3951     if (!array) {
3952         g_free(*host_array);
3953         return -TARGET_EFAULT;
3954     }
3955 
3956     for(i=0; i<nsems; i++) {
3957         __get_user((*host_array)[i], &array[i]);
3958     }
3959     unlock_user(array, target_addr, 0);
3960 
3961     return 0;
3962 }
3963 
3964 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3965                                                unsigned short **host_array)
3966 {
3967     int nsems;
3968     unsigned short *array;
3969     union semun semun;
3970     struct semid_ds semid_ds;
3971     int i, ret;
3972 
3973     semun.buf = &semid_ds;
3974 
3975     ret = semctl(semid, 0, IPC_STAT, semun);
3976     if (ret == -1)
3977         return get_errno(ret);
3978 
3979     nsems = semid_ds.sem_nsems;
3980 
3981     array = lock_user(VERIFY_WRITE, target_addr,
3982                       nsems*sizeof(unsigned short), 0);
3983     if (!array)
3984         return -TARGET_EFAULT;
3985 
3986     for(i=0; i<nsems; i++) {
3987         __put_user((*host_array)[i], &array[i]);
3988     }
3989     g_free(*host_array);
3990     unlock_user(array, target_addr, 1);
3991 
3992     return 0;
3993 }
3994 
3995 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3996                                  abi_ulong target_arg)
3997 {
3998     union target_semun target_su = { .buf = target_arg };
3999     union semun arg;
4000     struct semid_ds dsarg;
4001     unsigned short *array = NULL;
4002     struct seminfo seminfo;
4003     abi_long ret = -TARGET_EINVAL;
4004     abi_long err;
4005     cmd &= 0xff;
4006 
4007     switch( cmd ) {
4008 	case GETVAL:
4009 	case SETVAL:
4010             /* In 64-bit cross-endian situations, we will erroneously pick up
4011              * the wrong half of the union for the "val" element.  To rectify
4012              * this, the entire 8-byte structure is byteswapped, followed by
4013              * a swap of the 4-byte val field. In other cases, the data is
4014              * already in proper host byte order. */
4015 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4016 		target_su.buf = tswapal(target_su.buf);
4017 		arg.val = tswap32(target_su.val);
4018 	    } else {
4019 		arg.val = target_su.val;
4020 	    }
4021             ret = get_errno(semctl(semid, semnum, cmd, arg));
4022             break;
4023 	case GETALL:
4024 	case SETALL:
4025             err = target_to_host_semarray(semid, &array, target_su.array);
4026             if (err)
4027                 return err;
4028             arg.array = array;
4029             ret = get_errno(semctl(semid, semnum, cmd, arg));
4030             err = host_to_target_semarray(semid, target_su.array, &array);
4031             if (err)
4032                 return err;
4033             break;
4034 	case IPC_STAT:
4035 	case IPC_SET:
4036 	case SEM_STAT:
4037             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4038             if (err)
4039                 return err;
4040             arg.buf = &dsarg;
4041             ret = get_errno(semctl(semid, semnum, cmd, arg));
4042             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4043             if (err)
4044                 return err;
4045             break;
4046 	case IPC_INFO:
4047 	case SEM_INFO:
4048             arg.__buf = &seminfo;
4049             ret = get_errno(semctl(semid, semnum, cmd, arg));
4050             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4051             if (err)
4052                 return err;
4053             break;
4054 	case IPC_RMID:
4055 	case GETPID:
4056 	case GETNCNT:
4057 	case GETZCNT:
4058             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4059             break;
4060     }
4061 
4062     return ret;
4063 }
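
/*
 * Worked example (added for clarity; not part of the original source) of
 * the GETVAL/SETVAL byte-swapping above, assuming a 64-bit big-endian
 * guest on a little-endian host passing val == 0x11223344:
 *
 *   - the 8-byte union arrives already swapped as an abi_ulong, so
 *     target_su.buf == 0x1122334400000000 and reading target_su.val on the
 *     host would pick up the wrong (zero) half;
 *   - target_su.buf = tswapal(target_su.buf) gives 0x0000000044332211,
 *     moving the data back into the half that val aliases;
 *   - arg.val = tswap32(target_su.val) then restores 0x11223344.
 */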
4064 
4065 struct target_sembuf {
4066     unsigned short sem_num;
4067     short sem_op;
4068     short sem_flg;
4069 };
4070 
4071 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4072                                              abi_ulong target_addr,
4073                                              unsigned nsops)
4074 {
4075     struct target_sembuf *target_sembuf;
4076     int i;
4077 
4078     target_sembuf = lock_user(VERIFY_READ, target_addr,
4079                               nsops*sizeof(struct target_sembuf), 1);
4080     if (!target_sembuf)
4081         return -TARGET_EFAULT;
4082 
4083     for(i=0; i<nsops; i++) {
4084         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4085         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4086         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4087     }
4088 
4089     unlock_user(target_sembuf, target_addr, 0);
4090 
4091     return 0;
4092 }
4093 
4094 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4095     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4096 
4097 /*
4098  * This macro is required to handle the s390 variant, which passes the
4099  * arguments in a different order than the default.
4100  */
4101 #ifdef __s390x__
4102 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4103   (__nsops), (__timeout), (__sops)
4104 #else
4105 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4106   (__nsops), 0, (__sops), (__timeout)
4107 #endif
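
/*
 * Expansion sketch (added for clarity; not part of the original source):
 * with the default definition,
 *
 *     safe_ipc(IPCOP_semtimedop, semid,
 *              SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts))
 *
 * expands to safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts),
 * while on s390x it expands to
 * safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops).
 */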
4108 
4109 static inline abi_long do_semtimedop(int semid,
4110                                      abi_long ptr,
4111                                      unsigned nsops,
4112                                      abi_long timeout, bool time64)
4113 {
4114     struct sembuf *sops;
4115     struct timespec ts, *pts = NULL;
4116     abi_long ret;
4117 
4118     if (timeout) {
4119         pts = &ts;
4120         if (time64) {
4121             if (target_to_host_timespec64(pts, timeout)) {
4122                 return -TARGET_EFAULT;
4123             }
4124         } else {
4125             if (target_to_host_timespec(pts, timeout)) {
4126                 return -TARGET_EFAULT;
4127             }
4128         }
4129     }
4130 
4131     if (nsops > TARGET_SEMOPM) {
4132         return -TARGET_E2BIG;
4133     }
4134 
4135     sops = g_new(struct sembuf, nsops);
4136 
4137     if (target_to_host_sembuf(sops, ptr, nsops)) {
4138         g_free(sops);
4139         return -TARGET_EFAULT;
4140     }
4141 
4142     ret = -TARGET_ENOSYS;
4143 #ifdef __NR_semtimedop
4144     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4145 #endif
4146 #ifdef __NR_ipc
4147     if (ret == -TARGET_ENOSYS) {
4148         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4149                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4150     }
4151 #endif
4152     g_free(sops);
4153     return ret;
4154 }
4155 #endif
4156 
4157 struct target_msqid_ds
4158 {
4159     struct target_ipc_perm msg_perm;
4160     abi_ulong msg_stime;
4161 #if TARGET_ABI_BITS == 32
4162     abi_ulong __unused1;
4163 #endif
4164     abi_ulong msg_rtime;
4165 #if TARGET_ABI_BITS == 32
4166     abi_ulong __unused2;
4167 #endif
4168     abi_ulong msg_ctime;
4169 #if TARGET_ABI_BITS == 32
4170     abi_ulong __unused3;
4171 #endif
4172     abi_ulong __msg_cbytes;
4173     abi_ulong msg_qnum;
4174     abi_ulong msg_qbytes;
4175     abi_ulong msg_lspid;
4176     abi_ulong msg_lrpid;
4177     abi_ulong __unused4;
4178     abi_ulong __unused5;
4179 };
4180 
4181 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4182                                                abi_ulong target_addr)
4183 {
4184     struct target_msqid_ds *target_md;
4185 
4186     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4187         return -TARGET_EFAULT;
4188     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4189         return -TARGET_EFAULT;
4190     host_md->msg_stime = tswapal(target_md->msg_stime);
4191     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4192     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4193     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4194     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4195     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4196     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4197     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4198     unlock_user_struct(target_md, target_addr, 0);
4199     return 0;
4200 }
4201 
4202 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4203                                                struct msqid_ds *host_md)
4204 {
4205     struct target_msqid_ds *target_md;
4206 
4207     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4208         return -TARGET_EFAULT;
4209     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4210         return -TARGET_EFAULT;
4211     target_md->msg_stime = tswapal(host_md->msg_stime);
4212     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4213     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4214     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4215     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4216     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4217     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4218     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4219     unlock_user_struct(target_md, target_addr, 1);
4220     return 0;
4221 }
4222 
4223 struct target_msginfo {
4224     int msgpool;
4225     int msgmap;
4226     int msgmax;
4227     int msgmnb;
4228     int msgmni;
4229     int msgssz;
4230     int msgtql;
4231     unsigned short int msgseg;
4232 };
4233 
4234 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4235                                               struct msginfo *host_msginfo)
4236 {
4237     struct target_msginfo *target_msginfo;
4238     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4239         return -TARGET_EFAULT;
4240     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4241     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4242     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4243     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4244     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4245     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4246     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4247     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4248     unlock_user_struct(target_msginfo, target_addr, 1);
4249     return 0;
4250 }
4251 
4252 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4253 {
4254     struct msqid_ds dsarg;
4255     struct msginfo msginfo;
4256     abi_long ret = -TARGET_EINVAL;
4257 
4258     cmd &= 0xff;
4259 
4260     switch (cmd) {
4261     case IPC_STAT:
4262     case IPC_SET:
4263     case MSG_STAT:
4264         if (target_to_host_msqid_ds(&dsarg,ptr))
4265             return -TARGET_EFAULT;
4266         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4267         if (host_to_target_msqid_ds(ptr,&dsarg))
4268             return -TARGET_EFAULT;
4269         break;
4270     case IPC_RMID:
4271         ret = get_errno(msgctl(msgid, cmd, NULL));
4272         break;
4273     case IPC_INFO:
4274     case MSG_INFO:
4275         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4276         if (host_to_target_msginfo(ptr, &msginfo))
4277             return -TARGET_EFAULT;
4278         break;
4279     }
4280 
4281     return ret;
4282 }
4283 
4284 struct target_msgbuf {
4285     abi_long mtype;
4286     char	mtext[1];
4287 };
4288 
4289 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4290                                  ssize_t msgsz, int msgflg)
4291 {
4292     struct target_msgbuf *target_mb;
4293     struct msgbuf *host_mb;
4294     abi_long ret = 0;
4295 
4296     if (msgsz < 0) {
4297         return -TARGET_EINVAL;
4298     }
4299 
4300     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4301         return -TARGET_EFAULT;
4302     host_mb = g_try_malloc(msgsz + sizeof(long));
4303     if (!host_mb) {
4304         unlock_user_struct(target_mb, msgp, 0);
4305         return -TARGET_ENOMEM;
4306     }
4307     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4308     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4309     ret = -TARGET_ENOSYS;
4310 #ifdef __NR_msgsnd
4311     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4312 #endif
4313 #ifdef __NR_ipc
4314     if (ret == -TARGET_ENOSYS) {
4315 #ifdef __s390x__
4316         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4317                                  host_mb));
4318 #else
4319         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4320                                  host_mb, 0));
4321 #endif
4322     }
4323 #endif
4324     g_free(host_mb);
4325     unlock_user_struct(target_mb, msgp, 0);
4326 
4327     return ret;
4328 }
4329 
4330 #ifdef __NR_ipc
4331 #if defined(__sparc__)
4332 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4333 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4334 #elif defined(__s390x__)
4335 /* The s390 sys_ipc variant has only five parameters.  */
4336 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4337     ((long int[]){(long int)__msgp, __msgtyp})
4338 #else
4339 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4340     ((long int[]){(long int)__msgp, __msgtyp}), 0
4341 #endif
4342 #endif
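
/*
 * Expansion sketch (added for clarity; not part of the original source):
 * with the default definition,
 *
 *     safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
 *              MSGRCV_ARGS(host_mb, msgtyp))
 *
 * passes a two-element long array {(long)host_mb, msgtyp} plus a trailing
 * 0; on s390x the trailing 0 is dropped (five-parameter sys_ipc), and on
 * SPARC host_mb and msgtyp are passed directly as the final two arguments.
 */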
4343 
4344 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4345                                  ssize_t msgsz, abi_long msgtyp,
4346                                  int msgflg)
4347 {
4348     struct target_msgbuf *target_mb;
4349     char *target_mtext;
4350     struct msgbuf *host_mb;
4351     abi_long ret = 0;
4352 
4353     if (msgsz < 0) {
4354         return -TARGET_EINVAL;
4355     }
4356 
4357     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4358         return -TARGET_EFAULT;
4359 
4360     host_mb = g_try_malloc(msgsz + sizeof(long));
4361     if (!host_mb) {
4362         ret = -TARGET_ENOMEM;
4363         goto end;
4364     }
4365     ret = -TARGET_ENOSYS;
4366 #ifdef __NR_msgrcv
4367     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4368 #endif
4369 #ifdef __NR_ipc
4370     if (ret == -TARGET_ENOSYS) {
4371         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4372                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4373     }
4374 #endif
4375 
4376     if (ret > 0) {
4377         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4378         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4379         if (!target_mtext) {
4380             ret = -TARGET_EFAULT;
4381             goto end;
4382         }
4383         memcpy(target_mb->mtext, host_mb->mtext, ret);
4384         unlock_user(target_mtext, target_mtext_addr, ret);
4385     }
4386 
4387     target_mb->mtype = tswapal(host_mb->mtype);
4388 
4389 end:
4390     if (target_mb)
4391         unlock_user_struct(target_mb, msgp, 1);
4392     g_free(host_mb);
4393     return ret;
4394 }
4395 
4396 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4397                                                abi_ulong target_addr)
4398 {
4399     struct target_shmid_ds *target_sd;
4400 
4401     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4402         return -TARGET_EFAULT;
4403     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4404         return -TARGET_EFAULT;
4405     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4406     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4407     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4408     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4409     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4410     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4411     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4412     unlock_user_struct(target_sd, target_addr, 0);
4413     return 0;
4414 }
4415 
4416 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4417                                                struct shmid_ds *host_sd)
4418 {
4419     struct target_shmid_ds *target_sd;
4420 
4421     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4422         return -TARGET_EFAULT;
4423     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4424         return -TARGET_EFAULT;
4425     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4426     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4427     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4428     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4429     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4430     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4431     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4432     unlock_user_struct(target_sd, target_addr, 1);
4433     return 0;
4434 }
4435 
4436 struct  target_shminfo {
4437     abi_ulong shmmax;
4438     abi_ulong shmmin;
4439     abi_ulong shmmni;
4440     abi_ulong shmseg;
4441     abi_ulong shmall;
4442 };
4443 
4444 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4445                                               struct shminfo *host_shminfo)
4446 {
4447     struct target_shminfo *target_shminfo;
4448     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4449         return -TARGET_EFAULT;
4450     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4451     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4452     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4453     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4454     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4455     unlock_user_struct(target_shminfo, target_addr, 1);
4456     return 0;
4457 }
4458 
4459 struct target_shm_info {
4460     int used_ids;
4461     abi_ulong shm_tot;
4462     abi_ulong shm_rss;
4463     abi_ulong shm_swp;
4464     abi_ulong swap_attempts;
4465     abi_ulong swap_successes;
4466 };
4467 
4468 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4469                                                struct shm_info *host_shm_info)
4470 {
4471     struct target_shm_info *target_shm_info;
4472     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4473         return -TARGET_EFAULT;
4474     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4475     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4476     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4477     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4478     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4479     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4480     unlock_user_struct(target_shm_info, target_addr, 1);
4481     return 0;
4482 }
4483 
4484 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4485 {
4486     struct shmid_ds dsarg;
4487     struct shminfo shminfo;
4488     struct shm_info shm_info;
4489     abi_long ret = -TARGET_EINVAL;
4490 
4491     cmd &= 0xff;
4492 
4493     switch(cmd) {
4494     case IPC_STAT:
4495     case IPC_SET:
4496     case SHM_STAT:
4497         if (target_to_host_shmid_ds(&dsarg, buf))
4498             return -TARGET_EFAULT;
4499         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4500         if (host_to_target_shmid_ds(buf, &dsarg))
4501             return -TARGET_EFAULT;
4502         break;
4503     case IPC_INFO:
4504         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4505         if (host_to_target_shminfo(buf, &shminfo))
4506             return -TARGET_EFAULT;
4507         break;
4508     case SHM_INFO:
4509         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4510         if (host_to_target_shm_info(buf, &shm_info))
4511             return -TARGET_EFAULT;
4512         break;
4513     case IPC_RMID:
4514     case SHM_LOCK:
4515     case SHM_UNLOCK:
4516         ret = get_errno(shmctl(shmid, cmd, NULL));
4517         break;
4518     }
4519 
4520     return ret;
4521 }
4522 
4523 #ifndef TARGET_FORCE_SHMLBA
4524 /* For most architectures, SHMLBA is the same as the page size;
4525  * some architectures have larger values, in which case they should
4526  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4527  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4528  * and defining its own value for SHMLBA.
4529  *
4530  * The kernel also permits SHMLBA to be set by the architecture to a
4531  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4532  * this means that addresses are rounded to the large size if
4533  * SHM_RND is set but addresses not aligned to that size are not rejected
4534  * as long as they are at least page-aligned. Since the only architecture
4535  * which uses this is ia64, this code doesn't provide for that oddity.
4536  */
4537 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4538 {
4539     return TARGET_PAGE_SIZE;
4540 }
4541 #endif
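
/*
 * Worked example (added for clarity; not part of the original source),
 * assuming shmlba == 0x1000: a request to attach at shmaddr 0x400123 is
 * rounded down to 0x400000 by do_shmat() below when SHM_RND is set, and
 * rejected with -TARGET_EINVAL otherwise, because 0x400123 & (0x1000 - 1)
 * is non-zero.
 */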
4542 
4543 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4544                                  int shmid, abi_ulong shmaddr, int shmflg)
4545 {
4546     CPUState *cpu = env_cpu(cpu_env);
4547     abi_long raddr;
4548     void *host_raddr;
4549     struct shmid_ds shm_info;
4550     int i,ret;
4551     abi_ulong shmlba;
4552 
4553     /* shmat pointers are always untagged */
4554 
4555     /* find out the length of the shared memory segment */
4556     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4557     if (is_error(ret)) {
4558         /* can't get length, bail out */
4559         return ret;
4560     }
4561 
4562     shmlba = target_shmlba(cpu_env);
4563 
4564     if (shmaddr & (shmlba - 1)) {
4565         if (shmflg & SHM_RND) {
4566             shmaddr &= ~(shmlba - 1);
4567         } else {
4568             return -TARGET_EINVAL;
4569         }
4570     }
4571     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4572         return -TARGET_EINVAL;
4573     }
4574 
4575     mmap_lock();
4576 
4577     /*
4578      * We're mapping shared memory, so ensure we generate code for parallel
4579      * execution and flush old translations.  This will work up to the level
4580      * supported by the host -- anything that requires EXCP_ATOMIC will not
4581      * be atomic with respect to an external process.
4582      */
4583     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4584         cpu->tcg_cflags |= CF_PARALLEL;
4585         tb_flush(cpu);
4586     }
4587 
4588     if (shmaddr)
4589         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4590     else {
4591         abi_ulong mmap_start;
4592 
4593         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4594         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4595 
4596         if (mmap_start == -1) {
4597             errno = ENOMEM;
4598             host_raddr = (void *)-1;
4599         } else
4600             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4601                                shmflg | SHM_REMAP);
4602     }
4603 
4604     if (host_raddr == (void *)-1) {
4605         mmap_unlock();
4606         return get_errno((long)host_raddr);
4607     }
4608     raddr=h2g((unsigned long)host_raddr);
4609 
4610     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4611                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4612                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4613 
4614     for (i = 0; i < N_SHM_REGIONS; i++) {
4615         if (!shm_regions[i].in_use) {
4616             shm_regions[i].in_use = true;
4617             shm_regions[i].start = raddr;
4618             shm_regions[i].size = shm_info.shm_segsz;
4619             break;
4620         }
4621     }
4622 
4623     mmap_unlock();
4624     return raddr;
4625 
4626 }
4627 
4628 static inline abi_long do_shmdt(abi_ulong shmaddr)
4629 {
4630     int i;
4631     abi_long rv;
4632 
4633     /* shmdt pointers are always untagged */
4634 
4635     mmap_lock();
4636 
4637     for (i = 0; i < N_SHM_REGIONS; ++i) {
4638         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4639             shm_regions[i].in_use = false;
4640             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4641             break;
4642         }
4643     }
4644     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4645 
4646     mmap_unlock();
4647 
4648     return rv;
4649 }
4650 
4651 #ifdef TARGET_NR_ipc
4652 /* ??? This only works with linear mappings.  */
4653 /* do_ipc() must return target values and target errnos. */
4654 static abi_long do_ipc(CPUArchState *cpu_env,
4655                        unsigned int call, abi_long first,
4656                        abi_long second, abi_long third,
4657                        abi_long ptr, abi_long fifth)
4658 {
4659     int version;
4660     abi_long ret = 0;
4661 
4662     version = call >> 16;
4663     call &= 0xffff;
4664 
4665     switch (call) {
4666     case IPCOP_semop:
4667         ret = do_semtimedop(first, ptr, second, 0, false);
4668         break;
4669     case IPCOP_semtimedop:
4670     /*
4671      * The s390 sys_ipc variant has only five parameters instead of six
4672      * (as in the default variant); the only difference is the handling of
4673      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer to
4674      * a struct timespec while the generic variant uses the fifth parameter.
4675      */
4676 #if defined(TARGET_S390X)
4677         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4678 #else
4679         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4680 #endif
4681         break;
4682 
4683     case IPCOP_semget:
4684         ret = get_errno(semget(first, second, third));
4685         break;
4686 
4687     case IPCOP_semctl: {
4688         /* The semun argument to semctl is passed by value, so dereference the
4689          * ptr argument. */
4690         abi_ulong atptr;
4691         get_user_ual(atptr, ptr);
4692         ret = do_semctl(first, second, third, atptr);
4693         break;
4694     }
4695 
4696     case IPCOP_msgget:
4697         ret = get_errno(msgget(first, second));
4698         break;
4699 
4700     case IPCOP_msgsnd:
4701         ret = do_msgsnd(first, ptr, second, third);
4702         break;
4703 
4704     case IPCOP_msgctl:
4705         ret = do_msgctl(first, second, ptr);
4706         break;
4707 
4708     case IPCOP_msgrcv:
4709         switch (version) {
4710         case 0:
4711             {
4712                 struct target_ipc_kludge {
4713                     abi_long msgp;
4714                     abi_long msgtyp;
4715                 } *tmp;
4716 
4717                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4718                     ret = -TARGET_EFAULT;
4719                     break;
4720                 }
4721 
4722                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4723 
4724                 unlock_user_struct(tmp, ptr, 0);
4725                 break;
4726             }
4727         default:
4728             ret = do_msgrcv(first, ptr, second, fifth, third);
4729         }
4730         break;
4731 
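    /*
     * Argument convention of the SHMAT sub-call, matching the kernel's
     * sys_ipc(): first = shmid, ptr = attach address, second = shmflg;
     * the attached address is written back through the pointer in third.
     */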
4732     case IPCOP_shmat:
4733         switch (version) {
4734         default:
4735         {
4736             abi_ulong raddr;
4737             raddr = do_shmat(cpu_env, first, ptr, second);
4738             if (is_error(raddr))
4739                 return get_errno(raddr);
4740             if (put_user_ual(raddr, third))
4741                 return -TARGET_EFAULT;
4742             break;
4743         }
4744         case 1:
4745             ret = -TARGET_EINVAL;
4746             break;
4747         }
4748         break;
4749     case IPCOP_shmdt:
4750         ret = do_shmdt(ptr);
4751         break;
4752 
4753     case IPCOP_shmget:
4754         /* IPC_* flag values are the same on all linux platforms */
4755         ret = get_errno(shmget(first, second, third));
4756         break;
4757 
4758     /* IPC_* and SHM_* command values are the same on all linux platforms */
4759     case IPCOP_shmctl:
4760         ret = do_shmctl(first, second, ptr);
4761         break;
4762     default:
4763         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4764                       call, version);
4765         ret = -TARGET_ENOSYS;
4766         break;
4767     }
4768     return ret;
4769 }
4770 #endif
4771 
4772 /* kernel structure types definitions */
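/*
 * syscall_types.h is included twice below: the first pass turns each
 * STRUCT() entry into a STRUCT_<name> enumerator, the second emits the
 * struct_<name>_def argtype arrays consumed by the thunk converter.
 */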
4773 
4774 #define STRUCT(name, ...) STRUCT_ ## name,
4775 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4776 enum {
4777 #include "syscall_types.h"
4778 STRUCT_MAX
4779 };
4780 #undef STRUCT
4781 #undef STRUCT_SPECIAL
4782 
4783 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4784 #define STRUCT_SPECIAL(name)
4785 #include "syscall_types.h"
4786 #undef STRUCT
4787 #undef STRUCT_SPECIAL
4788 
4789 #define MAX_STRUCT_SIZE 4096
4790 
4791 #ifdef CONFIG_FIEMAP
4792 /* So fiemap access checks don't overflow on 32 bit systems.
4793  * This is very slightly smaller than the limit imposed by
4794  * the underlying kernel.
4795  */
4796 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4797                             / sizeof(struct fiemap_extent))
4798 
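/*
 * Typical guest usage this handler has to cope with (a sketch, not part
 * of QEMU): a first call with fm_extent_count == 0 only counts extents,
 * a second call supplies a large enough array to be filled in.
 *
 *   struct fiemap probe = { .fm_length = ~0ULL };       // whole file
 *   ioctl(fd, FS_IOC_FIEMAP, &probe);                   // count only
 *   size_t sz = sizeof(struct fiemap)
 *               + probe.fm_mapped_extents * sizeof(struct fiemap_extent);
 *   struct fiemap *fm = calloc(1, sz);
 *   fm->fm_length = ~0ULL;
 *   fm->fm_extent_count = probe.fm_mapped_extents;
 *   ioctl(fd, FS_IOC_FIEMAP, fm);                       // extents filled in
 */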
4799 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4800                                        int fd, int cmd, abi_long arg)
4801 {
4802     /* The parameter for this ioctl is a struct fiemap followed
4803      * by an array of struct fiemap_extent whose size is set
4804      * in fiemap->fm_extent_count. The array is filled in by the
4805      * ioctl.
4806      */
4807     int target_size_in, target_size_out;
4808     struct fiemap *fm;
4809     const argtype *arg_type = ie->arg_type;
4810     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4811     void *argptr, *p;
4812     abi_long ret;
4813     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4814     uint32_t outbufsz;
4815     int free_fm = 0;
4816 
4817     assert(arg_type[0] == TYPE_PTR);
4818     assert(ie->access == IOC_RW);
4819     arg_type++;
4820     target_size_in = thunk_type_size(arg_type, 0);
4821     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4822     if (!argptr) {
4823         return -TARGET_EFAULT;
4824     }
4825     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4826     unlock_user(argptr, arg, 0);
4827     fm = (struct fiemap *)buf_temp;
4828     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4829         return -TARGET_EINVAL;
4830     }
4831 
4832     outbufsz = sizeof (*fm) +
4833         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4834 
4835     if (outbufsz > MAX_STRUCT_SIZE) {
4836         /* We can't fit all the extents into the fixed size buffer.
4837          * Allocate one that is large enough and use it instead.
4838          */
4839         fm = g_try_malloc(outbufsz);
4840         if (!fm) {
4841             return -TARGET_ENOMEM;
4842         }
4843         memcpy(fm, buf_temp, sizeof(struct fiemap));
4844         free_fm = 1;
4845     }
4846     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4847     if (!is_error(ret)) {
4848         target_size_out = target_size_in;
4849         /* An extent_count of 0 means we were only counting the extents
4850          * so there are no structs to copy
4851          */
4852         if (fm->fm_extent_count != 0) {
4853             target_size_out += fm->fm_mapped_extents * extent_size;
4854         }
4855         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4856         if (!argptr) {
4857             ret = -TARGET_EFAULT;
4858         } else {
4859             /* Convert the struct fiemap */
4860             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4861             if (fm->fm_extent_count != 0) {
4862                 p = argptr + target_size_in;
4863                 /* ...and then all the struct fiemap_extents */
4864                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4865                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4866                                   THUNK_TARGET);
4867                     p += extent_size;
4868                 }
4869             }
4870             unlock_user(argptr, arg, target_size_out);
4871         }
4872     }
4873     if (free_fm) {
4874         g_free(fm);
4875     }
4876     return ret;
4877 }
4878 #endif
4879 
4880 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4881                                 int fd, int cmd, abi_long arg)
4882 {
4883     const argtype *arg_type = ie->arg_type;
4884     int target_size;
4885     void *argptr;
4886     int ret;
4887     struct ifconf *host_ifconf;
4888     uint32_t outbufsz;
4889     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4890     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4891     int target_ifreq_size;
4892     int nb_ifreq;
4893     int free_buf = 0;
4894     int i;
4895     int target_ifc_len;
4896     abi_long target_ifc_buf;
4897     int host_ifc_len;
4898     char *host_ifc_buf;
4899 
4900     assert(arg_type[0] == TYPE_PTR);
4901     assert(ie->access == IOC_RW);
4902 
4903     arg_type++;
4904     target_size = thunk_type_size(arg_type, 0);
4905 
4906     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4907     if (!argptr)
4908         return -TARGET_EFAULT;
4909     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4910     unlock_user(argptr, arg, 0);
4911 
4912     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4913     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4914     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4915 
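    /*
     * The target's struct ifreq (as described by ifreq_max_type) and the
     * host's struct ifreq may differ in size, so the guest-supplied buffer
     * length is re-expressed as a number of entries and rescaled to host
     * units before the ioctl, then scaled back for the copy-out below.
     */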
4916     if (target_ifc_buf != 0) {
4917         target_ifc_len = host_ifconf->ifc_len;
4918         nb_ifreq = target_ifc_len / target_ifreq_size;
4919         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4920 
4921         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4922         if (outbufsz > MAX_STRUCT_SIZE) {
4923             /*
4924              * We can't fit all the extents into the fixed size buffer.
4925              * Allocate one that is large enough and use it instead.
4926              */
4927             host_ifconf = g_try_malloc(outbufsz);
4928             if (!host_ifconf) {
4929                 return -TARGET_ENOMEM;
4930             }
4931             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4932             free_buf = 1;
4933         }
4934         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4935 
4936         host_ifconf->ifc_len = host_ifc_len;
4937     } else {
4938         host_ifc_buf = NULL;
4939     }
4940     host_ifconf->ifc_buf = host_ifc_buf;
4941 
4942     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4943     if (!is_error(ret)) {
4944         /* convert host ifc_len to target ifc_len */
4945 
4946         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4947         target_ifc_len = nb_ifreq * target_ifreq_size;
4948         host_ifconf->ifc_len = target_ifc_len;
4949 
4950         /* restore target ifc_buf */
4951 
4952         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4953 
4954         /* copy struct ifconf to target user */
4955 
4956         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4957         if (!argptr)
4958             return -TARGET_EFAULT;
4959         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4960         unlock_user(argptr, arg, target_size);
4961 
4962         if (target_ifc_buf != 0) {
4963             /* copy ifreq[] to target user */
4964             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4965             for (i = 0; i < nb_ifreq ; i++) {
4966                 thunk_convert(argptr + i * target_ifreq_size,
4967                               host_ifc_buf + i * sizeof(struct ifreq),
4968                               ifreq_arg_type, THUNK_TARGET);
4969             }
4970             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4971         }
4972     }
4973 
4974     if (free_buf) {
4975         g_free(host_ifconf);
4976     }
4977 
4978     return ret;
4979 }
4980 
4981 #if defined(CONFIG_USBFS)
4982 #if HOST_LONG_BITS > 64
4983 #error USBDEVFS thunks do not support >64 bit hosts yet.
4984 #endif
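/*
 * Each guest URB is wrapped in a live_urb: host_urb is what is actually
 * submitted to the kernel, and USBDEVFS_REAPURB returns a pointer to that
 * member, from which the wrapper (and hence the guest URB address and the
 * locked guest buffer) is recovered with offsetof() arithmetic.
 */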
4985 struct live_urb {
4986     uint64_t target_urb_adr;
4987     uint64_t target_buf_adr;
4988     char *target_buf_ptr;
4989     struct usbdevfs_urb host_urb;
4990 };
4991 
4992 static GHashTable *usbdevfs_urb_hashtable(void)
4993 {
4994     static GHashTable *urb_hashtable;
4995 
4996     if (!urb_hashtable) {
4997         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4998     }
4999     return urb_hashtable;
5000 }
5001 
5002 static void urb_hashtable_insert(struct live_urb *urb)
5003 {
5004     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5005     g_hash_table_insert(urb_hashtable, urb, urb);
5006 }
5007 
5008 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5009 {
5010     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5011     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5012 }
5013 
5014 static void urb_hashtable_remove(struct live_urb *urb)
5015 {
5016     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5017     g_hash_table_remove(urb_hashtable, urb);
5018 }
5019 
5020 static abi_long
5021 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5022                           int fd, int cmd, abi_long arg)
5023 {
5024     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5025     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5026     struct live_urb *lurb;
5027     void *argptr;
5028     uint64_t hurb;
5029     int target_size;
5030     uintptr_t target_urb_adr;
5031     abi_long ret;
5032 
5033     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5034 
5035     memset(buf_temp, 0, sizeof(uint64_t));
5036     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5037     if (is_error(ret)) {
5038         return ret;
5039     }
5040 
5041     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5042     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5043     if (!lurb->target_urb_adr) {
5044         return -TARGET_EFAULT;
5045     }
5046     urb_hashtable_remove(lurb);
5047     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5048         lurb->host_urb.buffer_length);
5049     lurb->target_buf_ptr = NULL;
5050 
5051     /* restore the guest buffer pointer */
5052     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5053 
5054     /* update the guest urb struct */
5055     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5056     if (!argptr) {
5057         g_free(lurb);
5058         return -TARGET_EFAULT;
5059     }
5060     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5061     unlock_user(argptr, lurb->target_urb_adr, target_size);
5062 
5063     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5064     /* write back the urb handle */
5065     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5066     if (!argptr) {
5067         g_free(lurb);
5068         return -TARGET_EFAULT;
5069     }
5070 
5071     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5072     target_urb_adr = lurb->target_urb_adr;
5073     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5074     unlock_user(argptr, arg, target_size);
5075 
5076     g_free(lurb);
5077     return ret;
5078 }
5079 
5080 static abi_long
5081 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5082                              uint8_t *buf_temp __attribute__((unused)),
5083                              int fd, int cmd, abi_long arg)
5084 {
5085     struct live_urb *lurb;
5086 
5087     /* map target address back to host URB with metadata. */
5088     lurb = urb_hashtable_lookup(arg);
5089     if (!lurb) {
5090         return -TARGET_EFAULT;
5091     }
5092     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5093 }
5094 
5095 static abi_long
5096 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5097                             int fd, int cmd, abi_long arg)
5098 {
5099     const argtype *arg_type = ie->arg_type;
5100     int target_size;
5101     abi_long ret;
5102     void *argptr;
5103     int rw_dir;
5104     struct live_urb *lurb;
5105 
5106     /*
5107      * Each submitted URB needs to map to a unique ID for the
5108      * kernel, and that unique ID needs to be a pointer to
5109      * host memory, so we allocate a wrapper for each URB.
5110      * Isochronous transfers have a variable-length struct.
5111      */
5112     arg_type++;
5113     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5114 
5115     /* construct host copy of urb and metadata */
5116     lurb = g_try_new0(struct live_urb, 1);
5117     if (!lurb) {
5118         return -TARGET_ENOMEM;
5119     }
5120 
5121     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5122     if (!argptr) {
5123         g_free(lurb);
5124         return -TARGET_EFAULT;
5125     }
5126     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5127     unlock_user(argptr, arg, 0);
5128 
5129     lurb->target_urb_adr = arg;
5130     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5131 
5132     /* buffer space used depends on endpoint type so lock the entire buffer */
5133     /* control type urbs should check the buffer contents for true direction */
5134     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5135     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5136         lurb->host_urb.buffer_length, 1);
5137     if (lurb->target_buf_ptr == NULL) {
5138         g_free(lurb);
5139         return -TARGET_EFAULT;
5140     }
5141 
5142     /* update buffer pointer in host copy */
5143     lurb->host_urb.buffer = lurb->target_buf_ptr;
5144 
5145     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5146     if (is_error(ret)) {
5147         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5148         g_free(lurb);
5149     } else {
5150         urb_hashtable_insert(lurb);
5151     }
5152 
5153     return ret;
5154 }
5155 #endif /* CONFIG_USBFS */
5156 
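/*
 * Device-mapper ioctls share a fixed struct dm_ioctl header followed by a
 * command-specific, variable-sized payload starting at data_start.  The
 * header is converted with the thunk machinery; the payload is converted
 * by hand in both directions, using an oversized bounce buffer because
 * buf_temp cannot hold it.
 */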
5157 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5158                             int cmd, abi_long arg)
5159 {
5160     void *argptr;
5161     struct dm_ioctl *host_dm;
5162     abi_long guest_data;
5163     uint32_t guest_data_size;
5164     int target_size;
5165     const argtype *arg_type = ie->arg_type;
5166     abi_long ret;
5167     void *big_buf = NULL;
5168     char *host_data;
5169 
5170     arg_type++;
5171     target_size = thunk_type_size(arg_type, 0);
5172     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5173     if (!argptr) {
5174         ret = -TARGET_EFAULT;
5175         goto out;
5176     }
5177     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5178     unlock_user(argptr, arg, 0);
5179 
5180     /* buf_temp is too small, so fetch things into a bigger buffer */
5181     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5182     memcpy(big_buf, buf_temp, target_size);
5183     buf_temp = big_buf;
5184     host_dm = big_buf;
5185 
5186     guest_data = arg + host_dm->data_start;
5187     if ((guest_data - arg) < 0) {
5188         ret = -TARGET_EINVAL;
5189         goto out;
5190     }
5191     guest_data_size = host_dm->data_size - host_dm->data_start;
5192     host_data = (char*)host_dm + host_dm->data_start;
5193 
5194     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5195     if (!argptr) {
5196         ret = -TARGET_EFAULT;
5197         goto out;
5198     }
5199 
5200     switch (ie->host_cmd) {
5201     case DM_REMOVE_ALL:
5202     case DM_LIST_DEVICES:
5203     case DM_DEV_CREATE:
5204     case DM_DEV_REMOVE:
5205     case DM_DEV_SUSPEND:
5206     case DM_DEV_STATUS:
5207     case DM_DEV_WAIT:
5208     case DM_TABLE_STATUS:
5209     case DM_TABLE_CLEAR:
5210     case DM_TABLE_DEPS:
5211     case DM_LIST_VERSIONS:
5212         /* no input data */
5213         break;
5214     case DM_DEV_RENAME:
5215     case DM_DEV_SET_GEOMETRY:
5216         /* data contains only strings */
5217         memcpy(host_data, argptr, guest_data_size);
5218         break;
5219     case DM_TARGET_MSG:
5220         memcpy(host_data, argptr, guest_data_size);
5221         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5222         break;
5223     case DM_TABLE_LOAD:
5224     {
5225         void *gspec = argptr;
5226         void *cur_data = host_data;
5227         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5228         int spec_size = thunk_type_size(arg_type, 0);
5229         int i;
5230 
5231         for (i = 0; i < host_dm->target_count; i++) {
5232             struct dm_target_spec *spec = cur_data;
5233             uint32_t next;
5234             int slen;
5235 
5236             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5237             slen = strlen((char*)gspec + spec_size) + 1;
5238             next = spec->next;
5239             spec->next = sizeof(*spec) + slen;
5240             strcpy((char*)&spec[1], gspec + spec_size);
5241             gspec += next;
5242             cur_data += spec->next;
5243         }
5244         break;
5245     }
5246     default:
5247         ret = -TARGET_EINVAL;
5248         unlock_user(argptr, guest_data, 0);
5249         goto out;
5250     }
5251     unlock_user(argptr, guest_data, 0);
5252 
5253     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5254     if (!is_error(ret)) {
5255         guest_data = arg + host_dm->data_start;
5256         guest_data_size = host_dm->data_size - host_dm->data_start;
5257         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5258         switch (ie->host_cmd) {
5259         case DM_REMOVE_ALL:
5260         case DM_DEV_CREATE:
5261         case DM_DEV_REMOVE:
5262         case DM_DEV_RENAME:
5263         case DM_DEV_SUSPEND:
5264         case DM_DEV_STATUS:
5265         case DM_TABLE_LOAD:
5266         case DM_TABLE_CLEAR:
5267         case DM_TARGET_MSG:
5268         case DM_DEV_SET_GEOMETRY:
5269             /* no return data */
5270             break;
5271         case DM_LIST_DEVICES:
5272         {
5273             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5274             uint32_t remaining_data = guest_data_size;
5275             void *cur_data = argptr;
5276             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5277             int nl_size = 12; /* can't use thunk_size due to alignment */
5278 
5279             while (1) {
5280                 uint32_t next = nl->next;
5281                 if (next) {
5282                     nl->next = nl_size + (strlen(nl->name) + 1);
5283                 }
5284                 if (remaining_data < nl->next) {
5285                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5286                     break;
5287                 }
5288                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5289                 strcpy(cur_data + nl_size, nl->name);
5290                 cur_data += nl->next;
5291                 remaining_data -= nl->next;
5292                 if (!next) {
5293                     break;
5294                 }
5295                 nl = (void*)nl + next;
5296             }
5297             break;
5298         }
5299         case DM_DEV_WAIT:
5300         case DM_TABLE_STATUS:
5301         {
5302             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5303             void *cur_data = argptr;
5304             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5305             int spec_size = thunk_type_size(arg_type, 0);
5306             int i;
5307 
5308             for (i = 0; i < host_dm->target_count; i++) {
5309                 uint32_t next = spec->next;
5310                 int slen = strlen((char*)&spec[1]) + 1;
5311                 spec->next = (cur_data - argptr) + spec_size + slen;
5312                 if (guest_data_size < spec->next) {
5313                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5314                     break;
5315                 }
5316                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5317                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5318                 cur_data = argptr + spec->next;
5319                 spec = (void*)host_dm + host_dm->data_start + next;
5320             }
5321             break;
5322         }
5323         case DM_TABLE_DEPS:
5324         {
5325             void *hdata = (void*)host_dm + host_dm->data_start;
5326             int count = *(uint32_t*)hdata;
5327             uint64_t *hdev = hdata + 8;
5328             uint64_t *gdev = argptr + 8;
5329             int i;
5330 
5331             *(uint32_t*)argptr = tswap32(count);
5332             for (i = 0; i < count; i++) {
5333                 *gdev = tswap64(*hdev);
5334                 gdev++;
5335                 hdev++;
5336             }
5337             break;
5338         }
5339         case DM_LIST_VERSIONS:
5340         {
5341             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5342             uint32_t remaining_data = guest_data_size;
5343             void *cur_data = argptr;
5344             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5345             int vers_size = thunk_type_size(arg_type, 0);
5346 
5347             while (1) {
5348                 uint32_t next = vers->next;
5349                 if (next) {
5350                     vers->next = vers_size + (strlen(vers->name) + 1);
5351                 }
5352                 if (remaining_data < vers->next) {
5353                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5354                     break;
5355                 }
5356                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5357                 strcpy(cur_data + vers_size, vers->name);
5358                 cur_data += vers->next;
5359                 remaining_data -= vers->next;
5360                 if (!next) {
5361                     break;
5362                 }
5363                 vers = (void*)vers + next;
5364             }
5365             break;
5366         }
5367         default:
5368             unlock_user(argptr, guest_data, 0);
5369             ret = -TARGET_EINVAL;
5370             goto out;
5371         }
5372         unlock_user(argptr, guest_data, guest_data_size);
5373 
5374         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5375         if (!argptr) {
5376             ret = -TARGET_EFAULT;
5377             goto out;
5378         }
5379         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5380         unlock_user(argptr, arg, target_size);
5381     }
5382 out:
5383     g_free(big_buf);
5384     return ret;
5385 }
5386 
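/*
 * BLKPG takes a struct blkpg_ioctl_arg whose data member points at a
 * struct blkpg_partition, so two separate guest reads and conversions are
 * needed before the host ioctl can be issued with a host-side copy.
 */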
5387 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5388                                int cmd, abi_long arg)
5389 {
5390     void *argptr;
5391     int target_size;
5392     const argtype *arg_type = ie->arg_type;
5393     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5394     abi_long ret;
5395 
5396     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5397     struct blkpg_partition host_part;
5398 
5399     /* Read and convert blkpg */
5400     arg_type++;
5401     target_size = thunk_type_size(arg_type, 0);
5402     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5403     if (!argptr) {
5404         ret = -TARGET_EFAULT;
5405         goto out;
5406     }
5407     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5408     unlock_user(argptr, arg, 0);
5409 
5410     switch (host_blkpg->op) {
5411     case BLKPG_ADD_PARTITION:
5412     case BLKPG_DEL_PARTITION:
5413         /* payload is struct blkpg_partition */
5414         break;
5415     default:
5416         /* Unknown opcode */
5417         ret = -TARGET_EINVAL;
5418         goto out;
5419     }
5420 
5421     /* Read and convert blkpg->data */
5422     arg = (abi_long)(uintptr_t)host_blkpg->data;
5423     target_size = thunk_type_size(part_arg_type, 0);
5424     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5425     if (!argptr) {
5426         ret = -TARGET_EFAULT;
5427         goto out;
5428     }
5429     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5430     unlock_user(argptr, arg, 0);
5431 
5432     /* Swizzle the data pointer to our local copy and call! */
5433     host_blkpg->data = &host_part;
5434     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5435 
5436 out:
5437     return ret;
5438 }
5439 
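/*
 * SIOCADDRT/SIOCDELRT: struct rtentry embeds a pointer (rt_dev) to a
 * device-name string in guest memory, so the struct is converted field by
 * field here and the string is locked into host memory rather than letting
 * the generic thunk pass the raw guest pointer through to the host kernel.
 */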
5440 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5441                                 int fd, int cmd, abi_long arg)
5442 {
5443     const argtype *arg_type = ie->arg_type;
5444     const StructEntry *se;
5445     const argtype *field_types;
5446     const int *dst_offsets, *src_offsets;
5447     int target_size;
5448     void *argptr;
5449     abi_ulong *target_rt_dev_ptr = NULL;
5450     unsigned long *host_rt_dev_ptr = NULL;
5451     abi_long ret;
5452     int i;
5453 
5454     assert(ie->access == IOC_W);
5455     assert(*arg_type == TYPE_PTR);
5456     arg_type++;
5457     assert(*arg_type == TYPE_STRUCT);
5458     target_size = thunk_type_size(arg_type, 0);
5459     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5460     if (!argptr) {
5461         return -TARGET_EFAULT;
5462     }
5463     arg_type++;
5464     assert(*arg_type == (int)STRUCT_rtentry);
5465     se = struct_entries + *arg_type++;
5466     assert(se->convert[0] == NULL);
5467     /* convert struct here to be able to catch rt_dev string */
5468     field_types = se->field_types;
5469     dst_offsets = se->field_offsets[THUNK_HOST];
5470     src_offsets = se->field_offsets[THUNK_TARGET];
5471     for (i = 0; i < se->nb_fields; i++) {
5472         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5473             assert(*field_types == TYPE_PTRVOID);
5474             target_rt_dev_ptr = argptr + src_offsets[i];
5475             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5476             if (*target_rt_dev_ptr != 0) {
5477                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5478                                                   tswapal(*target_rt_dev_ptr));
5479                 if (!*host_rt_dev_ptr) {
5480                     unlock_user(argptr, arg, 0);
5481                     return -TARGET_EFAULT;
5482                 }
5483             } else {
5484                 *host_rt_dev_ptr = 0;
5485             }
5486             field_types++;
5487             continue;
5488         }
5489         field_types = thunk_convert(buf_temp + dst_offsets[i],
5490                                     argptr + src_offsets[i],
5491                                     field_types, THUNK_HOST);
5492     }
5493     unlock_user(argptr, arg, 0);
5494 
5495     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5496 
5497     assert(host_rt_dev_ptr != NULL);
5498     assert(target_rt_dev_ptr != NULL);
5499     if (*host_rt_dev_ptr != 0) {
5500         unlock_user((void *)*host_rt_dev_ptr,
5501                     *target_rt_dev_ptr, 0);
5502     }
5503     return ret;
5504 }
5505 
5506 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5507                                      int fd, int cmd, abi_long arg)
5508 {
5509     int sig = target_to_host_signal(arg);
5510     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5511 }
5512 
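/*
 * Both the _OLD and the newer 64-bit target ioctl numbers are served by
 * the host's native SIOCGSTAMP/SIOCGSTAMPNS; only the format copied back
 * to the guest differs.
 */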
5513 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5514                                     int fd, int cmd, abi_long arg)
5515 {
5516     struct timeval tv;
5517     abi_long ret;
5518 
5519     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5520     if (is_error(ret)) {
5521         return ret;
5522     }
5523 
5524     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5525         if (copy_to_user_timeval(arg, &tv)) {
5526             return -TARGET_EFAULT;
5527         }
5528     } else {
5529         if (copy_to_user_timeval64(arg, &tv)) {
5530             return -TARGET_EFAULT;
5531         }
5532     }
5533 
5534     return ret;
5535 }
5536 
5537 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5538                                       int fd, int cmd, abi_long arg)
5539 {
5540     struct timespec ts;
5541     abi_long ret;
5542 
5543     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5544     if (is_error(ret)) {
5545         return ret;
5546     }
5547 
5548     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5549         if (host_to_target_timespec(arg, &ts)) {
5550             return -TARGET_EFAULT;
5551         }
5552     } else {
5553         if (host_to_target_timespec64(arg, &ts)) {
5554             return -TARGET_EFAULT;
5555         }
5556     }
5557 
5558     return ret;
5559 }
5560 
5561 #ifdef TIOCGPTPEER
5562 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5563                                      int fd, int cmd, abi_long arg)
5564 {
5565     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5566     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5567 }
5568 #endif
5569 
5570 #ifdef HAVE_DRM_H
5571 
5572 static void unlock_drm_version(struct drm_version *host_ver,
5573                                struct target_drm_version *target_ver,
5574                                bool copy)
5575 {
5576     unlock_user(host_ver->name, target_ver->name,
5577                                 copy ? host_ver->name_len : 0);
5578     unlock_user(host_ver->date, target_ver->date,
5579                                 copy ? host_ver->date_len : 0);
5580     unlock_user(host_ver->desc, target_ver->desc,
5581                                 copy ? host_ver->desc_len : 0);
5582 }
5583 
5584 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5585                                           struct target_drm_version *target_ver)
5586 {
5587     memset(host_ver, 0, sizeof(*host_ver));
5588 
5589     __get_user(host_ver->name_len, &target_ver->name_len);
5590     if (host_ver->name_len) {
5591         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5592                                    target_ver->name_len, 0);
5593         if (!host_ver->name) {
5594             return -EFAULT;
5595         }
5596     }
5597 
5598     __get_user(host_ver->date_len, &target_ver->date_len);
5599     if (host_ver->date_len) {
5600         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5601                                    target_ver->date_len, 0);
5602         if (!host_ver->date) {
5603             goto err;
5604         }
5605     }
5606 
5607     __get_user(host_ver->desc_len, &target_ver->desc_len);
5608     if (host_ver->desc_len) {
5609         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5610                                    target_ver->desc_len, 0);
5611         if (!host_ver->desc) {
5612             goto err;
5613         }
5614     }
5615 
5616     return 0;
5617 err:
5618     unlock_drm_version(host_ver, target_ver, false);
5619     return -EFAULT;
5620 }
5621 
5622 static inline void host_to_target_drmversion(
5623                                           struct target_drm_version *target_ver,
5624                                           struct drm_version *host_ver)
5625 {
5626     __put_user(host_ver->version_major, &target_ver->version_major);
5627     __put_user(host_ver->version_minor, &target_ver->version_minor);
5628     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5629     __put_user(host_ver->name_len, &target_ver->name_len);
5630     __put_user(host_ver->date_len, &target_ver->date_len);
5631     __put_user(host_ver->desc_len, &target_ver->desc_len);
5632     unlock_drm_version(host_ver, target_ver, true);
5633 }
5634 
5635 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5636                              int fd, int cmd, abi_long arg)
5637 {
5638     struct drm_version *ver;
5639     struct target_drm_version *target_ver;
5640     abi_long ret;
5641 
5642     switch (ie->host_cmd) {
5643     case DRM_IOCTL_VERSION:
5644         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5645             return -TARGET_EFAULT;
5646         }
5647         ver = (struct drm_version *)buf_temp;
5648         ret = target_to_host_drmversion(ver, target_ver);
5649         if (!is_error(ret)) {
5650             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5651             if (is_error(ret)) {
5652                 unlock_drm_version(ver, target_ver, false);
5653             } else {
5654                 host_to_target_drmversion(target_ver, ver);
5655             }
5656         }
5657         unlock_user_struct(target_ver, arg, 0);
5658         return ret;
5659     }
5660     return -TARGET_ENOSYS;
5661 }
5662 
5663 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5664                                            struct drm_i915_getparam *gparam,
5665                                            int fd, abi_long arg)
5666 {
5667     abi_long ret;
5668     int value;
5669     struct target_drm_i915_getparam *target_gparam;
5670 
5671     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5672         return -TARGET_EFAULT;
5673     }
5674 
5675     __get_user(gparam->param, &target_gparam->param);
5676     gparam->value = &value;
5677     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5678     put_user_s32(value, target_gparam->value);
5679 
5680     unlock_user_struct(target_gparam, arg, 0);
5681     return ret;
5682 }
5683 
5684 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5685                                   int fd, int cmd, abi_long arg)
5686 {
5687     switch (ie->host_cmd) {
5688     case DRM_IOCTL_I915_GETPARAM:
5689         return do_ioctl_drm_i915_getparam(ie,
5690                                           (struct drm_i915_getparam *)buf_temp,
5691                                           fd, arg);
5692     default:
5693         return -TARGET_ENOSYS;
5694     }
5695 }
5696 
5697 #endif
5698 
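/*
 * TUNSETTXFILTER: struct tun_filter is variable length, a flags/count
 * header followed by count Ethernet addresses.  The header and the
 * address array are therefore copied in separately, and the total size is
 * checked against MAX_STRUCT_SIZE.
 */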
5699 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5700                                         int fd, int cmd, abi_long arg)
5701 {
5702     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5703     struct tun_filter *target_filter;
5704     char *target_addr;
5705 
5706     assert(ie->access == IOC_W);
5707 
5708     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5709     if (!target_filter) {
5710         return -TARGET_EFAULT;
5711     }
5712     filter->flags = tswap16(target_filter->flags);
5713     filter->count = tswap16(target_filter->count);
5714     unlock_user(target_filter, arg, 0);
5715 
5716     if (filter->count) {
5717         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5718             MAX_STRUCT_SIZE) {
5719             return -TARGET_EFAULT;
5720         }
5721 
5722         target_addr = lock_user(VERIFY_READ,
5723                                 arg + offsetof(struct tun_filter, addr),
5724                                 filter->count * ETH_ALEN, 1);
5725         if (!target_addr) {
5726             return -TARGET_EFAULT;
5727         }
5728         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5729         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5730     }
5731 
5732     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5733 }
5734 
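/*
 * ioctls.h expands into the dispatch table below: IOCTL() entries rely on
 * the generic thunk conversion in do_ioctl(), IOCTL_SPECIAL() supplies a
 * custom do_ioctl callback, and IOCTL_IGNORE() leaves host_cmd zero so the
 * request fails with -TARGET_ENOSYS.
 */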
5735 IOCTLEntry ioctl_entries[] = {
5736 #define IOCTL(cmd, access, ...) \
5737     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5738 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5739     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5740 #define IOCTL_IGNORE(cmd) \
5741     { TARGET_ ## cmd, 0, #cmd },
5742 #include "ioctls.h"
5743     { 0, 0, },
5744 };
5745 
5746 /* ??? Implement proper locking for ioctls.  */
5747 /* do_ioctl() Must return target values and target errnos. */
5748 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5749 {
5750     const IOCTLEntry *ie;
5751     const argtype *arg_type;
5752     abi_long ret;
5753     uint8_t buf_temp[MAX_STRUCT_SIZE];
5754     int target_size;
5755     void *argptr;
5756 
5757     ie = ioctl_entries;
5758     for(;;) {
5759         if (ie->target_cmd == 0) {
5760             qemu_log_mask(
5761                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5762             return -TARGET_ENOSYS;
5763         }
5764         if (ie->target_cmd == cmd)
5765             break;
5766         ie++;
5767     }
5768     arg_type = ie->arg_type;
5769     if (ie->do_ioctl) {
5770         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5771     } else if (!ie->host_cmd) {
5772         /* Some architectures define BSD ioctls in their headers
5773            that are not implemented in Linux.  */
5774         return -TARGET_ENOSYS;
5775     }
5776 
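    /*
     * Generic marshalling for entries without a custom handler: scalar
     * arguments are passed through unchanged; TYPE_PTR arguments are
     * converted via buf_temp, copied in for IOC_W, copied back out for
     * IOC_R, and both ways for IOC_RW.
     */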
5777     switch(arg_type[0]) {
5778     case TYPE_NULL:
5779         /* no argument */
5780         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5781         break;
5782     case TYPE_PTRVOID:
5783     case TYPE_INT:
5784     case TYPE_LONG:
5785     case TYPE_ULONG:
5786         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5787         break;
5788     case TYPE_PTR:
5789         arg_type++;
5790         target_size = thunk_type_size(arg_type, 0);
5791         switch(ie->access) {
5792         case IOC_R:
5793             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5794             if (!is_error(ret)) {
5795                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5796                 if (!argptr)
5797                     return -TARGET_EFAULT;
5798                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5799                 unlock_user(argptr, arg, target_size);
5800             }
5801             break;
5802         case IOC_W:
5803             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5804             if (!argptr)
5805                 return -TARGET_EFAULT;
5806             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5807             unlock_user(argptr, arg, 0);
5808             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5809             break;
5810         default:
5811         case IOC_RW:
5812             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5813             if (!argptr)
5814                 return -TARGET_EFAULT;
5815             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5816             unlock_user(argptr, arg, 0);
5817             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5818             if (!is_error(ret)) {
5819                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5820                 if (!argptr)
5821                     return -TARGET_EFAULT;
5822                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5823                 unlock_user(argptr, arg, target_size);
5824             }
5825             break;
5826         }
5827         break;
5828     default:
5829         qemu_log_mask(LOG_UNIMP,
5830                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5831                       (long)cmd, arg_type[0]);
5832         ret = -TARGET_ENOSYS;
5833         break;
5834     }
5835     return ret;
5836 }
5837 
5838 static const bitmask_transtbl iflag_tbl[] = {
5839         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5840         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5841         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5842         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5843         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5844         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5845         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5846         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5847         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5848         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5849         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5850         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5851         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5852         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5853         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5854         { 0, 0, 0, 0 }
5855 };
5856 
5857 static const bitmask_transtbl oflag_tbl[] = {
5858 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5859 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5860 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5861 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5862 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5863 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5864 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5865 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5866 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5867 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5868 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5869 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5870 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5871 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5872 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5873 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5874 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5875 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5876 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5877 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5878 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5879 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5880 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5881 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5882 	{ 0, 0, 0, 0 }
5883 };
5884 
5885 static const bitmask_transtbl cflag_tbl[] = {
5886 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5887 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5888 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5889 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5890 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5891 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5892 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5893 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5894 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5895 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5896 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5897 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5898 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5899 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5900 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5901 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5902 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5903 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5904 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5905 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5906 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5907 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5908 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5909 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5910 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5911 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5912 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5913 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5914 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5915 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5916 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5917 	{ 0, 0, 0, 0 }
5918 };
5919 
5920 static const bitmask_transtbl lflag_tbl[] = {
5921   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5922   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5923   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5924   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5925   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5926   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5927   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5928   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5929   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5930   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5931   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5932   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5933   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5934   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5935   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5936   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5937   { 0, 0, 0, 0 }
5938 };
5939 
5940 static void target_to_host_termios (void *dst, const void *src)
5941 {
5942     struct host_termios *host = dst;
5943     const struct target_termios *target = src;
5944 
5945     host->c_iflag =
5946         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5947     host->c_oflag =
5948         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5949     host->c_cflag =
5950         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5951     host->c_lflag =
5952         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5953     host->c_line = target->c_line;
5954 
5955     memset(host->c_cc, 0, sizeof(host->c_cc));
5956     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5957     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5958     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5959     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5960     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5961     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5962     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5963     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5964     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5965     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5966     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5967     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5968     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5969     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5970     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5971     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5972     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5973 }
5974 
5975 static void host_to_target_termios (void *dst, const void *src)
5976 {
5977     struct target_termios *target = dst;
5978     const struct host_termios *host = src;
5979 
5980     target->c_iflag =
5981         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5982     target->c_oflag =
5983         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5984     target->c_cflag =
5985         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5986     target->c_lflag =
5987         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5988     target->c_line = host->c_line;
5989 
5990     memset(target->c_cc, 0, sizeof(target->c_cc));
5991     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5992     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5993     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5994     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5995     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5996     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5997     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5998     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5999     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6000     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6001     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6002     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6003     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6004     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6005     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6006     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6007     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6008 }
6009 
6010 static const StructEntry struct_termios_def = {
6011     .convert = { host_to_target_termios, target_to_host_termios },
6012     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6013     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6014     .print = print_termios,
6015 };
6016 
6017 static const bitmask_transtbl mmap_flags_tbl[] = {
6018     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6019     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6020     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6021     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6022       MAP_ANONYMOUS, MAP_ANONYMOUS },
6023     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6024       MAP_GROWSDOWN, MAP_GROWSDOWN },
6025     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6026       MAP_DENYWRITE, MAP_DENYWRITE },
6027     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6028       MAP_EXECUTABLE, MAP_EXECUTABLE },
6029     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6030     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6031       MAP_NORESERVE, MAP_NORESERVE },
6032     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6033     /* MAP_STACK had been ignored by the kernel for quite some time.
6034        Recognize it for the target insofar as we do not want to pass
6035        it through to the host.  */
6036     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6037     { 0, 0, 0, 0 }
6038 };
6039 
6040 /*
6041  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6042  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6043  */
6044 #if defined(TARGET_I386)
6045 
6046 /* NOTE: there is really one LDT for all the threads */
6047 static uint8_t *ldt_table;
6048 
6049 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6050 {
6051     int size;
6052     void *p;
6053 
6054     if (!ldt_table)
6055         return 0;
6056     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6057     if (size > bytecount)
6058         size = bytecount;
6059     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6060     if (!p)
6061         return -TARGET_EFAULT;
6062     /* ??? Should this be byteswapped?  */
6063     memcpy(p, ldt_table, size);
6064     unlock_user(p, ptr, size);
6065     return size;
6066 }
6067 
6068 /* XXX: add locking support */
6069 static abi_long write_ldt(CPUX86State *env,
6070                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6071 {
6072     struct target_modify_ldt_ldt_s ldt_info;
6073     struct target_modify_ldt_ldt_s *target_ldt_info;
6074     int seg_32bit, contents, read_exec_only, limit_in_pages;
6075     int seg_not_present, useable, lm;
6076     uint32_t *lp, entry_1, entry_2;
6077 
6078     if (bytecount != sizeof(ldt_info))
6079         return -TARGET_EINVAL;
6080     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6081         return -TARGET_EFAULT;
6082     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6083     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6084     ldt_info.limit = tswap32(target_ldt_info->limit);
6085     ldt_info.flags = tswap32(target_ldt_info->flags);
6086     unlock_user_struct(target_ldt_info, ptr, 0);
6087 
6088     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6089         return -TARGET_EINVAL;
6090     seg_32bit = ldt_info.flags & 1;
6091     contents = (ldt_info.flags >> 1) & 3;
6092     read_exec_only = (ldt_info.flags >> 3) & 1;
6093     limit_in_pages = (ldt_info.flags >> 4) & 1;
6094     seg_not_present = (ldt_info.flags >> 5) & 1;
6095     useable = (ldt_info.flags >> 6) & 1;
6096 #ifdef TARGET_ABI32
6097     lm = 0;
6098 #else
6099     lm = (ldt_info.flags >> 7) & 1;
6100 #endif
6101     if (contents == 3) {
6102         if (oldmode)
6103             return -TARGET_EINVAL;
6104         if (seg_not_present == 0)
6105             return -TARGET_EINVAL;
6106     }
6107     /* allocate the LDT */
6108     if (!ldt_table) {
6109         env->ldt.base = target_mmap(0,
6110                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6111                                     PROT_READ|PROT_WRITE,
6112                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6113         if (env->ldt.base == -1)
6114             return -TARGET_ENOMEM;
6115         memset(g2h_untagged(env->ldt.base), 0,
6116                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6117         env->ldt.limit = 0xffff;
6118         ldt_table = g2h_untagged(env->ldt.base);
6119     }
6120 
6121     /* NOTE: same code as Linux kernel */
6122     /* Allow LDTs to be cleared by the user. */
6123     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6124         if (oldmode ||
6125             (contents == 0             &&
6126              read_exec_only == 1       &&
6127              seg_32bit == 0            &&
6128              limit_in_pages == 0       &&
6129              seg_not_present == 1      &&
6130              useable == 0 )) {
6131             entry_1 = 0;
6132             entry_2 = 0;
6133             goto install;
6134         }
6135     }
6136 
6137     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6138         (ldt_info.limit & 0x0ffff);
6139     entry_2 = (ldt_info.base_addr & 0xff000000) |
6140         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6141         (ldt_info.limit & 0xf0000) |
6142         ((read_exec_only ^ 1) << 9) |
6143         (contents << 10) |
6144         ((seg_not_present ^ 1) << 15) |
6145         (seg_32bit << 22) |
6146         (limit_in_pages << 23) |
6147         (lm << 21) |
6148         0x7000;
6149     if (!oldmode)
6150         entry_2 |= (useable << 20);
6151 
6152     /* Install the new entry ...  */
6153 install:
6154     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6155     lp[0] = tswap32(entry_1);
6156     lp[1] = tswap32(entry_2);
6157     return 0;
6158 }
6159 
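/*
 * entry_1/entry_2 above follow the i386 segment descriptor layout:
 * entry_1 holds base[15:0] and limit[15:0], entry_2 holds base[31:24],
 * limit[19:16], base[23:16] and the attribute bits.  As an illustration
 * (attribute bits aside), base 0x12345678 with limit 0xfffff encodes as
 * entry_1 = 0x5678ffff and entry_2 = 0x120f0034 | flags.
 */
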
6160 /* specific and weird i386 syscalls */
6161 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6162                               unsigned long bytecount)
6163 {
6164     abi_long ret;
6165 
6166     switch (func) {
6167     case 0:
6168         ret = read_ldt(ptr, bytecount);
6169         break;
6170     case 1:
6171         ret = write_ldt(env, ptr, bytecount, 1);
6172         break;
6173     case 0x11:
6174         ret = write_ldt(env, ptr, bytecount, 0);
6175         break;
6176     default:
6177         ret = -TARGET_ENOSYS;
6178         break;
6179     }
6180     return ret;
6181 }
6182 
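/*
 * A guest typically reaches this through the raw syscall; for
 * illustration only (guest-side pseudo-usage, not part of this file):
 *
 *     struct user_desc ud = { .entry_number = 0, .base_addr = base,
 *                             .limit = 0xfffff, .seg_32bit = 1 };
 *     syscall(SYS_modify_ldt, 0x11, &ud, sizeof(ud));
 *
 * func 0 reads the table back, func 1 uses the legacy descriptor
 * format, and func 0x11 is the modern write format; both writes are
 * handled by write_ldt() above, differing only in the oldmode checks.
 */
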
6183 #if defined(TARGET_ABI32)
6184 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6185 {
6186     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6187     struct target_modify_ldt_ldt_s ldt_info;
6188     struct target_modify_ldt_ldt_s *target_ldt_info;
6189     int seg_32bit, contents, read_exec_only, limit_in_pages;
6190     int seg_not_present, useable, lm;
6191     uint32_t *lp, entry_1, entry_2;
6192     int i;
6193 
6194     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6195     if (!target_ldt_info)
6196         return -TARGET_EFAULT;
6197     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6198     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6199     ldt_info.limit = tswap32(target_ldt_info->limit);
6200     ldt_info.flags = tswap32(target_ldt_info->flags);
6201     if (ldt_info.entry_number == -1) {
6202         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6203             if (gdt_table[i] == 0) {
6204                 ldt_info.entry_number = i;
6205                 target_ldt_info->entry_number = tswap32(i);
6206                 break;
6207             }
6208         }
6209     }
6210     unlock_user_struct(target_ldt_info, ptr, 1);
6211 
6212     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6213         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6214            return -TARGET_EINVAL;
6215     seg_32bit = ldt_info.flags & 1;
6216     contents = (ldt_info.flags >> 1) & 3;
6217     read_exec_only = (ldt_info.flags >> 3) & 1;
6218     limit_in_pages = (ldt_info.flags >> 4) & 1;
6219     seg_not_present = (ldt_info.flags >> 5) & 1;
6220     useable = (ldt_info.flags >> 6) & 1;
6221 #ifdef TARGET_ABI32
6222     lm = 0;
6223 #else
6224     lm = (ldt_info.flags >> 7) & 1;
6225 #endif
6226 
6227     if (contents == 3) {
6228         if (seg_not_present == 0)
6229             return -TARGET_EINVAL;
6230     }
6231 
6232     /* NOTE: same code as Linux kernel */
6233     /* Allow LDTs to be cleared by the user. */
6234     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6235         if ((contents == 0             &&
6236              read_exec_only == 1       &&
6237              seg_32bit == 0            &&
6238              limit_in_pages == 0       &&
6239              seg_not_present == 1      &&
6240              useable == 0 )) {
6241             entry_1 = 0;
6242             entry_2 = 0;
6243             goto install;
6244         }
6245     }
6246 
6247     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6248         (ldt_info.limit & 0x0ffff);
6249     entry_2 = (ldt_info.base_addr & 0xff000000) |
6250         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6251         (ldt_info.limit & 0xf0000) |
6252         ((read_exec_only ^ 1) << 9) |
6253         (contents << 10) |
6254         ((seg_not_present ^ 1) << 15) |
6255         (seg_32bit << 22) |
6256         (limit_in_pages << 23) |
6257         (useable << 20) |
6258         (lm << 21) |
6259         0x7000;
6260 
6261     /* Install the new entry ...  */
6262 install:
6263     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6264     lp[0] = tswap32(entry_1);
6265     lp[1] = tswap32(entry_2);
6266     return 0;
6267 }
6268 
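/*
 * As in the kernel's set_thread_area(), an entry_number of -1 asks for
 * a free GDT TLS slot: the loop above picks the first empty entry in
 * [TARGET_GDT_ENTRY_TLS_MIN, TARGET_GDT_ENTRY_TLS_MAX] and writes the
 * chosen index back to the guest structure so the guest can build its
 * segment selector from it.
 */
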
6269 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6270 {
6271     struct target_modify_ldt_ldt_s *target_ldt_info;
6272     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6273     uint32_t base_addr, limit, flags;
6274     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6275     int seg_not_present, useable, lm;
6276     uint32_t *lp, entry_1, entry_2;
6277 
6278     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6279     if (!target_ldt_info)
6280         return -TARGET_EFAULT;
6281     idx = tswap32(target_ldt_info->entry_number);
6282     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6283         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6284         unlock_user_struct(target_ldt_info, ptr, 1);
6285         return -TARGET_EINVAL;
6286     }
6287     lp = (uint32_t *)(gdt_table + idx);
6288     entry_1 = tswap32(lp[0]);
6289     entry_2 = tswap32(lp[1]);
6290 
6291     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6292     contents = (entry_2 >> 10) & 3;
6293     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6294     seg_32bit = (entry_2 >> 22) & 1;
6295     limit_in_pages = (entry_2 >> 23) & 1;
6296     useable = (entry_2 >> 20) & 1;
6297 #ifdef TARGET_ABI32
6298     lm = 0;
6299 #else
6300     lm = (entry_2 >> 21) & 1;
6301 #endif
6302     flags = (seg_32bit << 0) | (contents << 1) |
6303         (read_exec_only << 3) | (limit_in_pages << 4) |
6304         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6305     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6306     base_addr = (entry_1 >> 16) |
6307         (entry_2 & 0xff000000) |
6308         ((entry_2 & 0xff) << 16);
6309     target_ldt_info->base_addr = tswapal(base_addr);
6310     target_ldt_info->limit = tswap32(limit);
6311     target_ldt_info->flags = tswap32(flags);
6312     unlock_user_struct(target_ldt_info, ptr, 1);
6313     return 0;
6314 }
6315 
6316 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6317 {
6318     return -TARGET_ENOSYS;
6319 }
6320 #else
6321 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6322 {
6323     abi_long ret = 0;
6324     abi_ulong val;
6325     int idx;
6326 
6327     switch(code) {
6328     case TARGET_ARCH_SET_GS:
6329     case TARGET_ARCH_SET_FS:
6330         if (code == TARGET_ARCH_SET_GS)
6331             idx = R_GS;
6332         else
6333             idx = R_FS;
6334         cpu_x86_load_seg(env, idx, 0);
6335         env->segs[idx].base = addr;
6336         break;
6337     case TARGET_ARCH_GET_GS:
6338     case TARGET_ARCH_GET_FS:
6339         if (code == TARGET_ARCH_GET_GS)
6340             idx = R_GS;
6341         else
6342             idx = R_FS;
6343         val = env->segs[idx].base;
6344         if (put_user(val, addr, abi_ulong))
6345             ret = -TARGET_EFAULT;
6346         break;
6347     default:
6348         ret = -TARGET_EINVAL;
6349         break;
6350     }
6351     return ret;
6352 }
6353 #endif /* defined(TARGET_ABI32) */
6354 #endif /* defined(TARGET_I386) */
6355 
6356 /*
6357  * These constants are generic.  Supply any that are missing from the host.
6358  */
6359 #ifndef PR_SET_NAME
6360 # define PR_SET_NAME    15
6361 # define PR_GET_NAME    16
6362 #endif
6363 #ifndef PR_SET_FP_MODE
6364 # define PR_SET_FP_MODE 45
6365 # define PR_GET_FP_MODE 46
6366 # define PR_FP_MODE_FR   (1 << 0)
6367 # define PR_FP_MODE_FRE  (1 << 1)
6368 #endif
6369 #ifndef PR_SVE_SET_VL
6370 # define PR_SVE_SET_VL  50
6371 # define PR_SVE_GET_VL  51
6372 # define PR_SVE_VL_LEN_MASK  0xffff
6373 # define PR_SVE_VL_INHERIT   (1 << 17)
6374 #endif
6375 #ifndef PR_PAC_RESET_KEYS
6376 # define PR_PAC_RESET_KEYS  54
6377 # define PR_PAC_APIAKEY   (1 << 0)
6378 # define PR_PAC_APIBKEY   (1 << 1)
6379 # define PR_PAC_APDAKEY   (1 << 2)
6380 # define PR_PAC_APDBKEY   (1 << 3)
6381 # define PR_PAC_APGAKEY   (1 << 4)
6382 #endif
6383 #ifndef PR_SET_TAGGED_ADDR_CTRL
6384 # define PR_SET_TAGGED_ADDR_CTRL 55
6385 # define PR_GET_TAGGED_ADDR_CTRL 56
6386 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6387 #endif
6388 #ifndef PR_MTE_TCF_SHIFT
6389 # define PR_MTE_TCF_SHIFT       1
6390 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6391 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6392 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6393 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6394 # define PR_MTE_TAG_SHIFT       3
6395 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6396 #endif
6397 #ifndef PR_SET_IO_FLUSHER
6398 # define PR_SET_IO_FLUSHER 57
6399 # define PR_GET_IO_FLUSHER 58
6400 #endif
6401 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6402 # define PR_SET_SYSCALL_USER_DISPATCH 59
6403 #endif
6404 #ifndef PR_SME_SET_VL
6405 # define PR_SME_SET_VL  63
6406 # define PR_SME_GET_VL  64
6407 # define PR_SME_VL_LEN_MASK  0xffff
6408 # define PR_SME_VL_INHERIT   (1 << 17)
6409 #endif
6410 
6411 #include "target_prctl.h"
6412 
6413 static abi_long do_prctl_inval0(CPUArchState *env)
6414 {
6415     return -TARGET_EINVAL;
6416 }
6417 
6418 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6419 {
6420     return -TARGET_EINVAL;
6421 }
6422 
6423 #ifndef do_prctl_get_fp_mode
6424 #define do_prctl_get_fp_mode do_prctl_inval0
6425 #endif
6426 #ifndef do_prctl_set_fp_mode
6427 #define do_prctl_set_fp_mode do_prctl_inval1
6428 #endif
6429 #ifndef do_prctl_sve_get_vl
6430 #define do_prctl_sve_get_vl do_prctl_inval0
6431 #endif
6432 #ifndef do_prctl_sve_set_vl
6433 #define do_prctl_sve_set_vl do_prctl_inval1
6434 #endif
6435 #ifndef do_prctl_reset_keys
6436 #define do_prctl_reset_keys do_prctl_inval1
6437 #endif
6438 #ifndef do_prctl_set_tagged_addr_ctrl
6439 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6440 #endif
6441 #ifndef do_prctl_get_tagged_addr_ctrl
6442 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6443 #endif
6444 #ifndef do_prctl_get_unalign
6445 #define do_prctl_get_unalign do_prctl_inval1
6446 #endif
6447 #ifndef do_prctl_set_unalign
6448 #define do_prctl_set_unalign do_prctl_inval1
6449 #endif
6450 #ifndef do_prctl_sme_get_vl
6451 #define do_prctl_sme_get_vl do_prctl_inval0
6452 #endif
6453 #ifndef do_prctl_sme_set_vl
6454 #define do_prctl_sme_set_vl do_prctl_inval1
6455 #endif
6456 
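/*
 * target_prctl.h (included above) is expected to provide real handlers
 * for the options a given target supports; for example an aarch64 build
 * would typically define do_prctl_sve_set_vl().  Anything the target
 * does not define collapses to the do_prctl_inval* stubs, so the
 * corresponding prctl option simply fails with -TARGET_EINVAL.
 */
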
6457 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6458                          abi_long arg3, abi_long arg4, abi_long arg5)
6459 {
6460     abi_long ret;
6461 
6462     switch (option) {
6463     case PR_GET_PDEATHSIG:
6464         {
6465             int deathsig;
6466             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6467                                   arg3, arg4, arg5));
6468             if (!is_error(ret) &&
6469                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6470                 return -TARGET_EFAULT;
6471             }
6472             return ret;
6473         }
6474     case PR_SET_PDEATHSIG:
6475         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6476                                arg3, arg4, arg5));
6477     case PR_GET_NAME:
6478         {
6479             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6480             if (!name) {
6481                 return -TARGET_EFAULT;
6482             }
6483             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6484                                   arg3, arg4, arg5));
6485             unlock_user(name, arg2, 16);
6486             return ret;
6487         }
6488     case PR_SET_NAME:
6489         {
6490             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6491             if (!name) {
6492                 return -TARGET_EFAULT;
6493             }
6494             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6495                                   arg3, arg4, arg5));
6496             unlock_user(name, arg2, 0);
6497             return ret;
6498         }
6499     case PR_GET_FP_MODE:
6500         return do_prctl_get_fp_mode(env);
6501     case PR_SET_FP_MODE:
6502         return do_prctl_set_fp_mode(env, arg2);
6503     case PR_SVE_GET_VL:
6504         return do_prctl_sve_get_vl(env);
6505     case PR_SVE_SET_VL:
6506         return do_prctl_sve_set_vl(env, arg2);
6507     case PR_SME_GET_VL:
6508         return do_prctl_sme_get_vl(env);
6509     case PR_SME_SET_VL:
6510         return do_prctl_sme_set_vl(env, arg2);
6511     case PR_PAC_RESET_KEYS:
6512         if (arg3 || arg4 || arg5) {
6513             return -TARGET_EINVAL;
6514         }
6515         return do_prctl_reset_keys(env, arg2);
6516     case PR_SET_TAGGED_ADDR_CTRL:
6517         if (arg3 || arg4 || arg5) {
6518             return -TARGET_EINVAL;
6519         }
6520         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6521     case PR_GET_TAGGED_ADDR_CTRL:
6522         if (arg2 || arg3 || arg4 || arg5) {
6523             return -TARGET_EINVAL;
6524         }
6525         return do_prctl_get_tagged_addr_ctrl(env);
6526 
6527     case PR_GET_UNALIGN:
6528         return do_prctl_get_unalign(env, arg2);
6529     case PR_SET_UNALIGN:
6530         return do_prctl_set_unalign(env, arg2);
6531 
6532     case PR_CAP_AMBIENT:
6533     case PR_CAPBSET_READ:
6534     case PR_CAPBSET_DROP:
6535     case PR_GET_DUMPABLE:
6536     case PR_SET_DUMPABLE:
6537     case PR_GET_KEEPCAPS:
6538     case PR_SET_KEEPCAPS:
6539     case PR_GET_SECUREBITS:
6540     case PR_SET_SECUREBITS:
6541     case PR_GET_TIMING:
6542     case PR_SET_TIMING:
6543     case PR_GET_TIMERSLACK:
6544     case PR_SET_TIMERSLACK:
6545     case PR_MCE_KILL:
6546     case PR_MCE_KILL_GET:
6547     case PR_GET_NO_NEW_PRIVS:
6548     case PR_SET_NO_NEW_PRIVS:
6549     case PR_GET_IO_FLUSHER:
6550     case PR_SET_IO_FLUSHER:
6551         /* No pointer arguments; these pass straight through to the host. */
6552         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6553 
6554     case PR_GET_CHILD_SUBREAPER:
6555     case PR_SET_CHILD_SUBREAPER:
6556     case PR_GET_SPECULATION_CTRL:
6557     case PR_SET_SPECULATION_CTRL:
6558     case PR_GET_TID_ADDRESS:
6559         /* TODO */
6560         return -TARGET_EINVAL;
6561 
6562     case PR_GET_FPEXC:
6563     case PR_SET_FPEXC:
6564         /* Was used for SPE on PowerPC. */
6565         return -TARGET_EINVAL;
6566 
6567     case PR_GET_ENDIAN:
6568     case PR_SET_ENDIAN:
6569     case PR_GET_FPEMU:
6570     case PR_SET_FPEMU:
6571     case PR_SET_MM:
6572     case PR_GET_SECCOMP:
6573     case PR_SET_SECCOMP:
6574     case PR_SET_SYSCALL_USER_DISPATCH:
6575     case PR_GET_THP_DISABLE:
6576     case PR_SET_THP_DISABLE:
6577     case PR_GET_TSC:
6578     case PR_SET_TSC:
6579         /* Disable to prevent the target disabling stuff we need. */
6580         return -TARGET_EINVAL;
6581 
6582     default:
6583         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6584                       option);
6585         return -TARGET_EINVAL;
6586     }
6587 }
6588 
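/*
 * Note on do_prctl() above: the 16-byte buffers used for PR_GET_NAME /
 * PR_SET_NAME match the kernel's TASK_COMM_LEN (15 characters plus the
 * terminating NUL), and PR_{GET,SET}_PDEATHSIG translate the signal
 * number between target and host conventions rather than passing it
 * through unchanged.
 */
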
6589 #define NEW_STACK_SIZE 0x40000
6590 
6591 
6592 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6593 typedef struct {
6594     CPUArchState *env;
6595     pthread_mutex_t mutex;
6596     pthread_cond_t cond;
6597     pthread_t thread;
6598     uint32_t tid;
6599     abi_ulong child_tidptr;
6600     abi_ulong parent_tidptr;
6601     sigset_t sigmask;
6602 } new_thread_info;
6603 
6604 static void *clone_func(void *arg)
6605 {
6606     new_thread_info *info = arg;
6607     CPUArchState *env;
6608     CPUState *cpu;
6609     TaskState *ts;
6610 
6611     rcu_register_thread();
6612     tcg_register_thread();
6613     env = info->env;
6614     cpu = env_cpu(env);
6615     thread_cpu = cpu;
6616     ts = (TaskState *)cpu->opaque;
6617     info->tid = sys_gettid();
6618     task_settid(ts);
6619     if (info->child_tidptr)
6620         put_user_u32(info->tid, info->child_tidptr);
6621     if (info->parent_tidptr)
6622         put_user_u32(info->tid, info->parent_tidptr);
6623     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6624     /* Enable signals.  */
6625     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6626     /* Signal to the parent that we're ready.  */
6627     pthread_mutex_lock(&info->mutex);
6628     pthread_cond_broadcast(&info->cond);
6629     pthread_mutex_unlock(&info->mutex);
6630     /* Wait until the parent has finished initializing the tls state.  */
6631     pthread_mutex_lock(&clone_lock);
6632     pthread_mutex_unlock(&clone_lock);
6633     cpu_loop(env);
6634     /* never exits */
6635     return NULL;
6636 }
6637 
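/*
 * clone_func() above hand-shakes with do_fork(): it publishes the new
 * TID (storing it at the requested tidptr addresses), signals
 * info->cond so the parent can return the TID, and then briefly
 * acquires clone_lock, which the parent still holds, so the new thread
 * does not enter cpu_loop() until the parent has finished its setup.
 */
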
6638 /* do_fork() must return host values and target errnos (unlike most
6639    other do_*() functions). */
6640 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6641                    abi_ulong parent_tidptr, target_ulong newtls,
6642                    abi_ulong child_tidptr)
6643 {
6644     CPUState *cpu = env_cpu(env);
6645     int ret;
6646     TaskState *ts;
6647     CPUState *new_cpu;
6648     CPUArchState *new_env;
6649     sigset_t sigmask;
6650 
6651     flags &= ~CLONE_IGNORED_FLAGS;
6652 
6653     /* Emulate vfork() with fork() */
6654     if (flags & CLONE_VFORK)
6655         flags &= ~(CLONE_VFORK | CLONE_VM);
6656 
6657     if (flags & CLONE_VM) {
6658         TaskState *parent_ts = (TaskState *)cpu->opaque;
6659         new_thread_info info;
6660         pthread_attr_t attr;
6661 
6662         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6663             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6664             return -TARGET_EINVAL;
6665         }
6666 
6667         ts = g_new0(TaskState, 1);
6668         init_task_state(ts);
6669 
6670         /* Grab a mutex so that thread setup appears atomic.  */
6671         pthread_mutex_lock(&clone_lock);
6672 
6673         /*
6674          * If this is our first additional thread, we need to ensure we
6675          * generate code for parallel execution and flush old translations.
6676          * Do this now so that the copy gets CF_PARALLEL too.
6677          */
6678         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6679             cpu->tcg_cflags |= CF_PARALLEL;
6680             tb_flush(cpu);
6681         }
6682 
6683         /* we create a new CPU instance. */
6684         new_env = cpu_copy(env);
6685         /* Init regs that differ from the parent.  */
6686         cpu_clone_regs_child(new_env, newsp, flags);
6687         cpu_clone_regs_parent(env, flags);
6688         new_cpu = env_cpu(new_env);
6689         new_cpu->opaque = ts;
6690         ts->bprm = parent_ts->bprm;
6691         ts->info = parent_ts->info;
6692         ts->signal_mask = parent_ts->signal_mask;
6693 
6694         if (flags & CLONE_CHILD_CLEARTID) {
6695             ts->child_tidptr = child_tidptr;
6696         }
6697 
6698         if (flags & CLONE_SETTLS) {
6699             cpu_set_tls (new_env, newtls);
6700         }
6701 
6702         memset(&info, 0, sizeof(info));
6703         pthread_mutex_init(&info.mutex, NULL);
6704         pthread_mutex_lock(&info.mutex);
6705         pthread_cond_init(&info.cond, NULL);
6706         info.env = new_env;
6707         if (flags & CLONE_CHILD_SETTID) {
6708             info.child_tidptr = child_tidptr;
6709         }
6710         if (flags & CLONE_PARENT_SETTID) {
6711             info.parent_tidptr = parent_tidptr;
6712         }
6713 
6714         ret = pthread_attr_init(&attr);
6715         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6716         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6717         /* It is not safe to deliver signals until the child has finished
6718            initializing, so temporarily block all signals.  */
6719         sigfillset(&sigmask);
6720         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6721         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6722 
6723         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6724         /* TODO: Free new CPU state if thread creation failed.  */
6725 
6726         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6727         pthread_attr_destroy(&attr);
6728         if (ret == 0) {
6729             /* Wait for the child to initialize.  */
6730             pthread_cond_wait(&info.cond, &info.mutex);
6731             ret = info.tid;
6732         } else {
6733             ret = -1;
6734         }
6735         pthread_mutex_unlock(&info.mutex);
6736         pthread_cond_destroy(&info.cond);
6737         pthread_mutex_destroy(&info.mutex);
6738         pthread_mutex_unlock(&clone_lock);
6739     } else {
6740         /* Without CLONE_VM, we treat this as a plain fork. */
6741         if (flags & CLONE_INVALID_FORK_FLAGS) {
6742             return -TARGET_EINVAL;
6743         }
6744 
6745         /* We can't support custom termination signals */
6746         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6747             return -TARGET_EINVAL;
6748         }
6749 
6750         if (block_signals()) {
6751             return -QEMU_ERESTARTSYS;
6752         }
6753 
6754         fork_start();
6755         ret = fork();
6756         if (ret == 0) {
6757             /* Child Process.  */
6758             cpu_clone_regs_child(env, newsp, flags);
6759             fork_end(1);
6760             /* There is a race condition here.  The parent process could
6761                theoretically read the TID in the child process before the child
6762                tid is set.  This would require using either ptrace
6763                (not implemented) or having *_tidptr point at a shared memory
6764                mapping.  We can't repeat the spinlock hack used above because
6765                the child process gets its own copy of the lock.  */
6766             if (flags & CLONE_CHILD_SETTID)
6767                 put_user_u32(sys_gettid(), child_tidptr);
6768             if (flags & CLONE_PARENT_SETTID)
6769                 put_user_u32(sys_gettid(), parent_tidptr);
6770             ts = (TaskState *)cpu->opaque;
6771             if (flags & CLONE_SETTLS)
6772                 cpu_set_tls (env, newtls);
6773             if (flags & CLONE_CHILD_CLEARTID)
6774                 ts->child_tidptr = child_tidptr;
6775         } else {
6776             cpu_clone_regs_parent(env, flags);
6777             fork_end(0);
6778         }
6779     }
6780     return ret;
6781 }
6782 
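/*
 * do_fork() above thus has two shapes: with CLONE_VM the "child" is a
 * new host pthread sharing the emulated address space (CLONE_VFORK is
 * emulated by dropping CLONE_VM and taking the fork path), while
 * without CLONE_VM it is a real host fork().  Either way the value
 * returned to the caller is a host TID/PID, as noted above the function.
 */
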
6783 /* Warning: does not handle Linux-specific flags... */
6784 static int target_to_host_fcntl_cmd(int cmd)
6785 {
6786     int ret;
6787 
6788     switch(cmd) {
6789     case TARGET_F_DUPFD:
6790     case TARGET_F_GETFD:
6791     case TARGET_F_SETFD:
6792     case TARGET_F_GETFL:
6793     case TARGET_F_SETFL:
6794     case TARGET_F_OFD_GETLK:
6795     case TARGET_F_OFD_SETLK:
6796     case TARGET_F_OFD_SETLKW:
6797         ret = cmd;
6798         break;
6799     case TARGET_F_GETLK:
6800         ret = F_GETLK64;
6801         break;
6802     case TARGET_F_SETLK:
6803         ret = F_SETLK64;
6804         break;
6805     case TARGET_F_SETLKW:
6806         ret = F_SETLKW64;
6807         break;
6808     case TARGET_F_GETOWN:
6809         ret = F_GETOWN;
6810         break;
6811     case TARGET_F_SETOWN:
6812         ret = F_SETOWN;
6813         break;
6814     case TARGET_F_GETSIG:
6815         ret = F_GETSIG;
6816         break;
6817     case TARGET_F_SETSIG:
6818         ret = F_SETSIG;
6819         break;
6820 #if TARGET_ABI_BITS == 32
6821     case TARGET_F_GETLK64:
6822         ret = F_GETLK64;
6823         break;
6824     case TARGET_F_SETLK64:
6825         ret = F_SETLK64;
6826         break;
6827     case TARGET_F_SETLKW64:
6828         ret = F_SETLKW64;
6829         break;
6830 #endif
6831     case TARGET_F_SETLEASE:
6832         ret = F_SETLEASE;
6833         break;
6834     case TARGET_F_GETLEASE:
6835         ret = F_GETLEASE;
6836         break;
6837 #ifdef F_DUPFD_CLOEXEC
6838     case TARGET_F_DUPFD_CLOEXEC:
6839         ret = F_DUPFD_CLOEXEC;
6840         break;
6841 #endif
6842     case TARGET_F_NOTIFY:
6843         ret = F_NOTIFY;
6844         break;
6845 #ifdef F_GETOWN_EX
6846     case TARGET_F_GETOWN_EX:
6847         ret = F_GETOWN_EX;
6848         break;
6849 #endif
6850 #ifdef F_SETOWN_EX
6851     case TARGET_F_SETOWN_EX:
6852         ret = F_SETOWN_EX;
6853         break;
6854 #endif
6855 #ifdef F_SETPIPE_SZ
6856     case TARGET_F_SETPIPE_SZ:
6857         ret = F_SETPIPE_SZ;
6858         break;
6859     case TARGET_F_GETPIPE_SZ:
6860         ret = F_GETPIPE_SZ;
6861         break;
6862 #endif
6863 #ifdef F_ADD_SEALS
6864     case TARGET_F_ADD_SEALS:
6865         ret = F_ADD_SEALS;
6866         break;
6867     case TARGET_F_GET_SEALS:
6868         ret = F_GET_SEALS;
6869         break;
6870 #endif
6871     default:
6872         ret = -TARGET_EINVAL;
6873         break;
6874     }
6875 
6876 #if defined(__powerpc64__)
6877     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13
6878      * and 14, which the kernel does not support. The glibc fcntl wrapper
6879      * adjusts them to 5, 6 and 7 before making the syscall(). Since we
6880      * make the syscall directly, adjust to what the kernel supports.
6881      */
6882     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6883         ret -= F_GETLK64 - 5;
6884     }
6885 #endif
6886 
6887     return ret;
6888 }
6889 
6890 #define FLOCK_TRANSTBL \
6891     switch (type) { \
6892     TRANSTBL_CONVERT(F_RDLCK); \
6893     TRANSTBL_CONVERT(F_WRLCK); \
6894     TRANSTBL_CONVERT(F_UNLCK); \
6895     }
6896 
6897 static int target_to_host_flock(int type)
6898 {
6899 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6900     FLOCK_TRANSTBL
6901 #undef  TRANSTBL_CONVERT
6902     return -TARGET_EINVAL;
6903 }
6904 
6905 static int host_to_target_flock(int type)
6906 {
6907 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6908     FLOCK_TRANSTBL
6909 #undef  TRANSTBL_CONVERT
6910     /* If we don't know how to convert the value coming
6911      * from the host, copy it to the target field as-is.
6912      */
6913     return type;
6914 }
6915 
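/*
 * With the TRANSTBL_CONVERT definition used in target_to_host_flock(),
 * FLOCK_TRANSTBL expands to e.g. "case TARGET_F_RDLCK: return F_RDLCK;",
 * and host_to_target_flock() redefines the macro to expand the same
 * table in the opposite direction.
 */
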
6916 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6917                                             abi_ulong target_flock_addr)
6918 {
6919     struct target_flock *target_fl;
6920     int l_type;
6921 
6922     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6923         return -TARGET_EFAULT;
6924     }
6925 
6926     __get_user(l_type, &target_fl->l_type);
6927     l_type = target_to_host_flock(l_type);
6928     if (l_type < 0) {
6929         return l_type;
6930     }
6931     fl->l_type = l_type;
6932     __get_user(fl->l_whence, &target_fl->l_whence);
6933     __get_user(fl->l_start, &target_fl->l_start);
6934     __get_user(fl->l_len, &target_fl->l_len);
6935     __get_user(fl->l_pid, &target_fl->l_pid);
6936     unlock_user_struct(target_fl, target_flock_addr, 0);
6937     return 0;
6938 }
6939 
6940 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6941                                           const struct flock64 *fl)
6942 {
6943     struct target_flock *target_fl;
6944     short l_type;
6945 
6946     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6947         return -TARGET_EFAULT;
6948     }
6949 
6950     l_type = host_to_target_flock(fl->l_type);
6951     __put_user(l_type, &target_fl->l_type);
6952     __put_user(fl->l_whence, &target_fl->l_whence);
6953     __put_user(fl->l_start, &target_fl->l_start);
6954     __put_user(fl->l_len, &target_fl->l_len);
6955     __put_user(fl->l_pid, &target_fl->l_pid);
6956     unlock_user_struct(target_fl, target_flock_addr, 1);
6957     return 0;
6958 }
6959 
6960 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6961 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6962 
6963 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6964 struct target_oabi_flock64 {
6965     abi_short l_type;
6966     abi_short l_whence;
6967     abi_llong l_start;
6968     abi_llong l_len;
6969     abi_int   l_pid;
6970 } QEMU_PACKED;
6971 
6972 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6973                                                    abi_ulong target_flock_addr)
6974 {
6975     struct target_oabi_flock64 *target_fl;
6976     int l_type;
6977 
6978     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6979         return -TARGET_EFAULT;
6980     }
6981 
6982     __get_user(l_type, &target_fl->l_type);
6983     l_type = target_to_host_flock(l_type);
6984     if (l_type < 0) {
6985         return l_type;
6986     }
6987     fl->l_type = l_type;
6988     __get_user(fl->l_whence, &target_fl->l_whence);
6989     __get_user(fl->l_start, &target_fl->l_start);
6990     __get_user(fl->l_len, &target_fl->l_len);
6991     __get_user(fl->l_pid, &target_fl->l_pid);
6992     unlock_user_struct(target_fl, target_flock_addr, 0);
6993     return 0;
6994 }
6995 
6996 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6997                                                  const struct flock64 *fl)
6998 {
6999     struct target_oabi_flock64 *target_fl;
7000     short l_type;
7001 
7002     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7003         return -TARGET_EFAULT;
7004     }
7005 
7006     l_type = host_to_target_flock(fl->l_type);
7007     __put_user(l_type, &target_fl->l_type);
7008     __put_user(fl->l_whence, &target_fl->l_whence);
7009     __put_user(fl->l_start, &target_fl->l_start);
7010     __put_user(fl->l_len, &target_fl->l_len);
7011     __put_user(fl->l_pid, &target_fl->l_pid);
7012     unlock_user_struct(target_fl, target_flock_addr, 1);
7013     return 0;
7014 }
7015 #endif
7016 
7017 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7018                                               abi_ulong target_flock_addr)
7019 {
7020     struct target_flock64 *target_fl;
7021     int l_type;
7022 
7023     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7024         return -TARGET_EFAULT;
7025     }
7026 
7027     __get_user(l_type, &target_fl->l_type);
7028     l_type = target_to_host_flock(l_type);
7029     if (l_type < 0) {
7030         return l_type;
7031     }
7032     fl->l_type = l_type;
7033     __get_user(fl->l_whence, &target_fl->l_whence);
7034     __get_user(fl->l_start, &target_fl->l_start);
7035     __get_user(fl->l_len, &target_fl->l_len);
7036     __get_user(fl->l_pid, &target_fl->l_pid);
7037     unlock_user_struct(target_fl, target_flock_addr, 0);
7038     return 0;
7039 }
7040 
7041 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7042                                             const struct flock64 *fl)
7043 {
7044     struct target_flock64 *target_fl;
7045     short l_type;
7046 
7047     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7048         return -TARGET_EFAULT;
7049     }
7050 
7051     l_type = host_to_target_flock(fl->l_type);
7052     __put_user(l_type, &target_fl->l_type);
7053     __put_user(fl->l_whence, &target_fl->l_whence);
7054     __put_user(fl->l_start, &target_fl->l_start);
7055     __put_user(fl->l_len, &target_fl->l_len);
7056     __put_user(fl->l_pid, &target_fl->l_pid);
7057     unlock_user_struct(target_fl, target_flock_addr, 1);
7058     return 0;
7059 }
7060 
7061 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7062 {
7063     struct flock64 fl64;
7064 #ifdef F_GETOWN_EX
7065     struct f_owner_ex fox;
7066     struct target_f_owner_ex *target_fox;
7067 #endif
7068     abi_long ret;
7069     int host_cmd = target_to_host_fcntl_cmd(cmd);
7070 
7071     if (host_cmd == -TARGET_EINVAL)
7072         return host_cmd;
7073 
7074     switch(cmd) {
7075     case TARGET_F_GETLK:
7076         ret = copy_from_user_flock(&fl64, arg);
7077         if (ret) {
7078             return ret;
7079         }
7080         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7081         if (ret == 0) {
7082             ret = copy_to_user_flock(arg, &fl64);
7083         }
7084         break;
7085 
7086     case TARGET_F_SETLK:
7087     case TARGET_F_SETLKW:
7088         ret = copy_from_user_flock(&fl64, arg);
7089         if (ret) {
7090             return ret;
7091         }
7092         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7093         break;
7094 
7095     case TARGET_F_GETLK64:
7096     case TARGET_F_OFD_GETLK:
7097         ret = copy_from_user_flock64(&fl64, arg);
7098         if (ret) {
7099             return ret;
7100         }
7101         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7102         if (ret == 0) {
7103             ret = copy_to_user_flock64(arg, &fl64);
7104         }
7105         break;
7106     case TARGET_F_SETLK64:
7107     case TARGET_F_SETLKW64:
7108     case TARGET_F_OFD_SETLK:
7109     case TARGET_F_OFD_SETLKW:
7110         ret = copy_from_user_flock64(&fl64, arg);
7111         if (ret) {
7112             return ret;
7113         }
7114         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7115         break;
7116 
7117     case TARGET_F_GETFL:
7118         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7119         if (ret >= 0) {
7120             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7121         }
7122         break;
7123 
7124     case TARGET_F_SETFL:
7125         ret = get_errno(safe_fcntl(fd, host_cmd,
7126                                    target_to_host_bitmask(arg,
7127                                                           fcntl_flags_tbl)));
7128         break;
7129 
7130 #ifdef F_GETOWN_EX
7131     case TARGET_F_GETOWN_EX:
7132         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7133         if (ret >= 0) {
7134             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7135                 return -TARGET_EFAULT;
7136             target_fox->type = tswap32(fox.type);
7137             target_fox->pid = tswap32(fox.pid);
7138             unlock_user_struct(target_fox, arg, 1);
7139         }
7140         break;
7141 #endif
7142 
7143 #ifdef F_SETOWN_EX
7144     case TARGET_F_SETOWN_EX:
7145         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7146             return -TARGET_EFAULT;
7147         fox.type = tswap32(target_fox->type);
7148         fox.pid = tswap32(target_fox->pid);
7149         unlock_user_struct(target_fox, arg, 0);
7150         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7151         break;
7152 #endif
7153 
7154     case TARGET_F_SETSIG:
7155         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7156         break;
7157 
7158     case TARGET_F_GETSIG:
7159         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7160         break;
7161 
7162     case TARGET_F_SETOWN:
7163     case TARGET_F_GETOWN:
7164     case TARGET_F_SETLEASE:
7165     case TARGET_F_GETLEASE:
7166     case TARGET_F_SETPIPE_SZ:
7167     case TARGET_F_GETPIPE_SZ:
7168     case TARGET_F_ADD_SEALS:
7169     case TARGET_F_GET_SEALS:
7170         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7171         break;
7172 
7173     default:
7174         ret = get_errno(safe_fcntl(fd, cmd, arg));
7175         break;
7176     }
7177     return ret;
7178 }
7179 
7180 #ifdef USE_UID16
7181 
7182 static inline int high2lowuid(int uid)
7183 {
7184     if (uid > 65535)
7185         return 65534;
7186     else
7187         return uid;
7188 }
7189 
7190 static inline int high2lowgid(int gid)
7191 {
7192     if (gid > 65535)
7193         return 65534;
7194     else
7195         return gid;
7196 }
7197 
7198 static inline int low2highuid(int uid)
7199 {
7200     if ((int16_t)uid == -1)
7201         return -1;
7202     else
7203         return uid;
7204 }
7205 
7206 static inline int low2highgid(int gid)
7207 {
7208     if ((int16_t)gid == -1)
7209         return -1;
7210     else
7211         return gid;
7212 }
7213 static inline int tswapid(int id)
7214 {
7215     return tswap16(id);
7216 }
7217 
7218 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7219 
7220 #else /* !USE_UID16 */
7221 static inline int high2lowuid(int uid)
7222 {
7223     return uid;
7224 }
7225 static inline int high2lowgid(int gid)
7226 {
7227     return gid;
7228 }
7229 static inline int low2highuid(int uid)
7230 {
7231     return uid;
7232 }
7233 static inline int low2highgid(int gid)
7234 {
7235     return gid;
7236 }
7237 static inline int tswapid(int id)
7238 {
7239     return tswap32(id);
7240 }
7241 
7242 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7243 
7244 #endif /* USE_UID16 */
7245 
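/*
 * Illustration of the 16-bit mappings above: a host uid of 100000 is
 * squashed to 65534 (the traditional "overflow" uid) before being
 * reported to a USE_UID16 guest, while a guest-supplied 0xffff widens
 * back to -1 so that the usual "leave this id unchanged" convention of
 * set*id(-1, ...) keeps working.
 */
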
7246 /* We must do direct syscalls for setting UID/GID, because we want to
7247  * implement the Linux system call semantics of "change only for this thread",
7248  * not the libc/POSIX semantics of "change for all threads in process".
7249  * (See http://ewontfix.com/17/ for more details.)
7250  * We use the 32-bit version of the syscalls if present; if it is not
7251  * then either the host architecture supports 32-bit UIDs natively with
7252  * the standard syscall, or the 16-bit UID is the best we can do.
7253  */
7254 #ifdef __NR_setuid32
7255 #define __NR_sys_setuid __NR_setuid32
7256 #else
7257 #define __NR_sys_setuid __NR_setuid
7258 #endif
7259 #ifdef __NR_setgid32
7260 #define __NR_sys_setgid __NR_setgid32
7261 #else
7262 #define __NR_sys_setgid __NR_setgid
7263 #endif
7264 #ifdef __NR_setresuid32
7265 #define __NR_sys_setresuid __NR_setresuid32
7266 #else
7267 #define __NR_sys_setresuid __NR_setresuid
7268 #endif
7269 #ifdef __NR_setresgid32
7270 #define __NR_sys_setresgid __NR_setresgid32
7271 #else
7272 #define __NR_sys_setresgid __NR_setresgid
7273 #endif
7274 
7275 _syscall1(int, sys_setuid, uid_t, uid)
7276 _syscall1(int, sys_setgid, gid_t, gid)
7277 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7278 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7279 
7280 void syscall_init(void)
7281 {
7282     IOCTLEntry *ie;
7283     const argtype *arg_type;
7284     int size;
7285 
7286     thunk_init(STRUCT_MAX);
7287 
7288 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7289 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7290 #include "syscall_types.h"
7291 #undef STRUCT
7292 #undef STRUCT_SPECIAL
7293 
7294     /* We patch the ioctl size if necessary.  We rely on the fact that
7295        no ioctl has all the bits set in its size field. */
7296     ie = ioctl_entries;
7297     while (ie->target_cmd != 0) {
7298         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7299             TARGET_IOC_SIZEMASK) {
7300             arg_type = ie->arg_type;
7301             if (arg_type[0] != TYPE_PTR) {
7302                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7303                         ie->target_cmd);
7304                 exit(1);
7305             }
7306             arg_type++;
7307             size = thunk_type_size(arg_type, 0);
7308             ie->target_cmd = (ie->target_cmd &
7309                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7310                 (size << TARGET_IOC_SIZESHIFT);
7311         }
7312 
7313         /* automatic consistency check if same arch */
7314 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7315     (defined(__x86_64__) && defined(TARGET_X86_64))
7316         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7317             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7318                     ie->name, ie->target_cmd, ie->host_cmd);
7319         }
7320 #endif
7321         ie++;
7322     }
7323 }
7324 
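/*
 * The size patching in syscall_init() handles ioctl definitions whose
 * size field is left as the all-ones placeholder: for such entries the
 * real size of the pointed-to structure is computed with
 * thunk_type_size() and spliced into target_cmd, so that later ioctl
 * decoding sees a correctly sized request number.
 */
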
7325 #ifdef TARGET_NR_truncate64
7326 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7327                                          abi_long arg2,
7328                                          abi_long arg3,
7329                                          abi_long arg4)
7330 {
7331     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7332         arg2 = arg3;
7333         arg3 = arg4;
7334     }
7335     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7336 }
7337 #endif
7338 
7339 #ifdef TARGET_NR_ftruncate64
7340 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7341                                           abi_long arg2,
7342                                           abi_long arg3,
7343                                           abi_long arg4)
7344 {
7345     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7346         arg2 = arg3;
7347         arg3 = arg4;
7348     }
7349     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7350 }
7351 #endif
7352 
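/*
 * target_truncate64()/target_ftruncate64() above receive the 64-bit
 * offset as two 32-bit register halves; on ABIs where regpairs_aligned()
 * reports that such pairs must start on an even register, the halves
 * arrive one slot later, hence the arg2/arg3 <- arg3/arg4 shuffle before
 * target_offset64() reassembles them.
 */
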
7353 #if defined(TARGET_NR_timer_settime) || \
7354     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7355 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7356                                                  abi_ulong target_addr)
7357 {
7358     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7359                                 offsetof(struct target_itimerspec,
7360                                          it_interval)) ||
7361         target_to_host_timespec(&host_its->it_value, target_addr +
7362                                 offsetof(struct target_itimerspec,
7363                                          it_value))) {
7364         return -TARGET_EFAULT;
7365     }
7366 
7367     return 0;
7368 }
7369 #endif
7370 
7371 #if defined(TARGET_NR_timer_settime64) || \
7372     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7373 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7374                                                    abi_ulong target_addr)
7375 {
7376     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7377                                   offsetof(struct target__kernel_itimerspec,
7378                                            it_interval)) ||
7379         target_to_host_timespec64(&host_its->it_value, target_addr +
7380                                   offsetof(struct target__kernel_itimerspec,
7381                                            it_value))) {
7382         return -TARGET_EFAULT;
7383     }
7384 
7385     return 0;
7386 }
7387 #endif
7388 
7389 #if ((defined(TARGET_NR_timerfd_gettime) || \
7390       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7391       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7392 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7393                                                  struct itimerspec *host_its)
7394 {
7395     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7396                                                        it_interval),
7397                                 &host_its->it_interval) ||
7398         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7399                                                        it_value),
7400                                 &host_its->it_value)) {
7401         return -TARGET_EFAULT;
7402     }
7403     return 0;
7404 }
7405 #endif
7406 
7407 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7408       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7409       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7410 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7411                                                    struct itimerspec *host_its)
7412 {
7413     if (host_to_target_timespec64(target_addr +
7414                                   offsetof(struct target__kernel_itimerspec,
7415                                            it_interval),
7416                                   &host_its->it_interval) ||
7417         host_to_target_timespec64(target_addr +
7418                                   offsetof(struct target__kernel_itimerspec,
7419                                            it_value),
7420                                   &host_its->it_value)) {
7421         return -TARGET_EFAULT;
7422     }
7423     return 0;
7424 }
7425 #endif
7426 
7427 #if defined(TARGET_NR_adjtimex) || \
7428     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7429 static inline abi_long target_to_host_timex(struct timex *host_tx,
7430                                             abi_long target_addr)
7431 {
7432     struct target_timex *target_tx;
7433 
7434     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7435         return -TARGET_EFAULT;
7436     }
7437 
7438     __get_user(host_tx->modes, &target_tx->modes);
7439     __get_user(host_tx->offset, &target_tx->offset);
7440     __get_user(host_tx->freq, &target_tx->freq);
7441     __get_user(host_tx->maxerror, &target_tx->maxerror);
7442     __get_user(host_tx->esterror, &target_tx->esterror);
7443     __get_user(host_tx->status, &target_tx->status);
7444     __get_user(host_tx->constant, &target_tx->constant);
7445     __get_user(host_tx->precision, &target_tx->precision);
7446     __get_user(host_tx->tolerance, &target_tx->tolerance);
7447     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7448     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7449     __get_user(host_tx->tick, &target_tx->tick);
7450     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7451     __get_user(host_tx->jitter, &target_tx->jitter);
7452     __get_user(host_tx->shift, &target_tx->shift);
7453     __get_user(host_tx->stabil, &target_tx->stabil);
7454     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7455     __get_user(host_tx->calcnt, &target_tx->calcnt);
7456     __get_user(host_tx->errcnt, &target_tx->errcnt);
7457     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7458     __get_user(host_tx->tai, &target_tx->tai);
7459 
7460     unlock_user_struct(target_tx, target_addr, 0);
7461     return 0;
7462 }
7463 
7464 static inline abi_long host_to_target_timex(abi_long target_addr,
7465                                             struct timex *host_tx)
7466 {
7467     struct target_timex *target_tx;
7468 
7469     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7470         return -TARGET_EFAULT;
7471     }
7472 
7473     __put_user(host_tx->modes, &target_tx->modes);
7474     __put_user(host_tx->offset, &target_tx->offset);
7475     __put_user(host_tx->freq, &target_tx->freq);
7476     __put_user(host_tx->maxerror, &target_tx->maxerror);
7477     __put_user(host_tx->esterror, &target_tx->esterror);
7478     __put_user(host_tx->status, &target_tx->status);
7479     __put_user(host_tx->constant, &target_tx->constant);
7480     __put_user(host_tx->precision, &target_tx->precision);
7481     __put_user(host_tx->tolerance, &target_tx->tolerance);
7482     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7483     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7484     __put_user(host_tx->tick, &target_tx->tick);
7485     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7486     __put_user(host_tx->jitter, &target_tx->jitter);
7487     __put_user(host_tx->shift, &target_tx->shift);
7488     __put_user(host_tx->stabil, &target_tx->stabil);
7489     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7490     __put_user(host_tx->calcnt, &target_tx->calcnt);
7491     __put_user(host_tx->errcnt, &target_tx->errcnt);
7492     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7493     __put_user(host_tx->tai, &target_tx->tai);
7494 
7495     unlock_user_struct(target_tx, target_addr, 1);
7496     return 0;
7497 }
7498 #endif
7499 
7500 
7501 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7502 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7503                                               abi_long target_addr)
7504 {
7505     struct target__kernel_timex *target_tx;
7506 
7507     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7508                                  offsetof(struct target__kernel_timex,
7509                                           time))) {
7510         return -TARGET_EFAULT;
7511     }
7512 
7513     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7514         return -TARGET_EFAULT;
7515     }
7516 
7517     __get_user(host_tx->modes, &target_tx->modes);
7518     __get_user(host_tx->offset, &target_tx->offset);
7519     __get_user(host_tx->freq, &target_tx->freq);
7520     __get_user(host_tx->maxerror, &target_tx->maxerror);
7521     __get_user(host_tx->esterror, &target_tx->esterror);
7522     __get_user(host_tx->status, &target_tx->status);
7523     __get_user(host_tx->constant, &target_tx->constant);
7524     __get_user(host_tx->precision, &target_tx->precision);
7525     __get_user(host_tx->tolerance, &target_tx->tolerance);
7526     __get_user(host_tx->tick, &target_tx->tick);
7527     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7528     __get_user(host_tx->jitter, &target_tx->jitter);
7529     __get_user(host_tx->shift, &target_tx->shift);
7530     __get_user(host_tx->stabil, &target_tx->stabil);
7531     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7532     __get_user(host_tx->calcnt, &target_tx->calcnt);
7533     __get_user(host_tx->errcnt, &target_tx->errcnt);
7534     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7535     __get_user(host_tx->tai, &target_tx->tai);
7536 
7537     unlock_user_struct(target_tx, target_addr, 0);
7538     return 0;
7539 }
7540 
7541 static inline abi_long host_to_target_timex64(abi_long target_addr,
7542                                               struct timex *host_tx)
7543 {
7544     struct target__kernel_timex *target_tx;
7545 
7546     if (copy_to_user_timeval64(target_addr +
7547                                offsetof(struct target__kernel_timex, time),
7548                                &host_tx->time)) {
7549         return -TARGET_EFAULT;
7550     }
7551 
7552     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7553         return -TARGET_EFAULT;
7554     }
7555 
7556     __put_user(host_tx->modes, &target_tx->modes);
7557     __put_user(host_tx->offset, &target_tx->offset);
7558     __put_user(host_tx->freq, &target_tx->freq);
7559     __put_user(host_tx->maxerror, &target_tx->maxerror);
7560     __put_user(host_tx->esterror, &target_tx->esterror);
7561     __put_user(host_tx->status, &target_tx->status);
7562     __put_user(host_tx->constant, &target_tx->constant);
7563     __put_user(host_tx->precision, &target_tx->precision);
7564     __put_user(host_tx->tolerance, &target_tx->tolerance);
7565     __put_user(host_tx->tick, &target_tx->tick);
7566     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7567     __put_user(host_tx->jitter, &target_tx->jitter);
7568     __put_user(host_tx->shift, &target_tx->shift);
7569     __put_user(host_tx->stabil, &target_tx->stabil);
7570     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7571     __put_user(host_tx->calcnt, &target_tx->calcnt);
7572     __put_user(host_tx->errcnt, &target_tx->errcnt);
7573     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7574     __put_user(host_tx->tai, &target_tx->tai);
7575 
7576     unlock_user_struct(target_tx, target_addr, 1);
7577     return 0;
7578 }
7579 #endif
7580 
7581 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7582 #define sigev_notify_thread_id _sigev_un._tid
7583 #endif
7584 
7585 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7586                                                abi_ulong target_addr)
7587 {
7588     struct target_sigevent *target_sevp;
7589 
7590     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7591         return -TARGET_EFAULT;
7592     }
7593 
7594     /* This union is awkward on 64 bit systems because it has a 32 bit
7595      * integer and a pointer in it; we follow the conversion approach
7596      * used for handling sigval types in signal.c so the guest should get
7597      * the correct value back even if we did a 64 bit byteswap and it's
7598      * using the 32 bit integer.
7599      */
7600     host_sevp->sigev_value.sival_ptr =
7601         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7602     host_sevp->sigev_signo =
7603         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7604     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7605     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7606 
7607     unlock_user_struct(target_sevp, target_addr, 1);
7608     return 0;
7609 }
7610 
7611 #if defined(TARGET_NR_mlockall)
7612 static inline int target_to_host_mlockall_arg(int arg)
7613 {
7614     int result = 0;
7615 
7616     if (arg & TARGET_MCL_CURRENT) {
7617         result |= MCL_CURRENT;
7618     }
7619     if (arg & TARGET_MCL_FUTURE) {
7620         result |= MCL_FUTURE;
7621     }
7622 #ifdef MCL_ONFAULT
7623     if (arg & TARGET_MCL_ONFAULT) {
7624         result |= MCL_ONFAULT;
7625     }
7626 #endif
7627 
7628     return result;
7629 }
7630 #endif
7631 
7632 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7633      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7634      defined(TARGET_NR_newfstatat))
7635 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7636                                              abi_ulong target_addr,
7637                                              struct stat *host_st)
7638 {
7639 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7640     if (cpu_env->eabi) {
7641         struct target_eabi_stat64 *target_st;
7642 
7643         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7644             return -TARGET_EFAULT;
7645         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7646         __put_user(host_st->st_dev, &target_st->st_dev);
7647         __put_user(host_st->st_ino, &target_st->st_ino);
7648 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7649         __put_user(host_st->st_ino, &target_st->__st_ino);
7650 #endif
7651         __put_user(host_st->st_mode, &target_st->st_mode);
7652         __put_user(host_st->st_nlink, &target_st->st_nlink);
7653         __put_user(host_st->st_uid, &target_st->st_uid);
7654         __put_user(host_st->st_gid, &target_st->st_gid);
7655         __put_user(host_st->st_rdev, &target_st->st_rdev);
7656         __put_user(host_st->st_size, &target_st->st_size);
7657         __put_user(host_st->st_blksize, &target_st->st_blksize);
7658         __put_user(host_st->st_blocks, &target_st->st_blocks);
7659         __put_user(host_st->st_atime, &target_st->target_st_atime);
7660         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7661         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7662 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7663         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7664         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7665         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7666 #endif
7667         unlock_user_struct(target_st, target_addr, 1);
7668     } else
7669 #endif
7670     {
7671 #if defined(TARGET_HAS_STRUCT_STAT64)
7672         struct target_stat64 *target_st;
7673 #else
7674         struct target_stat *target_st;
7675 #endif
7676 
7677         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7678             return -TARGET_EFAULT;
7679         memset(target_st, 0, sizeof(*target_st));
7680         __put_user(host_st->st_dev, &target_st->st_dev);
7681         __put_user(host_st->st_ino, &target_st->st_ino);
7682 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7683         __put_user(host_st->st_ino, &target_st->__st_ino);
7684 #endif
7685         __put_user(host_st->st_mode, &target_st->st_mode);
7686         __put_user(host_st->st_nlink, &target_st->st_nlink);
7687         __put_user(host_st->st_uid, &target_st->st_uid);
7688         __put_user(host_st->st_gid, &target_st->st_gid);
7689         __put_user(host_st->st_rdev, &target_st->st_rdev);
7690         /* XXX: better use of kernel struct */
7691         __put_user(host_st->st_size, &target_st->st_size);
7692         __put_user(host_st->st_blksize, &target_st->st_blksize);
7693         __put_user(host_st->st_blocks, &target_st->st_blocks);
7694         __put_user(host_st->st_atime, &target_st->target_st_atime);
7695         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7696         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7697 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7698         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7699         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7700         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7701 #endif
7702         unlock_user_struct(target_st, target_addr, 1);
7703     }
7704 
7705     return 0;
7706 }
7707 #endif
7708 
7709 #if defined(TARGET_NR_statx) && defined(__NR_statx)
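     /*
      * Copy statx results (held in a host-byte-order struct target_statx)
      * out to guest memory, converting each field to the guest's byte order.
      */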
7710 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7711                                             abi_ulong target_addr)
7712 {
7713     struct target_statx *target_stx;
7714 
7715     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7716         return -TARGET_EFAULT;
7717     }
7718     memset(target_stx, 0, sizeof(*target_stx));
7719 
7720     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7721     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7722     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7723     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7724     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7725     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7726     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7727     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7728     __put_user(host_stx->stx_size, &target_stx->stx_size);
7729     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7730     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7731     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7732     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7733     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7734     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7735     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7736     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7737     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7738     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7739     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7740     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7741     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7742     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7743 
7744     unlock_user_struct(target_stx, target_addr, 1);
7745 
7746     return 0;
7747 }
7748 #endif
7749 
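     /*
      * Issue the host futex syscall directly.  On 32-bit hosts this selects
      * __NR_futex_time64 when struct timespec carries a 64-bit tv_sec.
      */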
7750 static int do_sys_futex(int *uaddr, int op, int val,
7751                          const struct timespec *timeout, int *uaddr2,
7752                          int val3)
7753 {
7754 #if HOST_LONG_BITS == 64
7755 #if defined(__NR_futex)
7756     /* a 64-bit host always has a 64-bit time_t and no _time64 syscall variant */
7757     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7758 
7759 #endif
7760 #else /* HOST_LONG_BITS == 64 */
7761 #if defined(__NR_futex_time64)
7762     if (sizeof(timeout->tv_sec) == 8) {
7763         /* _time64 function on 32bit arch */
7764         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7765     }
7766 #endif
7767 #if defined(__NR_futex)
7768     /* old function on 32bit arch */
7769     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7770 #endif
7771 #endif /* HOST_LONG_BITS == 64 */
7772     g_assert_not_reached();
7773 }
7774 
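     /*
      * As do_sys_futex(), but issued through the safe_syscall wrapper so that
      * pending guest signals are handled correctly across the blocking call.
      */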
7775 static int do_safe_futex(int *uaddr, int op, int val,
7776                          const struct timespec *timeout, int *uaddr2,
7777                          int val3)
7778 {
7779 #if HOST_LONG_BITS == 64
7780 #if defined(__NR_futex)
7781     /* a 64-bit host always has a 64-bit time_t and no _time64 syscall variant */
7782     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7783 #endif
7784 #else /* HOST_LONG_BITS == 64 */
7785 #if defined(__NR_futex_time64)
7786     if (sizeof(timeout->tv_sec) == 8) {
7787         /* _time64 function on 32bit arch */
7788         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7789                                            val3));
7790     }
7791 #endif
7792 #if defined(__NR_futex)
7793     /* old function on 32bit arch */
7794     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7795 #endif
7796 #endif /* HOST_LONG_BITS == 64 */
7797     return -TARGET_ENOSYS;
7798 }
7799 
7800 /* ??? Using host futex calls even when target atomic operations
7801    are not really atomic probably breaks things.  However, implementing
7802    futexes locally would make futexes shared between multiple processes
7803    tricky.  On the other hand, they are probably useless in that case
7804    anyway, because guest atomic operations won't work either.  */
7805 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7806 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7807                     int op, int val, target_ulong timeout,
7808                     target_ulong uaddr2, int val3)
7809 {
7810     struct timespec ts, *pts = NULL;
7811     void *haddr2 = NULL;
7812     int base_op;
7813 
7814     /* We assume FUTEX_* constants are the same on both host and target. */
7815 #ifdef FUTEX_CMD_MASK
7816     base_op = op & FUTEX_CMD_MASK;
7817 #else
7818     base_op = op;
7819 #endif
7820     switch (base_op) {
7821     case FUTEX_WAIT:
7822     case FUTEX_WAIT_BITSET:
7823         val = tswap32(val);
7824         break;
7825     case FUTEX_WAIT_REQUEUE_PI:
7826         val = tswap32(val);
7827         haddr2 = g2h(cpu, uaddr2);
7828         break;
7829     case FUTEX_LOCK_PI:
7830     case FUTEX_LOCK_PI2:
7831         break;
7832     case FUTEX_WAKE:
7833     case FUTEX_WAKE_BITSET:
7834     case FUTEX_TRYLOCK_PI:
7835     case FUTEX_UNLOCK_PI:
7836         timeout = 0;
7837         break;
7838     case FUTEX_FD:
7839         val = target_to_host_signal(val);
7840         timeout = 0;
7841         break;
7842     case FUTEX_CMP_REQUEUE:
7843     case FUTEX_CMP_REQUEUE_PI:
7844         val3 = tswap32(val3);
7845         /* fall through */
7846     case FUTEX_REQUEUE:
7847     case FUTEX_WAKE_OP:
7848         /*
7849          * For these, the 4th argument is not TIMEOUT, but VAL2.
7850          * But the prototype of do_safe_futex takes a pointer, so
7851          * insert casts to satisfy the compiler.  We do not need
7852          * to tswap VAL2 since it's not compared to guest memory.
7853          */
7854         pts = (struct timespec *)(uintptr_t)timeout;
7855         timeout = 0;
7856         haddr2 = g2h(cpu, uaddr2);
7857         break;
7858     default:
7859         return -TARGET_ENOSYS;
7860     }
7861     if (timeout) {
7862         pts = &ts;
7863         if (time64
7864             ? target_to_host_timespec64(pts, timeout)
7865             : target_to_host_timespec(pts, timeout)) {
7866             return -TARGET_EFAULT;
7867         }
7868     }
7869     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7870 }
7871 #endif
7872 
7873 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
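     /*
      * Emulate name_to_handle_at(2): pass a scratch struct file_handle to the
      * host, then copy the opaque handle back to guest memory with the
      * handle_bytes and handle_type header fields byte-swapped, and store the
      * mount ID at mount_id.
      */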
7874 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7875                                      abi_long handle, abi_long mount_id,
7876                                      abi_long flags)
7877 {
7878     struct file_handle *target_fh;
7879     struct file_handle *fh;
7880     int mid = 0;
7881     abi_long ret;
7882     char *name;
7883     unsigned int size, total_size;
7884 
7885     if (get_user_s32(size, handle)) {
7886         return -TARGET_EFAULT;
7887     }
7888 
7889     name = lock_user_string(pathname);
7890     if (!name) {
7891         return -TARGET_EFAULT;
7892     }
7893 
7894     total_size = sizeof(struct file_handle) + size;
7895     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7896     if (!target_fh) {
7897         unlock_user(name, pathname, 0);
7898         return -TARGET_EFAULT;
7899     }
7900 
7901     fh = g_malloc0(total_size);
7902     fh->handle_bytes = size;
7903 
7904     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7905     unlock_user(name, pathname, 0);
7906 
7907     /* man name_to_handle_at(2):
7908      * Other than the use of the handle_bytes field, the caller should treat
7909      * the file_handle structure as an opaque data type
7910      */
7911 
7912     memcpy(target_fh, fh, total_size);
7913     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7914     target_fh->handle_type = tswap32(fh->handle_type);
7915     g_free(fh);
7916     unlock_user(target_fh, handle, total_size);
7917 
7918     if (put_user_s32(mid, mount_id)) {
7919         return -TARGET_EFAULT;
7920     }
7921 
7922     return ret;
7923 
7924 }
7925 #endif
7926 
7927 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
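     /*
      * Emulate open_by_handle_at(2): copy the guest's struct file_handle,
      * fix up the handle_bytes and handle_type header fields, and open it on
      * the host with translated open flags.
      */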
7928 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7929                                      abi_long flags)
7930 {
7931     struct file_handle *target_fh;
7932     struct file_handle *fh;
7933     unsigned int size, total_size;
7934     abi_long ret;
7935 
7936     if (get_user_s32(size, handle)) {
7937         return -TARGET_EFAULT;
7938     }
7939 
7940     total_size = sizeof(struct file_handle) + size;
7941     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7942     if (!target_fh) {
7943         return -TARGET_EFAULT;
7944     }
7945 
7946     fh = g_memdup(target_fh, total_size);
7947     fh->handle_bytes = size;
7948     fh->handle_type = tswap32(target_fh->handle_type);
7949 
7950     ret = get_errno(open_by_handle_at(mount_fd, fh,
7951                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7952 
7953     g_free(fh);
7954 
7955     unlock_user(target_fh, handle, total_size);
7956 
7957     return ret;
7958 }
7959 #endif
7960 
7961 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7962 
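     /*
      * Common implementation of signalfd() and signalfd4(): validate the
      * flags, convert the guest signal mask, and register an fd translator
      * for the returned descriptor.
      */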
7963 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7964 {
7965     int host_flags;
7966     target_sigset_t *target_mask;
7967     sigset_t host_mask;
7968     abi_long ret;
7969 
7970     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7971         return -TARGET_EINVAL;
7972     }
7973     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7974         return -TARGET_EFAULT;
7975     }
7976 
7977     target_to_host_sigset(&host_mask, target_mask);
7978 
7979     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7980 
7981     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7982     if (ret >= 0) {
7983         fd_trans_register(ret, &target_signalfd_trans);
7984     }
7985 
7986     unlock_user_struct(target_mask, mask, 0);
7987 
7988     return ret;
7989 }
7990 #endif
7991 
7992 /* Map host to target signal numbers for the wait family of syscalls.
7993    Assume all other status bits are the same.  */
7994 int host_to_target_waitstatus(int status)
7995 {
7996     if (WIFSIGNALED(status)) {
7997         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7998     }
7999     if (WIFSTOPPED(status)) {
8000         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8001                | (status & 0xff);
8002     }
8003     return status;
8004 }
8005 
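     /*
      * Emulate /proc/self/cmdline by writing each of the guest's argv[]
      * strings, including the terminating NUL bytes, to fd.
      */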
8006 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8007 {
8008     CPUState *cpu = env_cpu(cpu_env);
8009     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8010     int i;
8011 
8012     for (i = 0; i < bprm->argc; i++) {
8013         size_t len = strlen(bprm->argv[i]) + 1;
8014 
8015         if (write(fd, bprm->argv[i], len) != len) {
8016             return -1;
8017         }
8018     }
8019 
8020     return 0;
8021 }
8022 
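     /*
      * Emulate /proc/self/maps: walk the host's mappings and emit only the
      * ranges that correspond to valid guest addresses, translated to guest
      * addresses and guest page protections, with a "[stack]" annotation and
      * (where configured) a vsyscall entry synthesized as needed.
      */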
8023 static int open_self_maps(CPUArchState *cpu_env, int fd)
8024 {
8025     CPUState *cpu = env_cpu(cpu_env);
8026     TaskState *ts = cpu->opaque;
8027     GSList *map_info = read_self_maps();
8028     GSList *s;
8029     int count;
8030 
8031     for (s = map_info; s; s = g_slist_next(s)) {
8032         MapInfo *e = (MapInfo *) s->data;
8033 
8034         if (h2g_valid(e->start)) {
8035             unsigned long min = e->start;
8036             unsigned long max = e->end;
8037             int flags = page_get_flags(h2g(min));
8038             const char *path;
8039 
8040             max = h2g_valid(max - 1) ?
8041                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8042 
8043             if (page_check_range(h2g(min), max - min, flags) == -1) {
8044                 continue;
8045             }
8046 
8047 #ifdef TARGET_HPPA
8048             if (h2g(max) == ts->info->stack_limit) {
8049 #else
8050             if (h2g(min) == ts->info->stack_limit) {
8051 #endif
8052                 path = "[stack]";
8053             } else {
8054                 path = e->path;
8055             }
8056 
8057             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8058                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8059                             h2g(min), h2g(max - 1) + 1,
8060                             (flags & PAGE_READ) ? 'r' : '-',
8061                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8062                             (flags & PAGE_EXEC) ? 'x' : '-',
8063                             e->is_priv ? 'p' : 's',
8064                             (uint64_t) e->offset, e->dev, e->inode);
8065             if (path) {
8066                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8067             } else {
8068                 dprintf(fd, "\n");
8069             }
8070         }
8071     }
8072 
8073     free_self_maps(map_info);
8074 
8075 #ifdef TARGET_VSYSCALL_PAGE
8076     /*
8077      * We only support execution from the vsyscall page.
8078      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8079      */
8080     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8081                     " --xp 00000000 00:00 0",
8082                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8083     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8084 #endif
8085 
8086     return 0;
8087 }
8088 
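     /*
      * Emulate /proc/self/stat.  Only the fields QEMU can answer usefully
      * (pid, comm, ppid, starttime and the start of the stack) are filled in;
      * every other field is reported as zero.
      */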
8089 static int open_self_stat(CPUArchState *cpu_env, int fd)
8090 {
8091     CPUState *cpu = env_cpu(cpu_env);
8092     TaskState *ts = cpu->opaque;
8093     g_autoptr(GString) buf = g_string_new(NULL);
8094     int i;
8095 
8096     for (i = 0; i < 44; i++) {
8097         if (i == 0) {
8098             /* pid */
8099             g_string_printf(buf, FMT_pid " ", getpid());
8100         } else if (i == 1) {
8101             /* app name */
8102             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8103             bin = bin ? bin + 1 : ts->bprm->argv[0];
8104             g_string_printf(buf, "(%.15s) ", bin);
8105         } else if (i == 3) {
8106             /* ppid */
8107             g_string_printf(buf, FMT_pid " ", getppid());
8108         } else if (i == 21) {
8109             /* starttime */
8110             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8111         } else if (i == 27) {
8112             /* stack bottom */
8113             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8114         } else {
8115             /* the remaining fields are simply reported as zero */
8116             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8117         }
8118 
8119         if (write(fd, buf->str, buf->len) != buf->len) {
8120             return -1;
8121         }
8122     }
8123 
8124     return 0;
8125 }
8126 
8127 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8128 {
8129     CPUState *cpu = env_cpu(cpu_env);
8130     TaskState *ts = cpu->opaque;
8131     abi_ulong auxv = ts->info->saved_auxv;
8132     abi_ulong len = ts->info->auxv_len;
8133     char *ptr;
8134 
8135     /*
8136      * The auxiliary vector is stored on the target process's stack;
8137      * read the whole auxv vector and copy it out to the file.
8138      */
8139     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8140     if (ptr != NULL) {
8141         while (len > 0) {
8142             ssize_t r;
8143             r = write(fd, ptr, len);
8144             if (r <= 0) {
8145                 break;
8146             }
8147             len -= r;
8148             ptr += r;
8149         }
8150         lseek(fd, 0, SEEK_SET);
8151         unlock_user(ptr, auxv, len);
8152     }
8153 
8154     return 0;
8155 }
8156 
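     /*
      * Return nonzero if filename refers to the given entry under
      * /proc/self/ or /proc/<pid>/ for our own pid.
      */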
8157 static int is_proc_myself(const char *filename, const char *entry)
8158 {
8159     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8160         filename += strlen("/proc/");
8161         if (!strncmp(filename, "self/", strlen("self/"))) {
8162             filename += strlen("self/");
8163         } else if (*filename >= '1' && *filename <= '9') {
8164             char myself[80];
8165             snprintf(myself, sizeof(myself), "%d/", getpid());
8166             if (!strncmp(filename, myself, strlen(myself))) {
8167                 filename += strlen(myself);
8168             } else {
8169                 return 0;
8170             }
8171         } else {
8172             return 0;
8173         }
8174         if (!strcmp(filename, entry)) {
8175             return 1;
8176         }
8177     }
8178     return 0;
8179 }
8180 
8181 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8182                       const char *fmt, int code)
8183 {
8184     if (logfile) {
8185         CPUState *cs = env_cpu(env);
8186 
8187         fprintf(logfile, fmt, code);
8188         fprintf(logfile, "Failing executable: %s\n", exec_path);
8189         cpu_dump_state(cs, logfile, 0);
8190         open_self_maps(env, fileno(logfile));
8191     }
8192 }
8193 
8194 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8195 {
8196     /* dump to console */
8197     excp_dump_file(stderr, env, fmt, code);
8198 
8199     /* dump to log file */
8200     if (qemu_log_separate()) {
8201         FILE *logfile = qemu_log_trylock();
8202 
8203         excp_dump_file(logfile, env, fmt, code);
8204         qemu_log_unlock(logfile);
8205     }
8206 }
8207 
8208 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8209     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8210 static int is_proc(const char *filename, const char *entry)
8211 {
8212     return strcmp(filename, entry) == 0;
8213 }
8214 #endif
8215 
8216 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
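     /*
      * Emulate /proc/net/route when host and guest endianness differ: the
      * destination, gateway and mask columns are byte-swapped so the guest
      * sees them in its own byte order.
      */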
8217 static int open_net_route(CPUArchState *cpu_env, int fd)
8218 {
8219     FILE *fp;
8220     char *line = NULL;
8221     size_t len = 0;
8222     ssize_t read;
8223 
8224     fp = fopen("/proc/net/route", "r");
8225     if (fp == NULL) {
8226         return -1;
8227     }
8228 
8229     /* read header */
8230 
8231     read = getline(&line, &len, fp);
         if (read == -1) {
             /* could not read the header line */
             free(line);
             fclose(fp);
             return -1;
         }
8232     dprintf(fd, "%s", line);
8233 
8234     /* read routes */
8235 
8236     while ((read = getline(&line, &len, fp)) != -1) {
8237         char iface[16];
8238         uint32_t dest, gw, mask;
8239         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8240         int fields;
8241 
8242         fields = sscanf(line,
8243                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8244                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8245                         &mask, &mtu, &window, &irtt);
8246         if (fields != 11) {
8247             continue;
8248         }
8249         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8250                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8251                 metric, tswap32(mask), mtu, window, irtt);
8252     }
8253 
8254     free(line);
8255     fclose(fp);
8256 
8257     return 0;
8258 }
8259 #endif
8260 
8261 #if defined(TARGET_SPARC)
8262 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8263 {
8264     dprintf(fd, "type\t\t: sun4u\n");
8265     return 0;
8266 }
8267 #endif
8268 
8269 #if defined(TARGET_HPPA)
8270 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8271 {
8272     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8273     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8274     dprintf(fd, "capabilities\t: os32\n");
8275     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8276     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8277     return 0;
8278 }
8279 #endif
8280 
8281 #if defined(TARGET_M68K)
8282 static int open_hardware(CPUArchState *cpu_env, int fd)
8283 {
8284     dprintf(fd, "Model:\t\tqemu-m68k\n");
8285     return 0;
8286 }
8287 #endif
8288 
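     /*
      * openat() with QEMU's /proc interception: a small set of /proc/self and
      * /proc files is synthesized into a memfd (or an unlinked temporary file
      * when memfd_create() is unavailable); everything else is passed through
      * to the host openat().
      */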
8289 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8290 {
8291     struct fake_open {
8292         const char *filename;
8293         int (*fill)(CPUArchState *cpu_env, int fd);
8294         int (*cmp)(const char *s1, const char *s2);
8295     };
8296     const struct fake_open *fake_open;
8297     static const struct fake_open fakes[] = {
8298         { "maps", open_self_maps, is_proc_myself },
8299         { "stat", open_self_stat, is_proc_myself },
8300         { "auxv", open_self_auxv, is_proc_myself },
8301         { "cmdline", open_self_cmdline, is_proc_myself },
8302 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8303         { "/proc/net/route", open_net_route, is_proc },
8304 #endif
8305 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8306         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8307 #endif
8308 #if defined(TARGET_M68K)
8309         { "/proc/hardware", open_hardware, is_proc },
8310 #endif
8311         { NULL, NULL, NULL }
8312     };
8313 
8314     if (is_proc_myself(pathname, "exe")) {
8315         return safe_openat(dirfd, exec_path, flags, mode);
8316     }
8317 
8318     for (fake_open = fakes; fake_open->filename; fake_open++) {
8319         if (fake_open->cmp(pathname, fake_open->filename)) {
8320             break;
8321         }
8322     }
8323 
8324     if (fake_open->filename) {
8325         const char *tmpdir;
8326         char filename[PATH_MAX];
8327         int fd, r;
8328 
8329         fd = memfd_create("qemu-open", 0);
8330         if (fd < 0) {
8331             if (errno != ENOSYS) {
8332                 return fd;
8333             }
8334             /* create temporary file to map stat to */
8335             tmpdir = getenv("TMPDIR");
8336             if (!tmpdir)
8337                 tmpdir = "/tmp";
8338             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8339             fd = mkstemp(filename);
8340             if (fd < 0) {
8341                 return fd;
8342             }
8343             unlink(filename);
8344         }
8345 
8346         if ((r = fake_open->fill(cpu_env, fd))) {
8347             int e = errno;
8348             close(fd);
8349             errno = e;
8350             return r;
8351         }
8352         lseek(fd, 0, SEEK_SET);
8353 
8354         return fd;
8355     }
8356 
8357     return safe_openat(dirfd, path(pathname), flags, mode);
8358 }
8359 
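     /*
      * Emulate execve(): build host argv[]/envp[] arrays from the guest's
      * pointer vectors, then exec through the safe_syscall wrapper (see the
      * comment below for why that matters).
      */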
8360 static int do_execve(CPUArchState *cpu_env,
8361                        abi_long pathname, abi_long guest_argp,
8362                        abi_long guest_envp)
8363 {
8364     int ret;
8365     char **argp, **envp;
8366     int argc, envc;
8367     abi_ulong gp;
8368     abi_ulong addr;
8369     char **q;
8370     void *p;
8371 
8372     argc = 0;
8373 
8374     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8375         if (get_user_ual(addr, gp)) {
8376             return -TARGET_EFAULT;
8377         }
8378         if (!addr) {
8379             break;
8380         }
8381         argc++;
8382     }
8383     envc = 0;
8384     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8385         if (get_user_ual(addr, gp)) {
8386             return -TARGET_EFAULT;
8387         }
8388         if (!addr) {
8389             break;
8390         }
8391         envc++;
8392     }
8393 
8394     argp = g_new0(char *, argc + 1);
8395     envp = g_new0(char *, envc + 1);
8396 
8397     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8398         if (get_user_ual(addr, gp)) {
8399             goto execve_efault;
8400         }
8401         if (!addr) {
8402             break;
8403         }
8404         *q = lock_user_string(addr);
8405         if (!*q) {
8406             goto execve_efault;
8407         }
8408     }
8409     *q = NULL;
8410 
8411     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8412         if (get_user_ual(addr, gp)) {
8413             goto execve_efault;
8414         }
8415         if (!addr) {
8416             break;
8417         }
8418         *q = lock_user_string(addr);
8419         if (!*q) {
8420             goto execve_efault;
8421         }
8422     }
8423     *q = NULL;
8424 
8425     /*
8426      * Although execve() is not an interruptible syscall it is
8427      * a special case where we must use the safe_syscall wrapper:
8428      * if we allow a signal to happen before we make the host
8429      * syscall then we will 'lose' it, because at the point of
8430      * execve the process leaves QEMU's control. So we use the
8431      * safe syscall wrapper to ensure that we either take the
8432      * signal as a guest signal, or else it does not happen
8433      * before the execve completes and makes it the other
8434      * program's problem.
8435      */
8436     p = lock_user_string(pathname);
8437     if (!p) {
8438         goto execve_efault;
8439     }
8440 
8441     if (is_proc_myself(p, "exe")) {
8442         ret = get_errno(safe_execve(exec_path, argp, envp));
8443     } else {
8444         ret = get_errno(safe_execve(p, argp, envp));
8445     }
8446 
8447     unlock_user(p, pathname, 0);
8448 
8449     goto execve_end;
8450 
8451 execve_efault:
8452     ret = -TARGET_EFAULT;
8453 
8454 execve_end:
8455     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8456         if (get_user_ual(addr, gp) || !addr) {
8457             break;
8458         }
8459         unlock_user(*q, addr, 0);
8460     }
8461     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8462         if (get_user_ual(addr, gp) || !addr) {
8463             break;
8464         }
8465         unlock_user(*q, addr, 0);
8466     }
8467 
8468     g_free(argp);
8469     g_free(envp);
8470     return ret;
8471 }
8472 
8473 #define TIMER_MAGIC 0x0caf0000
8474 #define TIMER_MAGIC_MASK 0xffff0000
8475 
8476 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
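     /* e.g. the guest-visible ID 0x0caf0003 maps back to internal timer 3. */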
8477 static target_timer_t get_timer_id(abi_long arg)
8478 {
8479     target_timer_t timerid = arg;
8480 
8481     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8482         return -TARGET_EINVAL;
8483     }
8484 
8485     timerid &= 0xffff;
8486 
8487     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8488         return -TARGET_EINVAL;
8489     }
8490 
8491     return timerid;
8492 }
8493 
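     /*
      * Convert a CPU affinity mask from the guest's abi_ulong layout into the
      * host's unsigned long layout, bit by bit; host_to_target_cpu_mask()
      * below performs the inverse conversion.
      */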
8494 static int target_to_host_cpu_mask(unsigned long *host_mask,
8495                                    size_t host_size,
8496                                    abi_ulong target_addr,
8497                                    size_t target_size)
8498 {
8499     unsigned target_bits = sizeof(abi_ulong) * 8;
8500     unsigned host_bits = sizeof(*host_mask) * 8;
8501     abi_ulong *target_mask;
8502     unsigned i, j;
8503 
8504     assert(host_size >= target_size);
8505 
8506     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8507     if (!target_mask) {
8508         return -TARGET_EFAULT;
8509     }
8510     memset(host_mask, 0, host_size);
8511 
8512     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8513         unsigned bit = i * target_bits;
8514         abi_ulong val;
8515 
8516         __get_user(val, &target_mask[i]);
8517         for (j = 0; j < target_bits; j++, bit++) {
8518             if (val & (1UL << j)) {
8519                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8520             }
8521         }
8522     }
8523 
8524     unlock_user(target_mask, target_addr, 0);
8525     return 0;
8526 }
8527 
8528 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8529                                    size_t host_size,
8530                                    abi_ulong target_addr,
8531                                    size_t target_size)
8532 {
8533     unsigned target_bits = sizeof(abi_ulong) * 8;
8534     unsigned host_bits = sizeof(*host_mask) * 8;
8535     abi_ulong *target_mask;
8536     unsigned i, j;
8537 
8538     assert(host_size >= target_size);
8539 
8540     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8541     if (!target_mask) {
8542         return -TARGET_EFAULT;
8543     }
8544 
8545     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8546         unsigned bit = i * target_bits;
8547         abi_ulong val = 0;
8548 
8549         for (j = 0; j < target_bits; j++, bit++) {
8550             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8551                 val |= 1UL << j;
8552             }
8553         }
8554         __put_user(val, &target_mask[i]);
8555     }
8556 
8557     unlock_user(target_mask, target_addr, target_size);
8558     return 0;
8559 }
8560 
8561 #ifdef TARGET_NR_getdents
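     /*
      * Emulate getdents(): read host dirents into a scratch buffer and repack
      * them one record at a time into the guest's struct target_dirent layout,
      * rewinding the directory if the guest buffer fills before all host
      * records have been consumed.
      */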
8562 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8563 {
8564     g_autofree void *hdirp = NULL;
8565     void *tdirp;
8566     int hlen, hoff, toff;
8567     int hreclen, treclen;
8568     off64_t prev_diroff = 0;
8569 
8570     hdirp = g_try_malloc(count);
8571     if (!hdirp) {
8572         return -TARGET_ENOMEM;
8573     }
8574 
8575 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8576     hlen = sys_getdents(dirfd, hdirp, count);
8577 #else
8578     hlen = sys_getdents64(dirfd, hdirp, count);
8579 #endif
8580 
8581     hlen = get_errno(hlen);
8582     if (is_error(hlen)) {
8583         return hlen;
8584     }
8585 
8586     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8587     if (!tdirp) {
8588         return -TARGET_EFAULT;
8589     }
8590 
8591     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8592 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8593         struct linux_dirent *hde = hdirp + hoff;
8594 #else
8595         struct linux_dirent64 *hde = hdirp + hoff;
8596 #endif
8597         struct target_dirent *tde = tdirp + toff;
8598         int namelen;
8599         uint8_t type;
8600 
8601         namelen = strlen(hde->d_name);
8602         hreclen = hde->d_reclen;
8603         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8604         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8605 
8606         if (toff + treclen > count) {
8607             /*
8608              * If the host struct is smaller than the target struct, or
8609              * requires less alignment and thus packs into less space,
8610              * then the host can return more entries than we can pass
8611              * on to the guest.
8612              */
8613             if (toff == 0) {
8614                 toff = -TARGET_EINVAL; /* result buffer is too small */
8615                 break;
8616             }
8617             /*
8618              * Return what we have, resetting the file pointer to the
8619              * location of the first record not returned.
8620              */
8621             lseek64(dirfd, prev_diroff, SEEK_SET);
8622             break;
8623         }
8624 
8625         prev_diroff = hde->d_off;
8626         tde->d_ino = tswapal(hde->d_ino);
8627         tde->d_off = tswapal(hde->d_off);
8628         tde->d_reclen = tswap16(treclen);
8629         memcpy(tde->d_name, hde->d_name, namelen + 1);
8630 
8631         /*
8632          * The getdents type is in what was formerly a padding byte at the
8633          * end of the structure.
8634          */
8635 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8636         type = *((uint8_t *)hde + hreclen - 1);
8637 #else
8638         type = hde->d_type;
8639 #endif
8640         *((uint8_t *)tde + treclen - 1) = type;
8641     }
8642 
8643     unlock_user(tdirp, arg2, toff);
8644     return toff;
8645 }
8646 #endif /* TARGET_NR_getdents */
8647 
8648 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
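     /*
      * As do_getdents() above, but producing the fixed 64-bit dirent layout
      * of getdents64().
      */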
8649 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8650 {
8651     g_autofree void *hdirp = NULL;
8652     void *tdirp;
8653     int hlen, hoff, toff;
8654     int hreclen, treclen;
8655     off64_t prev_diroff = 0;
8656 
8657     hdirp = g_try_malloc(count);
8658     if (!hdirp) {
8659         return -TARGET_ENOMEM;
8660     }
8661 
8662     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8663     if (is_error(hlen)) {
8664         return hlen;
8665     }
8666 
8667     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8668     if (!tdirp) {
8669         return -TARGET_EFAULT;
8670     }
8671 
8672     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8673         struct linux_dirent64 *hde = hdirp + hoff;
8674         struct target_dirent64 *tde = tdirp + toff;
8675         int namelen;
8676 
8677         namelen = strlen(hde->d_name) + 1;
8678         hreclen = hde->d_reclen;
8679         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8680         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8681 
8682         if (toff + treclen > count) {
8683             /*
8684              * If the host struct is smaller than the target struct, or
8685              * requires less alignment and thus packs into less space,
8686              * then the host can return more entries than we can pass
8687              * on to the guest.
8688              */
8689             if (toff == 0) {
8690                 toff = -TARGET_EINVAL; /* result buffer is too small */
8691                 break;
8692             }
8693             /*
8694              * Return what we have, resetting the file pointer to the
8695              * location of the first record not returned.
8696              */
8697             lseek64(dirfd, prev_diroff, SEEK_SET);
8698             break;
8699         }
8700 
8701         prev_diroff = hde->d_off;
8702         tde->d_ino = tswap64(hde->d_ino);
8703         tde->d_off = tswap64(hde->d_off);
8704         tde->d_reclen = tswap16(treclen);
8705         tde->d_type = hde->d_type;
8706         memcpy(tde->d_name, hde->d_name, namelen);
8707     }
8708 
8709     unlock_user(tdirp, arg2, toff);
8710     return toff;
8711 }
8712 #endif /* TARGET_NR_getdents64 */
8713 
8714 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8715 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8716 #endif
8717 
8718 /* This is an internal helper for do_syscall that provides a single
8719  * return point, so that actions such as logging of syscall results
8720  * can be performed in one place.
8721  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8722  */
8723 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8724                             abi_long arg2, abi_long arg3, abi_long arg4,
8725                             abi_long arg5, abi_long arg6, abi_long arg7,
8726                             abi_long arg8)
8727 {
8728     CPUState *cpu = env_cpu(cpu_env);
8729     abi_long ret;
8730 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8731     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8732     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8733     || defined(TARGET_NR_statx)
8734     struct stat st;
8735 #endif
8736 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8737     || defined(TARGET_NR_fstatfs)
8738     struct statfs stfs;
8739 #endif
8740     void *p;
8741 
8742     switch(num) {
8743     case TARGET_NR_exit:
8744         /* In old applications this may be used to implement _exit(2).
8745            However, in threaded applications it is used for thread termination,
8746            and _exit_group is used for application termination.
8747            Do thread termination if we have more than one thread.  */
8748 
8749         if (block_signals()) {
8750             return -QEMU_ERESTARTSYS;
8751         }
8752 
8753         pthread_mutex_lock(&clone_lock);
8754 
8755         if (CPU_NEXT(first_cpu)) {
8756             TaskState *ts = cpu->opaque;
8757 
8758             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8759             object_unref(OBJECT(cpu));
8760             /*
8761              * At this point the CPU should be unrealized and removed
8762              * from cpu lists. We can clean-up the rest of the thread
8763              * data without the lock held.
8764              */
8765 
8766             pthread_mutex_unlock(&clone_lock);
8767 
8768             if (ts->child_tidptr) {
8769                 put_user_u32(0, ts->child_tidptr);
8770                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8771                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8772             }
8773             thread_cpu = NULL;
8774             g_free(ts);
8775             rcu_unregister_thread();
8776             pthread_exit(NULL);
8777         }
8778 
8779         pthread_mutex_unlock(&clone_lock);
8780         preexit_cleanup(cpu_env, arg1);
8781         _exit(arg1);
8782         return 0; /* avoid warning */
8783     case TARGET_NR_read:
8784         if (arg2 == 0 && arg3 == 0) {
8785             return get_errno(safe_read(arg1, 0, 0));
8786         } else {
8787             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8788                 return -TARGET_EFAULT;
8789             ret = get_errno(safe_read(arg1, p, arg3));
8790             if (ret >= 0 &&
8791                 fd_trans_host_to_target_data(arg1)) {
8792                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8793             }
8794             unlock_user(p, arg2, ret);
8795         }
8796         return ret;
8797     case TARGET_NR_write:
8798         if (arg2 == 0 && arg3 == 0) {
8799             return get_errno(safe_write(arg1, 0, 0));
8800         }
8801         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8802             return -TARGET_EFAULT;
8803         if (fd_trans_target_to_host_data(arg1)) {
8804             void *copy = g_malloc(arg3);
8805             memcpy(copy, p, arg3);
8806             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8807             if (ret >= 0) {
8808                 ret = get_errno(safe_write(arg1, copy, ret));
8809             }
8810             g_free(copy);
8811         } else {
8812             ret = get_errno(safe_write(arg1, p, arg3));
8813         }
8814         unlock_user(p, arg2, 0);
8815         return ret;
8816 
8817 #ifdef TARGET_NR_open
8818     case TARGET_NR_open:
8819         if (!(p = lock_user_string(arg1)))
8820             return -TARGET_EFAULT;
8821         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8822                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8823                                   arg3));
8824         fd_trans_unregister(ret);
8825         unlock_user(p, arg1, 0);
8826         return ret;
8827 #endif
8828     case TARGET_NR_openat:
8829         if (!(p = lock_user_string(arg2)))
8830             return -TARGET_EFAULT;
8831         ret = get_errno(do_openat(cpu_env, arg1, p,
8832                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8833                                   arg4));
8834         fd_trans_unregister(ret);
8835         unlock_user(p, arg2, 0);
8836         return ret;
8837 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8838     case TARGET_NR_name_to_handle_at:
8839         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8840         return ret;
8841 #endif
8842 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8843     case TARGET_NR_open_by_handle_at:
8844         ret = do_open_by_handle_at(arg1, arg2, arg3);
8845         fd_trans_unregister(ret);
8846         return ret;
8847 #endif
8848 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8849     case TARGET_NR_pidfd_open:
8850         return get_errno(pidfd_open(arg1, arg2));
8851 #endif
8852 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8853     case TARGET_NR_pidfd_send_signal:
8854         {
8855             siginfo_t uinfo, *puinfo;
8856 
8857             if (arg3) {
8858                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8859                 if (!p) {
8860                     return -TARGET_EFAULT;
8861                 }
8862                 target_to_host_siginfo(&uinfo, p);
8863                 unlock_user(p, arg3, 0);
8864                 puinfo = &uinfo;
8865             } else {
8866                 puinfo = NULL;
8867             }
8868             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8869                                               puinfo, arg4));
8870         }
8871         return ret;
8872 #endif
8873 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8874     case TARGET_NR_pidfd_getfd:
8875         return get_errno(pidfd_getfd(arg1, arg2, arg3));
8876 #endif
8877     case TARGET_NR_close:
8878         fd_trans_unregister(arg1);
8879         return get_errno(close(arg1));
8880 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
8881     case TARGET_NR_close_range:
8882         ret = get_errno(sys_close_range(arg1, arg2, arg3));
8883         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
8884             abi_long fd, maxfd;
8885             maxfd = MIN(arg2, target_fd_max);
8886             for (fd = arg1; fd < maxfd; fd++) {
8887                 fd_trans_unregister(fd);
8888             }
8889         }
8890         return ret;
8891 #endif
8892 
8893     case TARGET_NR_brk:
8894         return do_brk(arg1);
8895 #ifdef TARGET_NR_fork
8896     case TARGET_NR_fork:
8897         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8898 #endif
8899 #ifdef TARGET_NR_waitpid
8900     case TARGET_NR_waitpid:
8901         {
8902             int status;
8903             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8904             if (!is_error(ret) && arg2 && ret
8905                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8906                 return -TARGET_EFAULT;
8907         }
8908         return ret;
8909 #endif
8910 #ifdef TARGET_NR_waitid
8911     case TARGET_NR_waitid:
8912         {
8913             siginfo_t info;
8914             info.si_pid = 0;
8915             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8916             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8917                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8918                     return -TARGET_EFAULT;
8919                 host_to_target_siginfo(p, &info);
8920                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8921             }
8922         }
8923         return ret;
8924 #endif
8925 #ifdef TARGET_NR_creat /* not on alpha */
8926     case TARGET_NR_creat:
8927         if (!(p = lock_user_string(arg1)))
8928             return -TARGET_EFAULT;
8929         ret = get_errno(creat(p, arg2));
8930         fd_trans_unregister(ret);
8931         unlock_user(p, arg1, 0);
8932         return ret;
8933 #endif
8934 #ifdef TARGET_NR_link
8935     case TARGET_NR_link:
8936         {
8937             void * p2;
8938             p = lock_user_string(arg1);
8939             p2 = lock_user_string(arg2);
8940             if (!p || !p2)
8941                 ret = -TARGET_EFAULT;
8942             else
8943                 ret = get_errno(link(p, p2));
8944             unlock_user(p2, arg2, 0);
8945             unlock_user(p, arg1, 0);
8946         }
8947         return ret;
8948 #endif
8949 #if defined(TARGET_NR_linkat)
8950     case TARGET_NR_linkat:
8951         {
8952             void * p2 = NULL;
8953             if (!arg2 || !arg4)
8954                 return -TARGET_EFAULT;
8955             p  = lock_user_string(arg2);
8956             p2 = lock_user_string(arg4);
8957             if (!p || !p2)
8958                 ret = -TARGET_EFAULT;
8959             else
8960                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8961             unlock_user(p, arg2, 0);
8962             unlock_user(p2, arg4, 0);
8963         }
8964         return ret;
8965 #endif
8966 #ifdef TARGET_NR_unlink
8967     case TARGET_NR_unlink:
8968         if (!(p = lock_user_string(arg1)))
8969             return -TARGET_EFAULT;
8970         ret = get_errno(unlink(p));
8971         unlock_user(p, arg1, 0);
8972         return ret;
8973 #endif
8974 #if defined(TARGET_NR_unlinkat)
8975     case TARGET_NR_unlinkat:
8976         if (!(p = lock_user_string(arg2)))
8977             return -TARGET_EFAULT;
8978         ret = get_errno(unlinkat(arg1, p, arg3));
8979         unlock_user(p, arg2, 0);
8980         return ret;
8981 #endif
8982     case TARGET_NR_execve:
8983         return do_execve(cpu_env, arg1, arg2, arg3);
8984     case TARGET_NR_chdir:
8985         if (!(p = lock_user_string(arg1)))
8986             return -TARGET_EFAULT;
8987         ret = get_errno(chdir(p));
8988         unlock_user(p, arg1, 0);
8989         return ret;
8990 #ifdef TARGET_NR_time
8991     case TARGET_NR_time:
8992         {
8993             time_t host_time;
8994             ret = get_errno(time(&host_time));
8995             if (!is_error(ret)
8996                 && arg1
8997                 && put_user_sal(host_time, arg1))
8998                 return -TARGET_EFAULT;
8999         }
9000         return ret;
9001 #endif
9002 #ifdef TARGET_NR_mknod
9003     case TARGET_NR_mknod:
9004         if (!(p = lock_user_string(arg1)))
9005             return -TARGET_EFAULT;
9006         ret = get_errno(mknod(p, arg2, arg3));
9007         unlock_user(p, arg1, 0);
9008         return ret;
9009 #endif
9010 #if defined(TARGET_NR_mknodat)
9011     case TARGET_NR_mknodat:
9012         if (!(p = lock_user_string(arg2)))
9013             return -TARGET_EFAULT;
9014         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9015         unlock_user(p, arg2, 0);
9016         return ret;
9017 #endif
9018 #ifdef TARGET_NR_chmod
9019     case TARGET_NR_chmod:
9020         if (!(p = lock_user_string(arg1)))
9021             return -TARGET_EFAULT;
9022         ret = get_errno(chmod(p, arg2));
9023         unlock_user(p, arg1, 0);
9024         return ret;
9025 #endif
9026 #ifdef TARGET_NR_lseek
9027     case TARGET_NR_lseek:
9028         return get_errno(lseek(arg1, arg2, arg3));
9029 #endif
9030 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9031     /* Alpha specific */
9032     case TARGET_NR_getxpid:
9033         cpu_env->ir[IR_A4] = getppid();
9034         return get_errno(getpid());
9035 #endif
9036 #ifdef TARGET_NR_getpid
9037     case TARGET_NR_getpid:
9038         return get_errno(getpid());
9039 #endif
9040     case TARGET_NR_mount:
9041         {
9042             /* need to look at the data field */
9043             void *p2, *p3;
9044 
9045             if (arg1) {
9046                 p = lock_user_string(arg1);
9047                 if (!p) {
9048                     return -TARGET_EFAULT;
9049                 }
9050             } else {
9051                 p = NULL;
9052             }
9053 
9054             p2 = lock_user_string(arg2);
9055             if (!p2) {
9056                 if (arg1) {
9057                     unlock_user(p, arg1, 0);
9058                 }
9059                 return -TARGET_EFAULT;
9060             }
9061 
9062             if (arg3) {
9063                 p3 = lock_user_string(arg3);
9064                 if (!p3) {
9065                     if (arg1) {
9066                         unlock_user(p, arg1, 0);
9067                     }
9068                     unlock_user(p2, arg2, 0);
9069                     return -TARGET_EFAULT;
9070                 }
9071             } else {
9072                 p3 = NULL;
9073             }
9074 
9075             /* FIXME - arg5 should be locked, but it isn't clear how to
9076              * do that since it's not guaranteed to be a NULL-terminated
9077              * string.
9078              */
9079             if (!arg5) {
9080                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9081             } else {
9082                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9083             }
9084             ret = get_errno(ret);
9085 
9086             if (arg1) {
9087                 unlock_user(p, arg1, 0);
9088             }
9089             unlock_user(p2, arg2, 0);
9090             if (arg3) {
9091                 unlock_user(p3, arg3, 0);
9092             }
9093         }
9094         return ret;
9095 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9096 #if defined(TARGET_NR_umount)
9097     case TARGET_NR_umount:
9098 #endif
9099 #if defined(TARGET_NR_oldumount)
9100     case TARGET_NR_oldumount:
9101 #endif
9102         if (!(p = lock_user_string(arg1)))
9103             return -TARGET_EFAULT;
9104         ret = get_errno(umount(p));
9105         unlock_user(p, arg1, 0);
9106         return ret;
9107 #endif
9108 #ifdef TARGET_NR_stime /* not on alpha */
9109     case TARGET_NR_stime:
9110         {
9111             struct timespec ts;
9112             ts.tv_nsec = 0;
9113             if (get_user_sal(ts.tv_sec, arg1)) {
9114                 return -TARGET_EFAULT;
9115             }
9116             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9117         }
9118 #endif
9119 #ifdef TARGET_NR_alarm /* not on alpha */
9120     case TARGET_NR_alarm:
9121         return alarm(arg1);
9122 #endif
9123 #ifdef TARGET_NR_pause /* not on alpha */
9124     case TARGET_NR_pause:
9125         if (!block_signals()) {
9126             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9127         }
9128         return -TARGET_EINTR;
9129 #endif
9130 #ifdef TARGET_NR_utime
9131     case TARGET_NR_utime:
9132         {
9133             struct utimbuf tbuf, *host_tbuf;
9134             struct target_utimbuf *target_tbuf;
9135             if (arg2) {
9136                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9137                     return -TARGET_EFAULT;
9138                 tbuf.actime = tswapal(target_tbuf->actime);
9139                 tbuf.modtime = tswapal(target_tbuf->modtime);
9140                 unlock_user_struct(target_tbuf, arg2, 0);
9141                 host_tbuf = &tbuf;
9142             } else {
9143                 host_tbuf = NULL;
9144             }
9145             if (!(p = lock_user_string(arg1)))
9146                 return -TARGET_EFAULT;
9147             ret = get_errno(utime(p, host_tbuf));
9148             unlock_user(p, arg1, 0);
9149         }
9150         return ret;
9151 #endif
9152 #ifdef TARGET_NR_utimes
9153     case TARGET_NR_utimes:
9154         {
9155             struct timeval *tvp, tv[2];
9156             if (arg2) {
9157                 if (copy_from_user_timeval(&tv[0], arg2)
9158                     || copy_from_user_timeval(&tv[1],
9159                                               arg2 + sizeof(struct target_timeval)))
9160                     return -TARGET_EFAULT;
9161                 tvp = tv;
9162             } else {
9163                 tvp = NULL;
9164             }
9165             if (!(p = lock_user_string(arg1)))
9166                 return -TARGET_EFAULT;
9167             ret = get_errno(utimes(p, tvp));
9168             unlock_user(p, arg1, 0);
9169         }
9170         return ret;
9171 #endif
9172 #if defined(TARGET_NR_futimesat)
9173     case TARGET_NR_futimesat:
9174         {
9175             struct timeval *tvp, tv[2];
9176             if (arg3) {
9177                 if (copy_from_user_timeval(&tv[0], arg3)
9178                     || copy_from_user_timeval(&tv[1],
9179                                               arg3 + sizeof(struct target_timeval)))
9180                     return -TARGET_EFAULT;
9181                 tvp = tv;
9182             } else {
9183                 tvp = NULL;
9184             }
9185             if (!(p = lock_user_string(arg2))) {
9186                 return -TARGET_EFAULT;
9187             }
9188             ret = get_errno(futimesat(arg1, path(p), tvp));
9189             unlock_user(p, arg2, 0);
9190         }
9191         return ret;
9192 #endif
9193 #ifdef TARGET_NR_access
9194     case TARGET_NR_access:
9195         if (!(p = lock_user_string(arg1))) {
9196             return -TARGET_EFAULT;
9197         }
9198         ret = get_errno(access(path(p), arg2));
9199         unlock_user(p, arg1, 0);
9200         return ret;
9201 #endif
9202 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9203     case TARGET_NR_faccessat:
9204         if (!(p = lock_user_string(arg2))) {
9205             return -TARGET_EFAULT;
9206         }
9207         ret = get_errno(faccessat(arg1, p, arg3, 0));
9208         unlock_user(p, arg2, 0);
9209         return ret;
9210 #endif
9211 #if defined(TARGET_NR_faccessat2)
9212     case TARGET_NR_faccessat2:
9213         if (!(p = lock_user_string(arg2))) {
9214             return -TARGET_EFAULT;
9215         }
9216         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9217         unlock_user(p, arg2, 0);
9218         return ret;
9219 #endif
9220 #ifdef TARGET_NR_nice /* not on alpha */
9221     case TARGET_NR_nice:
9222         return get_errno(nice(arg1));
9223 #endif
9224     case TARGET_NR_sync:
9225         sync();
9226         return 0;
9227 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9228     case TARGET_NR_syncfs:
9229         return get_errno(syncfs(arg1));
9230 #endif
9231     case TARGET_NR_kill:
9232         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9233 #ifdef TARGET_NR_rename
9234     case TARGET_NR_rename:
9235         {
9236             void *p2;
9237             p = lock_user_string(arg1);
9238             p2 = lock_user_string(arg2);
9239             if (!p || !p2)
9240                 ret = -TARGET_EFAULT;
9241             else
9242                 ret = get_errno(rename(p, p2));
9243             unlock_user(p2, arg2, 0);
9244             unlock_user(p, arg1, 0);
9245         }
9246         return ret;
9247 #endif
9248 #if defined(TARGET_NR_renameat)
9249     case TARGET_NR_renameat:
9250         {
9251             void *p2;
9252             p  = lock_user_string(arg2);
9253             p2 = lock_user_string(arg4);
9254             if (!p || !p2)
9255                 ret = -TARGET_EFAULT;
9256             else
9257                 ret = get_errno(renameat(arg1, p, arg3, p2));
9258             unlock_user(p2, arg4, 0);
9259             unlock_user(p, arg2, 0);
9260         }
9261         return ret;
9262 #endif
9263 #if defined(TARGET_NR_renameat2)
9264     case TARGET_NR_renameat2:
9265         {
9266             void *p2;
9267             p  = lock_user_string(arg2);
9268             p2 = lock_user_string(arg4);
9269             if (!p || !p2) {
9270                 ret = -TARGET_EFAULT;
9271             } else {
9272                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9273             }
9274             unlock_user(p2, arg4, 0);
9275             unlock_user(p, arg2, 0);
9276         }
9277         return ret;
9278 #endif
9279 #ifdef TARGET_NR_mkdir
9280     case TARGET_NR_mkdir:
9281         if (!(p = lock_user_string(arg1)))
9282             return -TARGET_EFAULT;
9283         ret = get_errno(mkdir(p, arg2));
9284         unlock_user(p, arg1, 0);
9285         return ret;
9286 #endif
9287 #if defined(TARGET_NR_mkdirat)
9288     case TARGET_NR_mkdirat:
9289         if (!(p = lock_user_string(arg2)))
9290             return -TARGET_EFAULT;
9291         ret = get_errno(mkdirat(arg1, p, arg3));
9292         unlock_user(p, arg2, 0);
9293         return ret;
9294 #endif
9295 #ifdef TARGET_NR_rmdir
9296     case TARGET_NR_rmdir:
9297         if (!(p = lock_user_string(arg1)))
9298             return -TARGET_EFAULT;
9299         ret = get_errno(rmdir(p));
9300         unlock_user(p, arg1, 0);
9301         return ret;
9302 #endif
9303     case TARGET_NR_dup:
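        /*
         * Note (descriptive, see the fd-trans helpers): fd_trans_dup() below
         * copies any fd translator registered for the old descriptor over to
         * the duplicate, so descriptors whose data needs target/host
         * conversion keep being translated after dup().
         */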
9304         ret = get_errno(dup(arg1));
9305         if (ret >= 0) {
9306             fd_trans_dup(arg1, ret);
9307         }
9308         return ret;
9309 #ifdef TARGET_NR_pipe
9310     case TARGET_NR_pipe:
9311         return do_pipe(cpu_env, arg1, 0, 0);
9312 #endif
9313 #ifdef TARGET_NR_pipe2
9314     case TARGET_NR_pipe2:
9315         return do_pipe(cpu_env, arg1,
9316                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9317 #endif
9318     case TARGET_NR_times:
9319         {
9320             struct target_tms *tmsp;
9321             struct tms tms;
9322             ret = get_errno(times(&tms));
9323             if (arg1) {
9324                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9325                 if (!tmsp)
9326                     return -TARGET_EFAULT;
9327                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9328                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9329                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9330                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9331             }
9332             if (!is_error(ret))
9333                 ret = host_to_target_clock_t(ret);
9334         }
9335         return ret;
9336     case TARGET_NR_acct:
9337         if (arg1 == 0) {
9338             ret = get_errno(acct(NULL));
9339         } else {
9340             if (!(p = lock_user_string(arg1))) {
9341                 return -TARGET_EFAULT;
9342             }
9343             ret = get_errno(acct(path(p)));
9344             unlock_user(p, arg1, 0);
9345         }
9346         return ret;
9347 #ifdef TARGET_NR_umount2
9348     case TARGET_NR_umount2:
9349         if (!(p = lock_user_string(arg1)))
9350             return -TARGET_EFAULT;
9351         ret = get_errno(umount2(p, arg2));
9352         unlock_user(p, arg1, 0);
9353         return ret;
9354 #endif
9355     case TARGET_NR_ioctl:
9356         return do_ioctl(arg1, arg2, arg3);
9357 #ifdef TARGET_NR_fcntl
9358     case TARGET_NR_fcntl:
9359         return do_fcntl(arg1, arg2, arg3);
9360 #endif
9361     case TARGET_NR_setpgid:
9362         return get_errno(setpgid(arg1, arg2));
9363     case TARGET_NR_umask:
9364         return get_errno(umask(arg1));
9365     case TARGET_NR_chroot:
9366         if (!(p = lock_user_string(arg1)))
9367             return -TARGET_EFAULT;
9368         ret = get_errno(chroot(p));
9369         unlock_user(p, arg1, 0);
9370         return ret;
9371 #ifdef TARGET_NR_dup2
9372     case TARGET_NR_dup2:
9373         ret = get_errno(dup2(arg1, arg2));
9374         if (ret >= 0) {
9375             fd_trans_dup(arg1, arg2);
9376         }
9377         return ret;
9378 #endif
9379 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9380     case TARGET_NR_dup3:
9381     {
9382         int host_flags;
9383 
9384         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9385             return -EINVAL;
9386         }
9387         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9388         ret = get_errno(dup3(arg1, arg2, host_flags));
9389         if (ret >= 0) {
9390             fd_trans_dup(arg1, arg2);
9391         }
9392         return ret;
9393     }
9394 #endif
9395 #ifdef TARGET_NR_getppid /* not on alpha */
9396     case TARGET_NR_getppid:
9397         return get_errno(getppid());
9398 #endif
9399 #ifdef TARGET_NR_getpgrp
9400     case TARGET_NR_getpgrp:
9401         return get_errno(getpgrp());
9402 #endif
9403     case TARGET_NR_setsid:
9404         return get_errno(setsid());
9405 #ifdef TARGET_NR_sigaction
9406     case TARGET_NR_sigaction:
9407         {
9408 #if defined(TARGET_MIPS)
9409             struct target_sigaction act, oact, *pact, *old_act;
9410 
9411             if (arg2) {
9412                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9413                     return -TARGET_EFAULT;
9414                 act._sa_handler = old_act->_sa_handler;
9415                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9416                 act.sa_flags = old_act->sa_flags;
9417                 unlock_user_struct(old_act, arg2, 0);
9418                 pact = &act;
9419             } else {
9420                 pact = NULL;
9421             }
9422 
9423             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9424 
9425             if (!is_error(ret) && arg3) {
9426                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9427                     return -TARGET_EFAULT;
9428                 old_act->_sa_handler = oact._sa_handler;
9429                 old_act->sa_flags = oact.sa_flags;
9430                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9431                 old_act->sa_mask.sig[1] = 0;
9432                 old_act->sa_mask.sig[2] = 0;
9433                 old_act->sa_mask.sig[3] = 0;
9434                 unlock_user_struct(old_act, arg3, 1);
9435             }
9436 #else
9437             struct target_old_sigaction *old_act;
9438             struct target_sigaction act, oact, *pact;
9439             if (arg2) {
9440                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9441                     return -TARGET_EFAULT;
9442                 act._sa_handler = old_act->_sa_handler;
9443                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9444                 act.sa_flags = old_act->sa_flags;
9445 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9446                 act.sa_restorer = old_act->sa_restorer;
9447 #endif
9448                 unlock_user_struct(old_act, arg2, 0);
9449                 pact = &act;
9450             } else {
9451                 pact = NULL;
9452             }
9453             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9454             if (!is_error(ret) && arg3) {
9455                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9456                     return -TARGET_EFAULT;
9457                 old_act->_sa_handler = oact._sa_handler;
9458                 old_act->sa_mask = oact.sa_mask.sig[0];
9459                 old_act->sa_flags = oact.sa_flags;
9460 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9461                 old_act->sa_restorer = oact.sa_restorer;
9462 #endif
9463                 unlock_user_struct(old_act, arg3, 1);
9464             }
9465 #endif
9466         }
9467         return ret;
9468 #endif
9469     case TARGET_NR_rt_sigaction:
9470         {
9471             /*
9472              * For Alpha and SPARC this is a 5 argument syscall, with
9473              * a 'restorer' parameter which must be copied into the
9474              * sa_restorer field of the sigaction struct.
9475              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9476              * and arg5 is the sigsetsize.
9477              */
9478 #if defined(TARGET_ALPHA)
9479             target_ulong sigsetsize = arg4;
9480             target_ulong restorer = arg5;
9481 #elif defined(TARGET_SPARC)
9482             target_ulong restorer = arg4;
9483             target_ulong sigsetsize = arg5;
9484 #else
9485             target_ulong sigsetsize = arg4;
9486             target_ulong restorer = 0;
9487 #endif
9488             struct target_sigaction *act = NULL;
9489             struct target_sigaction *oact = NULL;
9490 
9491             if (sigsetsize != sizeof(target_sigset_t)) {
9492                 return -TARGET_EINVAL;
9493             }
9494             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9495                 return -TARGET_EFAULT;
9496             }
9497             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9498                 ret = -TARGET_EFAULT;
9499             } else {
9500                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9501                 if (oact) {
9502                     unlock_user_struct(oact, arg3, 1);
9503                 }
9504             }
9505             if (act) {
9506                 unlock_user_struct(act, arg2, 0);
9507             }
9508         }
9509         return ret;
9510 #ifdef TARGET_NR_sgetmask /* not on alpha */
9511     case TARGET_NR_sgetmask:
9512         {
9513             sigset_t cur_set;
9514             abi_ulong target_set;
9515             ret = do_sigprocmask(0, NULL, &cur_set);
9516             if (!ret) {
9517                 host_to_target_old_sigset(&target_set, &cur_set);
9518                 ret = target_set;
9519             }
9520         }
9521         return ret;
9522 #endif
9523 #ifdef TARGET_NR_ssetmask /* not on alpha */
9524     case TARGET_NR_ssetmask:
9525         {
9526             sigset_t set, oset;
9527             abi_ulong target_set = arg1;
9528             target_to_host_old_sigset(&set, &target_set);
9529             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9530             if (!ret) {
9531                 host_to_target_old_sigset(&target_set, &oset);
9532                 ret = target_set;
9533             }
9534         }
9535         return ret;
9536 #endif
9537 #ifdef TARGET_NR_sigprocmask
9538     case TARGET_NR_sigprocmask:
9539         {
9540 #if defined(TARGET_ALPHA)
9541             sigset_t set, oldset;
9542             abi_ulong mask;
9543             int how;
9544 
9545             switch (arg1) {
9546             case TARGET_SIG_BLOCK:
9547                 how = SIG_BLOCK;
9548                 break;
9549             case TARGET_SIG_UNBLOCK:
9550                 how = SIG_UNBLOCK;
9551                 break;
9552             case TARGET_SIG_SETMASK:
9553                 how = SIG_SETMASK;
9554                 break;
9555             default:
9556                 return -TARGET_EINVAL;
9557             }
9558             mask = arg2;
9559             target_to_host_old_sigset(&set, &mask);
9560 
9561             ret = do_sigprocmask(how, &set, &oldset);
9562             if (!is_error(ret)) {
9563                 host_to_target_old_sigset(&mask, &oldset);
9564                 ret = mask;
9565                 cpu_env->ir[IR_V0] = 0; /* force no error */
9566             }
9567 #else
9568             sigset_t set, oldset, *set_ptr;
9569             int how;
9570 
9571             if (arg2) {
9572                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9573                 if (!p) {
9574                     return -TARGET_EFAULT;
9575                 }
9576                 target_to_host_old_sigset(&set, p);
9577                 unlock_user(p, arg2, 0);
9578                 set_ptr = &set;
9579                 switch (arg1) {
9580                 case TARGET_SIG_BLOCK:
9581                     how = SIG_BLOCK;
9582                     break;
9583                 case TARGET_SIG_UNBLOCK:
9584                     how = SIG_UNBLOCK;
9585                     break;
9586                 case TARGET_SIG_SETMASK:
9587                     how = SIG_SETMASK;
9588                     break;
9589                 default:
9590                     return -TARGET_EINVAL;
9591                 }
9592             } else {
9593                 how = 0;
9594                 set_ptr = NULL;
9595             }
9596             ret = do_sigprocmask(how, set_ptr, &oldset);
9597             if (!is_error(ret) && arg3) {
9598                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9599                     return -TARGET_EFAULT;
9600                 host_to_target_old_sigset(p, &oldset);
9601                 unlock_user(p, arg3, sizeof(target_sigset_t));
9602             }
9603 #endif
9604         }
9605         return ret;
9606 #endif
9607     case TARGET_NR_rt_sigprocmask:
9608         {
9609             int how = arg1;
9610             sigset_t set, oldset, *set_ptr;
9611 
9612             if (arg4 != sizeof(target_sigset_t)) {
9613                 return -TARGET_EINVAL;
9614             }
9615 
9616             if (arg2) {
9617                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9618                 if (!p) {
9619                     return -TARGET_EFAULT;
9620                 }
9621                 target_to_host_sigset(&set, p);
9622                 unlock_user(p, arg2, 0);
9623                 set_ptr = &set;
9624                 switch(how) {
9625                 case TARGET_SIG_BLOCK:
9626                     how = SIG_BLOCK;
9627                     break;
9628                 case TARGET_SIG_UNBLOCK:
9629                     how = SIG_UNBLOCK;
9630                     break;
9631                 case TARGET_SIG_SETMASK:
9632                     how = SIG_SETMASK;
9633                     break;
9634                 default:
9635                     return -TARGET_EINVAL;
9636                 }
9637             } else {
9638                 how = 0;
9639                 set_ptr = NULL;
9640             }
9641             ret = do_sigprocmask(how, set_ptr, &oldset);
9642             if (!is_error(ret) && arg3) {
9643                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9644                     return -TARGET_EFAULT;
9645                 host_to_target_sigset(p, &oldset);
9646                 unlock_user(p, arg3, sizeof(target_sigset_t));
9647             }
9648         }
9649         return ret;
9650 #ifdef TARGET_NR_sigpending
9651     case TARGET_NR_sigpending:
9652         {
9653             sigset_t set;
9654             ret = get_errno(sigpending(&set));
9655             if (!is_error(ret)) {
9656                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9657                     return -TARGET_EFAULT;
9658                 host_to_target_old_sigset(p, &set);
9659                 unlock_user(p, arg1, sizeof(target_sigset_t));
9660             }
9661         }
9662         return ret;
9663 #endif
9664     case TARGET_NR_rt_sigpending:
9665         {
9666             sigset_t set;
9667 
9668             /* Yes, this check is >, not != like most. We follow the kernel's
9669              * logic and it does it like this because it implements
9670              * NR_sigpending through the same code path, and in that case
9671              * the old_sigset_t is smaller in size.
9672              */
9673             if (arg2 > sizeof(target_sigset_t)) {
9674                 return -TARGET_EINVAL;
9675             }
9676 
9677             ret = get_errno(sigpending(&set));
9678             if (!is_error(ret)) {
9679                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9680                     return -TARGET_EFAULT;
9681                 host_to_target_sigset(p, &set);
9682                 unlock_user(p, arg1, sizeof(target_sigset_t));
9683             }
9684         }
9685         return ret;
9686 #ifdef TARGET_NR_sigsuspend
9687     case TARGET_NR_sigsuspend:
9688         {
9689             sigset_t *set;
9690 
9691 #if defined(TARGET_ALPHA)
9692             TaskState *ts = cpu->opaque;
9693             /* target_to_host_old_sigset will bswap back */
9694             abi_ulong mask = tswapal(arg1);
9695             set = &ts->sigsuspend_mask;
9696             target_to_host_old_sigset(set, &mask);
9697 #else
9698             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9699             if (ret != 0) {
9700                 return ret;
9701             }
9702 #endif
9703             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9704             finish_sigsuspend_mask(ret);
9705         }
9706         return ret;
9707 #endif
9708     case TARGET_NR_rt_sigsuspend:
9709         {
9710             sigset_t *set;
9711 
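            /*
             * sigsuspend must atomically swap in the temporary mask and put
             * the old one back once a signal has been taken.
             * process_sigsuspend_mask() copies in and sanity-checks the guest
             * mask; finish_sigsuspend_mask() does the corresponding cleanup
             * after safe_rt_sigsuspend() returns (see those helpers).
             */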
9712             ret = process_sigsuspend_mask(&set, arg1, arg2);
9713             if (ret != 0) {
9714                 return ret;
9715             }
9716             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9717             finish_sigsuspend_mask(ret);
9718         }
9719         return ret;
9720 #ifdef TARGET_NR_rt_sigtimedwait
9721     case TARGET_NR_rt_sigtimedwait:
9722         {
9723             sigset_t set;
9724             struct timespec uts, *puts;
9725             siginfo_t uinfo;
9726 
9727             if (arg4 != sizeof(target_sigset_t)) {
9728                 return -TARGET_EINVAL;
9729             }
9730 
9731             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9732                 return -TARGET_EFAULT;
9733             target_to_host_sigset(&set, p);
9734             unlock_user(p, arg1, 0);
9735             if (arg3) {
9736                 puts = &uts;
9737                 if (target_to_host_timespec(puts, arg3)) {
9738                     return -TARGET_EFAULT;
9739                 }
9740             } else {
9741                 puts = NULL;
9742             }
9743             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9744                                                  SIGSET_T_SIZE));
9745             if (!is_error(ret)) {
9746                 if (arg2) {
9747                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9748                                   0);
9749                     if (!p) {
9750                         return -TARGET_EFAULT;
9751                     }
9752                     host_to_target_siginfo(p, &uinfo);
9753                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9754                 }
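                /*
                 * On success sigtimedwait returns the number of the delivered
                 * signal, so translate it back into the target's numbering.
                 */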
9755                 ret = host_to_target_signal(ret);
9756             }
9757         }
9758         return ret;
9759 #endif
9760 #ifdef TARGET_NR_rt_sigtimedwait_time64
9761     case TARGET_NR_rt_sigtimedwait_time64:
9762         {
9763             sigset_t set;
9764             struct timespec uts, *puts;
9765             siginfo_t uinfo;
9766 
9767             if (arg4 != sizeof(target_sigset_t)) {
9768                 return -TARGET_EINVAL;
9769             }
9770 
9771             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9772             if (!p) {
9773                 return -TARGET_EFAULT;
9774             }
9775             target_to_host_sigset(&set, p);
9776             unlock_user(p, arg1, 0);
9777             if (arg3) {
9778                 puts = &uts;
9779                 if (target_to_host_timespec64(puts, arg3)) {
9780                     return -TARGET_EFAULT;
9781                 }
9782             } else {
9783                 puts = NULL;
9784             }
9785             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9786                                                  SIGSET_T_SIZE));
9787             if (!is_error(ret)) {
9788                 if (arg2) {
9789                     p = lock_user(VERIFY_WRITE, arg2,
9790                                   sizeof(target_siginfo_t), 0);
9791                     if (!p) {
9792                         return -TARGET_EFAULT;
9793                     }
9794                     host_to_target_siginfo(p, &uinfo);
9795                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9796                 }
9797                 ret = host_to_target_signal(ret);
9798             }
9799         }
9800         return ret;
9801 #endif
9802     case TARGET_NR_rt_sigqueueinfo:
9803         {
9804             siginfo_t uinfo;
9805 
9806             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9807             if (!p) {
9808                 return -TARGET_EFAULT;
9809             }
9810             target_to_host_siginfo(&uinfo, p);
9811             unlock_user(p, arg3, 0);
9812             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9813         }
9814         return ret;
9815     case TARGET_NR_rt_tgsigqueueinfo:
9816         {
9817             siginfo_t uinfo;
9818 
9819             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9820             if (!p) {
9821                 return -TARGET_EFAULT;
9822             }
9823             target_to_host_siginfo(&uinfo, p);
9824             unlock_user(p, arg4, 0);
9825             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9826         }
9827         return ret;
9828 #ifdef TARGET_NR_sigreturn
9829     case TARGET_NR_sigreturn:
9830         if (block_signals()) {
9831             return -QEMU_ERESTARTSYS;
9832         }
9833         return do_sigreturn(cpu_env);
9834 #endif
9835     case TARGET_NR_rt_sigreturn:
9836         if (block_signals()) {
9837             return -QEMU_ERESTARTSYS;
9838         }
9839         return do_rt_sigreturn(cpu_env);
9840     case TARGET_NR_sethostname:
9841         if (!(p = lock_user_string(arg1)))
9842             return -TARGET_EFAULT;
9843         ret = get_errno(sethostname(p, arg2));
9844         unlock_user(p, arg1, 0);
9845         return ret;
9846 #ifdef TARGET_NR_setrlimit
9847     case TARGET_NR_setrlimit:
9848         {
9849             int resource = target_to_host_resource(arg1);
9850             struct target_rlimit *target_rlim;
9851             struct rlimit rlim;
9852             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9853                 return -TARGET_EFAULT;
9854             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9855             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9856             unlock_user_struct(target_rlim, arg2, 0);
9857             /*
9858              * If we just passed through resource limit settings for memory then
9859              * they would also apply to QEMU's own allocations, and QEMU will
9860              * crash or hang or die if its allocations fail. Ideally we would
9861              * track the guest allocations in QEMU and apply the limits ourselves.
9862              * For now, just tell the guest the call succeeded but don't actually
9863              * limit anything.
9864              */
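            /* i.e. a guest setrlimit() on RLIMIT_AS, RLIMIT_DATA or
             * RLIMIT_STACK is reported as successful without actually
             * limiting anything. */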
9865             if (resource != RLIMIT_AS &&
9866                 resource != RLIMIT_DATA &&
9867                 resource != RLIMIT_STACK) {
9868                 return get_errno(setrlimit(resource, &rlim));
9869             } else {
9870                 return 0;
9871             }
9872         }
9873 #endif
9874 #ifdef TARGET_NR_getrlimit
9875     case TARGET_NR_getrlimit:
9876         {
9877             int resource = target_to_host_resource(arg1);
9878             struct target_rlimit *target_rlim;
9879             struct rlimit rlim;
9880 
9881             ret = get_errno(getrlimit(resource, &rlim));
9882             if (!is_error(ret)) {
9883                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9884                     return -TARGET_EFAULT;
9885                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9886                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9887                 unlock_user_struct(target_rlim, arg2, 1);
9888             }
9889         }
9890         return ret;
9891 #endif
9892     case TARGET_NR_getrusage:
9893         {
9894             struct rusage rusage;
9895             ret = get_errno(getrusage(arg1, &rusage));
9896             if (!is_error(ret)) {
9897                 ret = host_to_target_rusage(arg2, &rusage);
9898             }
9899         }
9900         return ret;
9901 #if defined(TARGET_NR_gettimeofday)
9902     case TARGET_NR_gettimeofday:
9903         {
9904             struct timeval tv;
9905             struct timezone tz;
9906 
9907             ret = get_errno(gettimeofday(&tv, &tz));
9908             if (!is_error(ret)) {
9909                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9910                     return -TARGET_EFAULT;
9911                 }
9912                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9913                     return -TARGET_EFAULT;
9914                 }
9915             }
9916         }
9917         return ret;
9918 #endif
9919 #if defined(TARGET_NR_settimeofday)
9920     case TARGET_NR_settimeofday:
9921         {
9922             struct timeval tv, *ptv = NULL;
9923             struct timezone tz, *ptz = NULL;
9924 
9925             if (arg1) {
9926                 if (copy_from_user_timeval(&tv, arg1)) {
9927                     return -TARGET_EFAULT;
9928                 }
9929                 ptv = &tv;
9930             }
9931 
9932             if (arg2) {
9933                 if (copy_from_user_timezone(&tz, arg2)) {
9934                     return -TARGET_EFAULT;
9935                 }
9936                 ptz = &tz;
9937             }
9938 
9939             return get_errno(settimeofday(ptv, ptz));
9940         }
9941 #endif
9942 #if defined(TARGET_NR_select)
9943     case TARGET_NR_select:
9944 #if defined(TARGET_WANT_NI_OLD_SELECT)
9945         /* Some architectures used to have old_select here,
9946          * but now simply return -TARGET_ENOSYS for it.
9947          */
9948         ret = -TARGET_ENOSYS;
9949 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9950         ret = do_old_select(arg1);
9951 #else
9952         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9953 #endif
9954         return ret;
9955 #endif
9956 #ifdef TARGET_NR_pselect6
9957     case TARGET_NR_pselect6:
9958         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9959 #endif
9960 #ifdef TARGET_NR_pselect6_time64
9961     case TARGET_NR_pselect6_time64:
9962         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9963 #endif
9964 #ifdef TARGET_NR_symlink
9965     case TARGET_NR_symlink:
9966         {
9967             void *p2;
9968             p = lock_user_string(arg1);
9969             p2 = lock_user_string(arg2);
9970             if (!p || !p2)
9971                 ret = -TARGET_EFAULT;
9972             else
9973                 ret = get_errno(symlink(p, p2));
9974             unlock_user(p2, arg2, 0);
9975             unlock_user(p, arg1, 0);
9976         }
9977         return ret;
9978 #endif
9979 #if defined(TARGET_NR_symlinkat)
9980     case TARGET_NR_symlinkat:
9981         {
9982             void *p2;
9983             p  = lock_user_string(arg1);
9984             p2 = lock_user_string(arg3);
9985             if (!p || !p2)
9986                 ret = -TARGET_EFAULT;
9987             else
9988                 ret = get_errno(symlinkat(p, arg2, p2));
9989             unlock_user(p2, arg3, 0);
9990             unlock_user(p, arg1, 0);
9991         }
9992         return ret;
9993 #endif
9994 #ifdef TARGET_NR_readlink
9995     case TARGET_NR_readlink:
9996         {
9997             void *p2;
9998             p = lock_user_string(arg1);
9999             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10000             if (!p || !p2) {
10001                 ret = -TARGET_EFAULT;
10002             } else if (!arg3) {
10003                 /* Short circuit this for the magic exe check. */
10004                 ret = -TARGET_EINVAL;
10005             } else if (is_proc_myself((const char *)p, "exe")) {
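                /*
                 * The guest's view of /proc/self/exe must point at the
                 * emulated binary rather than the QEMU executable, so resolve
                 * exec_path here instead of doing a real readlink().
                 */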
10006                 char real[PATH_MAX], *temp;
10007                 temp = realpath(exec_path, real);
10008                 /* Return value is # of bytes that we wrote to the buffer. */
10009                 if (temp == NULL) {
10010                     ret = get_errno(-1);
10011                 } else {
10012                     /* Don't worry about sign mismatch as earlier mapping
10013                      * logic would have thrown a bad address error. */
10014                     ret = MIN(strlen(real), arg3);
10015                     /* We cannot NUL terminate the string. */
10016                     memcpy(p2, real, ret);
10017                 }
10018             } else {
10019                 ret = get_errno(readlink(path(p), p2, arg3));
10020             }
10021             unlock_user(p2, arg2, ret);
10022             unlock_user(p, arg1, 0);
10023         }
10024         return ret;
10025 #endif
10026 #if defined(TARGET_NR_readlinkat)
10027     case TARGET_NR_readlinkat:
10028         {
10029             void *p2;
10030             p  = lock_user_string(arg2);
10031             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10032             if (!p || !p2) {
10033                 ret = -TARGET_EFAULT;
10034             } else if (!arg4) {
10035                 /* Short circuit this for the magic exe check. */
10036                 ret = -TARGET_EINVAL;
10037             } else if (is_proc_myself((const char *)p, "exe")) {
10038                 char real[PATH_MAX], *temp;
10039                 temp = realpath(exec_path, real);
10040                 /* Return value is # of bytes that we wrote to the buffer. */
10041                 if (temp == NULL) {
10042                     ret = get_errno(-1);
10043                 } else {
10044                     /* Don't worry about sign mismatch as earlier mapping
10045                      * logic would have thrown a bad address error. */
10046                     ret = MIN(strlen(real), arg4);
10047                     /* We cannot NUL terminate the string. */
10048                     memcpy(p2, real, ret);
10049                 }
10050             } else {
10051                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10052             }
10053             unlock_user(p2, arg3, ret);
10054             unlock_user(p, arg2, 0);
10055         }
10056         return ret;
10057 #endif
10058 #ifdef TARGET_NR_swapon
10059     case TARGET_NR_swapon:
10060         if (!(p = lock_user_string(arg1)))
10061             return -TARGET_EFAULT;
10062         ret = get_errno(swapon(p, arg2));
10063         unlock_user(p, arg1, 0);
10064         return ret;
10065 #endif
10066     case TARGET_NR_reboot:
10067         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10068            /* arg4 is only valid (and only read) for LINUX_REBOOT_CMD_RESTART2 */
10069            p = lock_user_string(arg4);
10070            if (!p) {
10071                return -TARGET_EFAULT;
10072            }
10073            ret = get_errno(reboot(arg1, arg2, arg3, p));
10074            unlock_user(p, arg4, 0);
10075         } else {
10076            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10077         }
10078         return ret;
10079 #ifdef TARGET_NR_mmap
10080     case TARGET_NR_mmap:
10081 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10082     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10083     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10084     || defined(TARGET_S390X)
10085         {
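            /*
             * These 32-bit ABIs implement the legacy sys_mmap as a single
             * pointer to a six-word argument block in guest memory; unpack it
             * before calling target_mmap().
             */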
10086             abi_ulong *v;
10087             abi_ulong v1, v2, v3, v4, v5, v6;
10088             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10089                 return -TARGET_EFAULT;
10090             v1 = tswapal(v[0]);
10091             v2 = tswapal(v[1]);
10092             v3 = tswapal(v[2]);
10093             v4 = tswapal(v[3]);
10094             v5 = tswapal(v[4]);
10095             v6 = tswapal(v[5]);
10096             unlock_user(v, arg1, 0);
10097             ret = get_errno(target_mmap(v1, v2, v3,
10098                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10099                                         v5, v6));
10100         }
10101 #else
10102         /* mmap pointers are always untagged */
10103         ret = get_errno(target_mmap(arg1, arg2, arg3,
10104                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10105                                     arg5,
10106                                     arg6));
10107 #endif
10108         return ret;
10109 #endif
10110 #ifdef TARGET_NR_mmap2
10111     case TARGET_NR_mmap2:
10112 #ifndef MMAP_SHIFT
10113 #define MMAP_SHIFT 12
10114 #endif
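        /*
         * mmap2 takes the file offset in units of 4096-byte pages
         * (1 << MMAP_SHIFT) so that 32-bit guests can map offsets beyond
         * 4 GiB; scale it back to a byte offset for target_mmap().
         */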
10115         ret = target_mmap(arg1, arg2, arg3,
10116                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10117                           arg5, arg6 << MMAP_SHIFT);
10118         return get_errno(ret);
10119 #endif
10120     case TARGET_NR_munmap:
10121         arg1 = cpu_untagged_addr(cpu, arg1);
10122         return get_errno(target_munmap(arg1, arg2));
10123     case TARGET_NR_mprotect:
10124         arg1 = cpu_untagged_addr(cpu, arg1);
10125         {
10126             TaskState *ts = cpu->opaque;
10127             /* Special hack to detect libc making the stack executable.  */
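            /*
             * PROT_GROWSDOWN asks the kernel to apply the change down to the
             * start of the (downward-growing) stack mapping. QEMU tracks the
             * guest stack bounds itself, so widen the range to the recorded
             * stack limit and drop the flag before calling target_mprotect().
             */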
10128             if ((arg3 & PROT_GROWSDOWN)
10129                 && arg1 >= ts->info->stack_limit
10130                 && arg1 <= ts->info->start_stack) {
10131                 arg3 &= ~PROT_GROWSDOWN;
10132                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10133                 arg1 = ts->info->stack_limit;
10134             }
10135         }
10136         return get_errno(target_mprotect(arg1, arg2, arg3));
10137 #ifdef TARGET_NR_mremap
10138     case TARGET_NR_mremap:
10139         arg1 = cpu_untagged_addr(cpu, arg1);
10140         /* mremap new_addr (arg5) is always untagged */
10141         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10142 #endif
10143         /* ??? msync/mlock/munlock are broken for softmmu.  */
10144 #ifdef TARGET_NR_msync
10145     case TARGET_NR_msync:
10146         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10147 #endif
10148 #ifdef TARGET_NR_mlock
10149     case TARGET_NR_mlock:
10150         return get_errno(mlock(g2h(cpu, arg1), arg2));
10151 #endif
10152 #ifdef TARGET_NR_munlock
10153     case TARGET_NR_munlock:
10154         return get_errno(munlock(g2h(cpu, arg1), arg2));
10155 #endif
10156 #ifdef TARGET_NR_mlockall
10157     case TARGET_NR_mlockall:
10158         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10159 #endif
10160 #ifdef TARGET_NR_munlockall
10161     case TARGET_NR_munlockall:
10162         return get_errno(munlockall());
10163 #endif
10164 #ifdef TARGET_NR_truncate
10165     case TARGET_NR_truncate:
10166         if (!(p = lock_user_string(arg1)))
10167             return -TARGET_EFAULT;
10168         ret = get_errno(truncate(p, arg2));
10169         unlock_user(p, arg1, 0);
10170         return ret;
10171 #endif
10172 #ifdef TARGET_NR_ftruncate
10173     case TARGET_NR_ftruncate:
10174         return get_errno(ftruncate(arg1, arg2));
10175 #endif
10176     case TARGET_NR_fchmod:
10177         return get_errno(fchmod(arg1, arg2));
10178 #if defined(TARGET_NR_fchmodat)
10179     case TARGET_NR_fchmodat:
10180         if (!(p = lock_user_string(arg2)))
10181             return -TARGET_EFAULT;
10182         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10183         unlock_user(p, arg2, 0);
10184         return ret;
10185 #endif
10186     case TARGET_NR_getpriority:
10187         /* Note that negative values are valid for getpriority, so we must
10188            differentiate based on errno settings.  */
10189         errno = 0;
10190         ret = getpriority(arg1, arg2);
10191         if (ret == -1 && errno != 0) {
10192             return -host_to_target_errno(errno);
10193         }
10194 #ifdef TARGET_ALPHA
10195         /* Return value is the unbiased priority.  Signal no error.  */
10196         cpu_env->ir[IR_V0] = 0;
10197 #else
10198         /* Return value is a biased priority to avoid negative numbers.  */
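        /* e.g. a nice value of -20 is returned as 40, 0 as 20 and 19 as 1,
         * matching what the raw kernel syscall reports. */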
10199         ret = 20 - ret;
10200 #endif
10201         return ret;
10202     case TARGET_NR_setpriority:
10203         return get_errno(setpriority(arg1, arg2, arg3));
10204 #ifdef TARGET_NR_statfs
10205     case TARGET_NR_statfs:
10206         if (!(p = lock_user_string(arg1))) {
10207             return -TARGET_EFAULT;
10208         }
10209         ret = get_errno(statfs(path(p), &stfs));
10210         unlock_user(p, arg1, 0);
10211     convert_statfs:
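        /* TARGET_NR_fstatfs jumps here so both share the conversion below. */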
10212         if (!is_error(ret)) {
10213             struct target_statfs *target_stfs;
10214 
10215             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10216                 return -TARGET_EFAULT;
10217             __put_user(stfs.f_type, &target_stfs->f_type);
10218             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10219             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10220             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10221             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10222             __put_user(stfs.f_files, &target_stfs->f_files);
10223             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10224             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10225             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10226             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10227             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10228 #ifdef _STATFS_F_FLAGS
10229             __put_user(stfs.f_flags, &target_stfs->f_flags);
10230 #else
10231             __put_user(0, &target_stfs->f_flags);
10232 #endif
10233             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10234             unlock_user_struct(target_stfs, arg2, 1);
10235         }
10236         return ret;
10237 #endif
10238 #ifdef TARGET_NR_fstatfs
10239     case TARGET_NR_fstatfs:
10240         ret = get_errno(fstatfs(arg1, &stfs));
10241         goto convert_statfs;
10242 #endif
10243 #ifdef TARGET_NR_statfs64
10244     case TARGET_NR_statfs64:
10245         if (!(p = lock_user_string(arg1))) {
10246             return -TARGET_EFAULT;
10247         }
10248         ret = get_errno(statfs(path(p), &stfs));
10249         unlock_user(p, arg1, 0);
10250     convert_statfs64:
10251         if (!is_error(ret)) {
10252             struct target_statfs64 *target_stfs;
10253 
10254             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10255                 return -TARGET_EFAULT;
10256             __put_user(stfs.f_type, &target_stfs->f_type);
10257             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10258             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10259             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10260             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10261             __put_user(stfs.f_files, &target_stfs->f_files);
10262             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10263             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10264             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10265             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10266             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10267 #ifdef _STATFS_F_FLAGS
10268             __put_user(stfs.f_flags, &target_stfs->f_flags);
10269 #else
10270             __put_user(0, &target_stfs->f_flags);
10271 #endif
10272             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10273             unlock_user_struct(target_stfs, arg3, 1);
10274         }
10275         return ret;
10276     case TARGET_NR_fstatfs64:
10277         ret = get_errno(fstatfs(arg1, &stfs));
10278         goto convert_statfs64;
10279 #endif
10280 #ifdef TARGET_NR_socketcall
10281     case TARGET_NR_socketcall:
10282         return do_socketcall(arg1, arg2);
10283 #endif
10284 #ifdef TARGET_NR_accept
10285     case TARGET_NR_accept:
10286         return do_accept4(arg1, arg2, arg3, 0);
10287 #endif
10288 #ifdef TARGET_NR_accept4
10289     case TARGET_NR_accept4:
10290         return do_accept4(arg1, arg2, arg3, arg4);
10291 #endif
10292 #ifdef TARGET_NR_bind
10293     case TARGET_NR_bind:
10294         return do_bind(arg1, arg2, arg3);
10295 #endif
10296 #ifdef TARGET_NR_connect
10297     case TARGET_NR_connect:
10298         return do_connect(arg1, arg2, arg3);
10299 #endif
10300 #ifdef TARGET_NR_getpeername
10301     case TARGET_NR_getpeername:
10302         return do_getpeername(arg1, arg2, arg3);
10303 #endif
10304 #ifdef TARGET_NR_getsockname
10305     case TARGET_NR_getsockname:
10306         return do_getsockname(arg1, arg2, arg3);
10307 #endif
10308 #ifdef TARGET_NR_getsockopt
10309     case TARGET_NR_getsockopt:
10310         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10311 #endif
10312 #ifdef TARGET_NR_listen
10313     case TARGET_NR_listen:
10314         return get_errno(listen(arg1, arg2));
10315 #endif
10316 #ifdef TARGET_NR_recv
10317     case TARGET_NR_recv:
10318         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10319 #endif
10320 #ifdef TARGET_NR_recvfrom
10321     case TARGET_NR_recvfrom:
10322         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10323 #endif
10324 #ifdef TARGET_NR_recvmsg
10325     case TARGET_NR_recvmsg:
10326         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10327 #endif
10328 #ifdef TARGET_NR_send
10329     case TARGET_NR_send:
10330         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10331 #endif
10332 #ifdef TARGET_NR_sendmsg
10333     case TARGET_NR_sendmsg:
10334         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10335 #endif
10336 #ifdef TARGET_NR_sendmmsg
10337     case TARGET_NR_sendmmsg:
10338         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10339 #endif
10340 #ifdef TARGET_NR_recvmmsg
10341     case TARGET_NR_recvmmsg:
10342         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10343 #endif
10344 #ifdef TARGET_NR_sendto
10345     case TARGET_NR_sendto:
10346         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10347 #endif
10348 #ifdef TARGET_NR_shutdown
10349     case TARGET_NR_shutdown:
10350         return get_errno(shutdown(arg1, arg2));
10351 #endif
10352 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10353     case TARGET_NR_getrandom:
10354         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10355         if (!p) {
10356             return -TARGET_EFAULT;
10357         }
10358         ret = get_errno(getrandom(p, arg2, arg3));
10359         unlock_user(p, arg1, ret);
10360         return ret;
10361 #endif
10362 #ifdef TARGET_NR_socket
10363     case TARGET_NR_socket:
10364         return do_socket(arg1, arg2, arg3);
10365 #endif
10366 #ifdef TARGET_NR_socketpair
10367     case TARGET_NR_socketpair:
10368         return do_socketpair(arg1, arg2, arg3, arg4);
10369 #endif
10370 #ifdef TARGET_NR_setsockopt
10371     case TARGET_NR_setsockopt:
10372         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10373 #endif
10374 #if defined(TARGET_NR_syslog)
10375     case TARGET_NR_syslog:
10376         {
10377             int len = arg2;
10378 
10379             switch (arg1) {
10380             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10381             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10382             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10383             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10384             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10385             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set console log level */
10386             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10387             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10388                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10389             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10390             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10391             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10392                 {
10393                     if (len < 0) {
10394                         return -TARGET_EINVAL;
10395                     }
10396                     if (len == 0) {
10397                         return 0;
10398                     }
10399                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10400                     if (!p) {
10401                         return -TARGET_EFAULT;
10402                     }
10403                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10404                     unlock_user(p, arg2, arg3);
10405                 }
10406                 return ret;
10407             default:
10408                 return -TARGET_EINVAL;
10409             }
10410         }
10411         break;
10412 #endif
10413     case TARGET_NR_setitimer:
10414         {
10415             struct itimerval value, ovalue, *pvalue;
10416 
10417             if (arg2) {
10418                 pvalue = &value;
10419                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10420                     || copy_from_user_timeval(&pvalue->it_value,
10421                                               arg2 + sizeof(struct target_timeval)))
10422                     return -TARGET_EFAULT;
10423             } else {
10424                 pvalue = NULL;
10425             }
10426             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10427             if (!is_error(ret) && arg3) {
10428                 if (copy_to_user_timeval(arg3,
10429                                          &ovalue.it_interval)
10430                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10431                                             &ovalue.it_value))
10432                     return -TARGET_EFAULT;
10433             }
10434         }
10435         return ret;
10436     case TARGET_NR_getitimer:
10437         {
10438             struct itimerval value;
10439 
10440             ret = get_errno(getitimer(arg1, &value));
10441             if (!is_error(ret) && arg2) {
10442                 if (copy_to_user_timeval(arg2,
10443                                          &value.it_interval)
10444                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10445                                             &value.it_value))
10446                     return -TARGET_EFAULT;
10447             }
10448         }
10449         return ret;
10450 #ifdef TARGET_NR_stat
10451     case TARGET_NR_stat:
10452         if (!(p = lock_user_string(arg1))) {
10453             return -TARGET_EFAULT;
10454         }
10455         ret = get_errno(stat(path(p), &st));
10456         unlock_user(p, arg1, 0);
10457         goto do_stat;
10458 #endif
10459 #ifdef TARGET_NR_lstat
10460     case TARGET_NR_lstat:
10461         if (!(p = lock_user_string(arg1))) {
10462             return -TARGET_EFAULT;
10463         }
10464         ret = get_errno(lstat(path(p), &st));
10465         unlock_user(p, arg1, 0);
10466         goto do_stat;
10467 #endif
10468 #ifdef TARGET_NR_fstat
10469     case TARGET_NR_fstat:
10470         {
10471             ret = get_errno(fstat(arg1, &st));
10472 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10473         do_stat:
10474 #endif
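            /* stat and lstat jump here so all three share this conversion. */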
10475             if (!is_error(ret)) {
10476                 struct target_stat *target_st;
10477 
10478                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10479                     return -TARGET_EFAULT;
10480                 memset(target_st, 0, sizeof(*target_st));
10481                 __put_user(st.st_dev, &target_st->st_dev);
10482                 __put_user(st.st_ino, &target_st->st_ino);
10483                 __put_user(st.st_mode, &target_st->st_mode);
10484                 __put_user(st.st_uid, &target_st->st_uid);
10485                 __put_user(st.st_gid, &target_st->st_gid);
10486                 __put_user(st.st_nlink, &target_st->st_nlink);
10487                 __put_user(st.st_rdev, &target_st->st_rdev);
10488                 __put_user(st.st_size, &target_st->st_size);
10489                 __put_user(st.st_blksize, &target_st->st_blksize);
10490                 __put_user(st.st_blocks, &target_st->st_blocks);
10491                 __put_user(st.st_atime, &target_st->target_st_atime);
10492                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10493                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10494 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10495                 __put_user(st.st_atim.tv_nsec,
10496                            &target_st->target_st_atime_nsec);
10497                 __put_user(st.st_mtim.tv_nsec,
10498                            &target_st->target_st_mtime_nsec);
10499                 __put_user(st.st_ctim.tv_nsec,
10500                            &target_st->target_st_ctime_nsec);
10501 #endif
10502                 unlock_user_struct(target_st, arg2, 1);
10503             }
10504         }
10505         return ret;
10506 #endif
10507     case TARGET_NR_vhangup:
10508         return get_errno(vhangup());
10509 #ifdef TARGET_NR_syscall
10510     case TARGET_NR_syscall:
10511         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10512                           arg6, arg7, arg8, 0);
10513 #endif
10514 #if defined(TARGET_NR_wait4)
10515     case TARGET_NR_wait4:
10516         {
10517             int status;
10518             abi_long status_ptr = arg2;
10519             struct rusage rusage, *rusage_ptr;
10520             abi_ulong target_rusage = arg4;
10521             abi_long rusage_err;
10522             if (target_rusage)
10523                 rusage_ptr = &rusage;
10524             else
10525                 rusage_ptr = NULL;
10526             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
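            /* Only copy the status back if a child was actually reaped
             * (ret != 0) and the guest supplied a status pointer;
             * host_to_target_waitstatus() remaps the signal numbers encoded
             * in the wait status. */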
10527             if (!is_error(ret)) {
10528                 if (status_ptr && ret) {
10529                     status = host_to_target_waitstatus(status);
10530                     if (put_user_s32(status, status_ptr))
10531                         return -TARGET_EFAULT;
10532                 }
10533                 if (target_rusage) {
10534                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10535                     if (rusage_err) {
10536                         ret = rusage_err;
10537                     }
10538                 }
10539             }
10540         }
10541         return ret;
10542 #endif
10543 #ifdef TARGET_NR_swapoff
10544     case TARGET_NR_swapoff:
10545         if (!(p = lock_user_string(arg1)))
10546             return -TARGET_EFAULT;
10547         ret = get_errno(swapoff(p));
10548         unlock_user(p, arg1, 0);
10549         return ret;
10550 #endif
10551     case TARGET_NR_sysinfo:
10552         {
10553             struct target_sysinfo *target_value;
10554             struct sysinfo value;
10555             ret = get_errno(sysinfo(&value));
10556             if (!is_error(ret) && arg1)
10557             {
10558                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10559                     return -TARGET_EFAULT;
10560                 __put_user(value.uptime, &target_value->uptime);
10561                 __put_user(value.loads[0], &target_value->loads[0]);
10562                 __put_user(value.loads[1], &target_value->loads[1]);
10563                 __put_user(value.loads[2], &target_value->loads[2]);
10564                 __put_user(value.totalram, &target_value->totalram);
10565                 __put_user(value.freeram, &target_value->freeram);
10566                 __put_user(value.sharedram, &target_value->sharedram);
10567                 __put_user(value.bufferram, &target_value->bufferram);
10568                 __put_user(value.totalswap, &target_value->totalswap);
10569                 __put_user(value.freeswap, &target_value->freeswap);
10570                 __put_user(value.procs, &target_value->procs);
10571                 __put_user(value.totalhigh, &target_value->totalhigh);
10572                 __put_user(value.freehigh, &target_value->freehigh);
10573                 __put_user(value.mem_unit, &target_value->mem_unit);
10574                 unlock_user_struct(target_value, arg1, 1);
10575             }
10576         }
10577         return ret;
10578 #ifdef TARGET_NR_ipc
10579     case TARGET_NR_ipc:
10580         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10581 #endif
10582 #ifdef TARGET_NR_semget
10583     case TARGET_NR_semget:
10584         return get_errno(semget(arg1, arg2, arg3));
10585 #endif
10586 #ifdef TARGET_NR_semop
10587     case TARGET_NR_semop:
10588         return do_semtimedop(arg1, arg2, arg3, 0, false);
10589 #endif
10590 #ifdef TARGET_NR_semtimedop
10591     case TARGET_NR_semtimedop:
10592         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10593 #endif
10594 #ifdef TARGET_NR_semtimedop_time64
10595     case TARGET_NR_semtimedop_time64:
10596         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10597 #endif
10598 #ifdef TARGET_NR_semctl
10599     case TARGET_NR_semctl:
10600         return do_semctl(arg1, arg2, arg3, arg4);
10601 #endif
10602 #ifdef TARGET_NR_msgctl
10603     case TARGET_NR_msgctl:
10604         return do_msgctl(arg1, arg2, arg3);
10605 #endif
10606 #ifdef TARGET_NR_msgget
10607     case TARGET_NR_msgget:
10608         return get_errno(msgget(arg1, arg2));
10609 #endif
10610 #ifdef TARGET_NR_msgrcv
10611     case TARGET_NR_msgrcv:
10612         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10613 #endif
10614 #ifdef TARGET_NR_msgsnd
10615     case TARGET_NR_msgsnd:
10616         return do_msgsnd(arg1, arg2, arg3, arg4);
10617 #endif
10618 #ifdef TARGET_NR_shmget
10619     case TARGET_NR_shmget:
10620         return get_errno(shmget(arg1, arg2, arg3));
10621 #endif
10622 #ifdef TARGET_NR_shmctl
10623     case TARGET_NR_shmctl:
10624         return do_shmctl(arg1, arg2, arg3);
10625 #endif
10626 #ifdef TARGET_NR_shmat
10627     case TARGET_NR_shmat:
10628         return do_shmat(cpu_env, arg1, arg2, arg3);
10629 #endif
10630 #ifdef TARGET_NR_shmdt
10631     case TARGET_NR_shmdt:
10632         return do_shmdt(arg1);
10633 #endif
10634     case TARGET_NR_fsync:
10635         return get_errno(fsync(arg1));
10636     case TARGET_NR_clone:
10637         /* Linux manages to have three different orderings for its
10638          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10639          * match the kernel's CONFIG_CLONE_* settings.
10640          * Microblaze is further special in that it uses a sixth
10641          * implicit argument to clone for the TLS pointer.
10642          */
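        /*
         * Each case below just reshuffles the raw syscall arguments into the
         * single canonical order that do_fork() expects: flags, new stack,
         * parent tid pointer, TLS value, child tid pointer.
         */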
10643 #if defined(TARGET_MICROBLAZE)
10644         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10645 #elif defined(TARGET_CLONE_BACKWARDS)
10646         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10647 #elif defined(TARGET_CLONE_BACKWARDS2)
10648         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10649 #else
10650         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10651 #endif
10652         return ret;
10653 #ifdef __NR_exit_group
10654         /* new thread calls */
10655     case TARGET_NR_exit_group:
10656         preexit_cleanup(cpu_env, arg1);
10657         return get_errno(exit_group(arg1));
10658 #endif
10659     case TARGET_NR_setdomainname:
10660         if (!(p = lock_user_string(arg1)))
10661             return -TARGET_EFAULT;
10662         ret = get_errno(setdomainname(p, arg2));
10663         unlock_user(p, arg1, 0);
10664         return ret;
10665     case TARGET_NR_uname:
10666         /* no need to transcode because we use the linux syscall */
10667         {
10668             struct new_utsname * buf;
10669 
10670             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10671                 return -TARGET_EFAULT;
10672             ret = get_errno(sys_uname(buf));
10673             if (!is_error(ret)) {
10674                 /* Overwrite the native machine name with whatever is being
10675                    emulated. */
10676                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10677                           sizeof(buf->machine));
10678                 /* Allow the user to override the reported release.  */
10679                 if (qemu_uname_release && *qemu_uname_release) {
10680                     g_strlcpy(buf->release, qemu_uname_release,
10681                               sizeof(buf->release));
10682                 }
10683             }
10684             unlock_user_struct(buf, arg1, 1);
10685         }
10686         return ret;
10687 #ifdef TARGET_I386
10688     case TARGET_NR_modify_ldt:
10689         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10690 #if !defined(TARGET_X86_64)
10691     case TARGET_NR_vm86:
10692         return do_vm86(cpu_env, arg1, arg2);
10693 #endif
10694 #endif
10695 #if defined(TARGET_NR_adjtimex)
10696     case TARGET_NR_adjtimex:
10697         {
10698             struct timex host_buf;
10699 
10700             if (target_to_host_timex(&host_buf, arg1) != 0) {
10701                 return -TARGET_EFAULT;
10702             }
10703             ret = get_errno(adjtimex(&host_buf));
10704             if (!is_error(ret)) {
10705                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10706                     return -TARGET_EFAULT;
10707                 }
10708             }
10709         }
10710         return ret;
10711 #endif
10712 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10713     case TARGET_NR_clock_adjtime:
10714         {
10715             struct timex htx, *phtx = &htx;
10716 
10717             if (target_to_host_timex(phtx, arg2) != 0) {
10718                 return -TARGET_EFAULT;
10719             }
10720             ret = get_errno(clock_adjtime(arg1, phtx));
10721             if (!is_error(ret) && phtx) {
10722                 if (host_to_target_timex(arg2, phtx) != 0) {
10723                     return -TARGET_EFAULT;
10724                 }
10725             }
10726         }
10727         return ret;
10728 #endif
10729 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10730     case TARGET_NR_clock_adjtime64:
10731         {
10732             struct timex htx;
10733 
10734             if (target_to_host_timex64(&htx, arg2) != 0) {
10735                 return -TARGET_EFAULT;
10736             }
10737             ret = get_errno(clock_adjtime(arg1, &htx));
10738             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10739                     return -TARGET_EFAULT;
10740             }
10741         }
10742         return ret;
10743 #endif
10744     case TARGET_NR_getpgid:
10745         return get_errno(getpgid(arg1));
10746     case TARGET_NR_fchdir:
10747         return get_errno(fchdir(arg1));
10748     case TARGET_NR_personality:
10749         return get_errno(personality(arg1));
10750 #ifdef TARGET_NR__llseek /* Not on alpha */
10751     case TARGET_NR__llseek:
10752         {
10753             int64_t res;
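                  /*
                   * Guest ABI here: arg2/arg3 carry the high/low 32-bit halves
                   * of the 64-bit offset, arg4 points at a 64-bit result slot
                   * and arg5 is the whence value, mirroring the kernel's llseek.
                   */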
10754 #if !defined(__NR_llseek)
10755             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10756             if (res == -1) {
10757                 ret = get_errno(res);
10758             } else {
10759                 ret = 0;
10760             }
10761 #else
10762             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10763 #endif
10764             if ((ret == 0) && put_user_s64(res, arg4)) {
10765                 return -TARGET_EFAULT;
10766             }
10767         }
10768         return ret;
10769 #endif
10770 #ifdef TARGET_NR_getdents
10771     case TARGET_NR_getdents:
10772         return do_getdents(arg1, arg2, arg3);
10773 #endif /* TARGET_NR_getdents */
10774 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10775     case TARGET_NR_getdents64:
10776         return do_getdents64(arg1, arg2, arg3);
10777 #endif /* TARGET_NR_getdents64 */
10778 #if defined(TARGET_NR__newselect)
10779     case TARGET_NR__newselect:
10780         return do_select(arg1, arg2, arg3, arg4, arg5);
10781 #endif
10782 #ifdef TARGET_NR_poll
10783     case TARGET_NR_poll:
10784         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10785 #endif
10786 #ifdef TARGET_NR_ppoll
10787     case TARGET_NR_ppoll:
10788         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10789 #endif
10790 #ifdef TARGET_NR_ppoll_time64
10791     case TARGET_NR_ppoll_time64:
10792         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10793 #endif
10794     case TARGET_NR_flock:
10795         /* NOTE: the flock constant seems to be the same for every
10796            Linux platform */
10797         return get_errno(safe_flock(arg1, arg2));
10798     case TARGET_NR_readv:
10799         {
10800             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10801             if (vec != NULL) {
10802                 ret = get_errno(safe_readv(arg1, vec, arg3));
10803                 unlock_iovec(vec, arg2, arg3, 1);
10804             } else {
10805                 ret = -host_to_target_errno(errno);
10806             }
10807         }
10808         return ret;
10809     case TARGET_NR_writev:
10810         {
10811             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10812             if (vec != NULL) {
10813                 ret = get_errno(safe_writev(arg1, vec, arg3));
10814                 unlock_iovec(vec, arg2, arg3, 0);
10815             } else {
10816                 ret = -host_to_target_errno(errno);
10817             }
10818         }
10819         return ret;
10820 #if defined(TARGET_NR_preadv)
10821     case TARGET_NR_preadv:
10822         {
10823             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10824             if (vec != NULL) {
10825                 unsigned long low, high;
10826 
10827                 target_to_host_low_high(arg4, arg5, &low, &high);
10828                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10829                 unlock_iovec(vec, arg2, arg3, 1);
10830             } else {
10831                 ret = -host_to_target_errno(errno);
10832             }
10833         }
10834         return ret;
10835 #endif
10836 #if defined(TARGET_NR_pwritev)
10837     case TARGET_NR_pwritev:
10838         {
10839             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10840             if (vec != NULL) {
10841                 unsigned long low, high;
10842 
10843                 target_to_host_low_high(arg4, arg5, &low, &high);
10844                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10845                 unlock_iovec(vec, arg2, arg3, 0);
10846             } else {
10847                 ret = -host_to_target_errno(errno);
10848             }
10849         }
10850         return ret;
10851 #endif
10852     case TARGET_NR_getsid:
10853         return get_errno(getsid(arg1));
10854 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10855     case TARGET_NR_fdatasync:
10856         return get_errno(fdatasync(arg1));
10857 #endif
10858     case TARGET_NR_sched_getaffinity:
10859         {
10860             unsigned int mask_size;
10861             unsigned long *mask;
10862 
10863             /*
10864              * sched_getaffinity needs multiples of ulong, so need to take
10865              * sched_getaffinity needs multiples of ulong, so we need to take
10866              */
10867             if (arg2 & (sizeof(abi_ulong) - 1)) {
10868                 return -TARGET_EINVAL;
10869             }
10870             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
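                  /*
                   * Example: a 4-byte guest mask on a host with 8-byte longs is
                   * rounded up to mask_size == 8, i.e. a whole host long.
                   */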
10871 
10872             mask = alloca(mask_size);
10873             memset(mask, 0, mask_size);
10874             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10875 
10876             if (!is_error(ret)) {
10877                 if (ret > arg2) {
10878                     /* More data returned than will fit in the caller's buffer.
10879                      * This only happens if sizeof(abi_long) < sizeof(long)
10880                      * and the caller passed us a buffer holding an odd number
10881                      * of abi_longs. If the host kernel is actually using the
10882                      * extra 4 bytes then fail EINVAL; otherwise we can just
10883                      * ignore them and only copy the interesting part.
10884                      */
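                          /*
                           * arg2 * 8 is the number of CPU bits the guest buffer
                           * can describe; more configured host CPUs than that
                           * means truncation would drop real information.
                           */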
10885                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10886                     if (numcpus > arg2 * 8) {
10887                         return -TARGET_EINVAL;
10888                     }
10889                     ret = arg2;
10890                 }
10891 
10892                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10893                     return -TARGET_EFAULT;
10894                 }
10895             }
10896         }
10897         return ret;
10898     case TARGET_NR_sched_setaffinity:
10899         {
10900             unsigned int mask_size;
10901             unsigned long *mask;
10902 
10903             /*
10904              * sched_setaffinity needs multiples of ulong, so need to take
10905              * sched_setaffinity needs multiples of ulong, so we need to take
10906              */
10907             if (arg2 & (sizeof(abi_ulong) - 1)) {
10908                 return -TARGET_EINVAL;
10909             }
10910             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10911             mask = alloca(mask_size);
10912 
10913             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10914             if (ret) {
10915                 return ret;
10916             }
10917 
10918             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10919         }
10920     case TARGET_NR_getcpu:
10921         {
10922             unsigned cpu, node;
10923             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10924                                        arg2 ? &node : NULL,
10925                                        NULL));
10926             if (is_error(ret)) {
10927                 return ret;
10928             }
10929             if (arg1 && put_user_u32(cpu, arg1)) {
10930                 return -TARGET_EFAULT;
10931             }
10932             if (arg2 && put_user_u32(node, arg2)) {
10933                 return -TARGET_EFAULT;
10934             }
10935         }
10936         return ret;
10937     case TARGET_NR_sched_setparam:
10938         {
10939             struct target_sched_param *target_schp;
10940             struct sched_param schp;
10941 
10942             if (arg2 == 0) {
10943                 return -TARGET_EINVAL;
10944             }
10945             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10946                 return -TARGET_EFAULT;
10947             }
10948             schp.sched_priority = tswap32(target_schp->sched_priority);
10949             unlock_user_struct(target_schp, arg2, 0);
10950             return get_errno(sys_sched_setparam(arg1, &schp));
10951         }
10952     case TARGET_NR_sched_getparam:
10953         {
10954             struct target_sched_param *target_schp;
10955             struct sched_param schp;
10956 
10957             if (arg2 == 0) {
10958                 return -TARGET_EINVAL;
10959             }
10960             ret = get_errno(sys_sched_getparam(arg1, &schp));
10961             if (!is_error(ret)) {
10962                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10963                     return -TARGET_EFAULT;
10964                 }
10965                 target_schp->sched_priority = tswap32(schp.sched_priority);
10966                 unlock_user_struct(target_schp, arg2, 1);
10967             }
10968         }
10969         return ret;
10970     case TARGET_NR_sched_setscheduler:
10971         {
10972             struct target_sched_param *target_schp;
10973             struct sched_param schp;
10974             if (arg3 == 0) {
10975                 return -TARGET_EINVAL;
10976             }
10977             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10978                 return -TARGET_EFAULT;
10979             }
10980             schp.sched_priority = tswap32(target_schp->sched_priority);
10981             unlock_user_struct(target_schp, arg3, 0);
10982             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10983         }
10984     case TARGET_NR_sched_getscheduler:
10985         return get_errno(sys_sched_getscheduler(arg1));
10986     case TARGET_NR_sched_getattr:
10987         {
10988             struct target_sched_attr *target_scha;
10989             struct sched_attr scha;
10990             if (arg2 == 0) {
10991                 return -TARGET_EINVAL;
10992             }
10993             if (arg3 > sizeof(scha)) {
10994                 arg3 = sizeof(scha);
10995             }
10996             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10997             if (!is_error(ret)) {
10998                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10999                 if (!target_scha) {
11000                     return -TARGET_EFAULT;
11001                 }
11002                 target_scha->size = tswap32(scha.size);
11003                 target_scha->sched_policy = tswap32(scha.sched_policy);
11004                 target_scha->sched_flags = tswap64(scha.sched_flags);
11005                 target_scha->sched_nice = tswap32(scha.sched_nice);
11006                 target_scha->sched_priority = tswap32(scha.sched_priority);
11007                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11008                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11009                 target_scha->sched_period = tswap64(scha.sched_period);
11010                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11011                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11012                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11013                 }
11014                 unlock_user(target_scha, arg2, arg3);
11015             }
11016             return ret;
11017         }
11018     case TARGET_NR_sched_setattr:
11019         {
11020             struct target_sched_attr *target_scha;
11021             struct sched_attr scha;
11022             uint32_t size;
11023             int zeroed;
11024             if (arg2 == 0) {
11025                 return -TARGET_EINVAL;
11026             }
11027             if (get_user_u32(size, arg2)) {
11028                 return -TARGET_EFAULT;
11029             }
11030             if (!size) {
11031                 size = offsetof(struct target_sched_attr, sched_util_min);
11032             }
11033             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11034                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11035                     return -TARGET_EFAULT;
11036                 }
11037                 return -TARGET_E2BIG;
11038             }
11039 
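                  /*
                   * If the guest's sched_attr is larger than the one known here,
                   * any extra bytes must be zero; otherwise report E2BIG and
                   * write back the supported size, matching the contract that
                   * sched_setattr(2) documents.
                   */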
11040             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11041             if (zeroed < 0) {
11042                 return zeroed;
11043             } else if (zeroed == 0) {
11044                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11045                     return -TARGET_EFAULT;
11046                 }
11047                 return -TARGET_E2BIG;
11048             }
11049             if (size > sizeof(struct target_sched_attr)) {
11050                 size = sizeof(struct target_sched_attr);
11051             }
11052 
11053             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11054             if (!target_scha) {
11055                 return -TARGET_EFAULT;
11056             }
11057             scha.size = size;
11058             scha.sched_policy = tswap32(target_scha->sched_policy);
11059             scha.sched_flags = tswap64(target_scha->sched_flags);
11060             scha.sched_nice = tswap32(target_scha->sched_nice);
11061             scha.sched_priority = tswap32(target_scha->sched_priority);
11062             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11063             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11064             scha.sched_period = tswap64(target_scha->sched_period);
11065             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11066                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11067                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11068             }
11069             unlock_user(target_scha, arg2, 0);
11070             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11071         }
11072     case TARGET_NR_sched_yield:
11073         return get_errno(sched_yield());
11074     case TARGET_NR_sched_get_priority_max:
11075         return get_errno(sched_get_priority_max(arg1));
11076     case TARGET_NR_sched_get_priority_min:
11077         return get_errno(sched_get_priority_min(arg1));
11078 #ifdef TARGET_NR_sched_rr_get_interval
11079     case TARGET_NR_sched_rr_get_interval:
11080         {
11081             struct timespec ts;
11082             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11083             if (!is_error(ret)) {
11084                 ret = host_to_target_timespec(arg2, &ts);
11085             }
11086         }
11087         return ret;
11088 #endif
11089 #ifdef TARGET_NR_sched_rr_get_interval_time64
11090     case TARGET_NR_sched_rr_get_interval_time64:
11091         {
11092             struct timespec ts;
11093             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11094             if (!is_error(ret)) {
11095                 ret = host_to_target_timespec64(arg2, &ts);
11096             }
11097         }
11098         return ret;
11099 #endif
11100 #if defined(TARGET_NR_nanosleep)
11101     case TARGET_NR_nanosleep:
11102         {
11103             struct timespec req, rem;
11104             target_to_host_timespec(&req, arg1);
11105             ret = get_errno(safe_nanosleep(&req, &rem));
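                  /*
                   * The remaining time is only meaningful if the sleep was
                   * interrupted, so it is copied back solely on error and only
                   * when the guest supplied a rem pointer.
                   */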
11106             if (is_error(ret) && arg2) {
11107                 host_to_target_timespec(arg2, &rem);
11108             }
11109         }
11110         return ret;
11111 #endif
11112     case TARGET_NR_prctl:
11113         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11115 #ifdef TARGET_NR_arch_prctl
11116     case TARGET_NR_arch_prctl:
11117         return do_arch_prctl(cpu_env, arg1, arg2);
11118 #endif
11119 #ifdef TARGET_NR_pread64
11120     case TARGET_NR_pread64:
11121         if (regpairs_aligned(cpu_env, num)) {
11122             arg4 = arg5;
11123             arg5 = arg6;
11124         }
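              /*
               * On 32-bit ABIs that want 64-bit values in aligned register pairs
               * a padding register precedes the offset, so its halves arrive one
               * slot later; target_offset64() below reassembles them.
               */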
11125         if (arg2 == 0 && arg3 == 0) {
11126             /* Special-case NULL buffer and zero length, which should succeed */
11127             p = 0;
11128         } else {
11129             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11130             if (!p) {
11131                 return -TARGET_EFAULT;
11132             }
11133         }
11134         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11135         unlock_user(p, arg2, ret);
11136         return ret;
11137     case TARGET_NR_pwrite64:
11138         if (regpairs_aligned(cpu_env, num)) {
11139             arg4 = arg5;
11140             arg5 = arg6;
11141         }
11142         if (arg2 == 0 && arg3 == 0) {
11143             /* Special-case NULL buffer and zero length, which should succeed */
11144             p = 0;
11145         } else {
11146             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11147             if (!p) {
11148                 return -TARGET_EFAULT;
11149             }
11150         }
11151         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11152         unlock_user(p, arg2, 0);
11153         return ret;
11154 #endif
11155     case TARGET_NR_getcwd:
11156         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11157             return -TARGET_EFAULT;
11158         ret = get_errno(sys_getcwd1(p, arg2));
11159         unlock_user(p, arg1, ret);
11160         return ret;
11161     case TARGET_NR_capget:
11162     case TARGET_NR_capset:
11163     {
11164         struct target_user_cap_header *target_header;
11165         struct target_user_cap_data *target_data = NULL;
11166         struct __user_cap_header_struct header;
11167         struct __user_cap_data_struct data[2];
11168         struct __user_cap_data_struct *dataptr = NULL;
11169         int i, target_datalen;
11170         int data_items = 1;
11171 
11172         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11173             return -TARGET_EFAULT;
11174         }
11175         header.version = tswap32(target_header->version);
11176         header.pid = tswap32(target_header->pid);
11177 
11178         if (header.version != _LINUX_CAPABILITY_VERSION) {
11179             /* Versions 2 and up take a pointer to two user_data structs */
11180             data_items = 2;
11181         }
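              /*
               * _LINUX_CAPABILITY_VERSION is the v1 interface, which uses a
               * single 32-bit capability set; the v2/v3 interfaces widen the
               * sets to 64 bits split across two user_cap_data structs.
               */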
11182 
11183         target_datalen = sizeof(*target_data) * data_items;
11184 
11185         if (arg2) {
11186             if (num == TARGET_NR_capget) {
11187                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11188             } else {
11189                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11190             }
11191             if (!target_data) {
11192                 unlock_user_struct(target_header, arg1, 0);
11193                 return -TARGET_EFAULT;
11194             }
11195 
11196             if (num == TARGET_NR_capset) {
11197                 for (i = 0; i < data_items; i++) {
11198                     data[i].effective = tswap32(target_data[i].effective);
11199                     data[i].permitted = tswap32(target_data[i].permitted);
11200                     data[i].inheritable = tswap32(target_data[i].inheritable);
11201                 }
11202             }
11203 
11204             dataptr = data;
11205         }
11206 
11207         if (num == TARGET_NR_capget) {
11208             ret = get_errno(capget(&header, dataptr));
11209         } else {
11210             ret = get_errno(capset(&header, dataptr));
11211         }
11212 
11213         /* The kernel always updates version for both capget and capset */
11214         target_header->version = tswap32(header.version);
11215         unlock_user_struct(target_header, arg1, 1);
11216 
11217         if (arg2) {
11218             if (num == TARGET_NR_capget) {
11219                 for (i = 0; i < data_items; i++) {
11220                     target_data[i].effective = tswap32(data[i].effective);
11221                     target_data[i].permitted = tswap32(data[i].permitted);
11222                     target_data[i].inheritable = tswap32(data[i].inheritable);
11223                 }
11224                 unlock_user(target_data, arg2, target_datalen);
11225             } else {
11226                 unlock_user(target_data, arg2, 0);
11227             }
11228         }
11229         return ret;
11230     }
11231     case TARGET_NR_sigaltstack:
11232         return do_sigaltstack(arg1, arg2, cpu_env);
11233 
11234 #ifdef CONFIG_SENDFILE
11235 #ifdef TARGET_NR_sendfile
11236     case TARGET_NR_sendfile:
11237     {
11238         off_t *offp = NULL;
11239         off_t off;
11240         if (arg3) {
11241             ret = get_user_sal(off, arg3);
11242             if (is_error(ret)) {
11243                 return ret;
11244             }
11245             offp = &off;
11246         }
11247         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11248         if (!is_error(ret) && arg3) {
11249             abi_long ret2 = put_user_sal(off, arg3);
11250             if (is_error(ret2)) {
11251                 ret = ret2;
11252             }
11253         }
11254         return ret;
11255     }
11256 #endif
11257 #ifdef TARGET_NR_sendfile64
11258     case TARGET_NR_sendfile64:
11259     {
11260         off_t *offp = NULL;
11261         off_t off;
11262         if (arg3) {
11263             ret = get_user_s64(off, arg3);
11264             if (is_error(ret)) {
11265                 return ret;
11266             }
11267             offp = &off;
11268         }
11269         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11270         if (!is_error(ret) && arg3) {
11271             abi_long ret2 = put_user_s64(off, arg3);
11272             if (is_error(ret2)) {
11273                 ret = ret2;
11274             }
11275         }
11276         return ret;
11277     }
11278 #endif
11279 #endif
11280 #ifdef TARGET_NR_vfork
11281     case TARGET_NR_vfork:
11282         return get_errno(do_fork(cpu_env,
11283                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11284                          0, 0, 0, 0));
11285 #endif
11286 #ifdef TARGET_NR_ugetrlimit
11287     case TARGET_NR_ugetrlimit:
11288     {
11289         struct rlimit rlim;
11290         int resource = target_to_host_resource(arg1);
11291         ret = get_errno(getrlimit(resource, &rlim));
11292         if (!is_error(ret)) {
11293             struct target_rlimit *target_rlim;
11294             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11295                 return -TARGET_EFAULT;
11296             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11297             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11298             unlock_user_struct(target_rlim, arg2, 1);
11299         }
11300         return ret;
11301     }
11302 #endif
11303 #ifdef TARGET_NR_truncate64
11304     case TARGET_NR_truncate64:
11305         if (!(p = lock_user_string(arg1)))
11306             return -TARGET_EFAULT;
11307         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11308         unlock_user(p, arg1, 0);
11309         return ret;
11310 #endif
11311 #ifdef TARGET_NR_ftruncate64
11312     case TARGET_NR_ftruncate64:
11313         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11314 #endif
11315 #ifdef TARGET_NR_stat64
11316     case TARGET_NR_stat64:
11317         if (!(p = lock_user_string(arg1))) {
11318             return -TARGET_EFAULT;
11319         }
11320         ret = get_errno(stat(path(p), &st));
11321         unlock_user(p, arg1, 0);
11322         if (!is_error(ret))
11323             ret = host_to_target_stat64(cpu_env, arg2, &st);
11324         return ret;
11325 #endif
11326 #ifdef TARGET_NR_lstat64
11327     case TARGET_NR_lstat64:
11328         if (!(p = lock_user_string(arg1))) {
11329             return -TARGET_EFAULT;
11330         }
11331         ret = get_errno(lstat(path(p), &st));
11332         unlock_user(p, arg1, 0);
11333         if (!is_error(ret))
11334             ret = host_to_target_stat64(cpu_env, arg2, &st);
11335         return ret;
11336 #endif
11337 #ifdef TARGET_NR_fstat64
11338     case TARGET_NR_fstat64:
11339         ret = get_errno(fstat(arg1, &st));
11340         if (!is_error(ret))
11341             ret = host_to_target_stat64(cpu_env, arg2, &st);
11342         return ret;
11343 #endif
11344 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11345 #ifdef TARGET_NR_fstatat64
11346     case TARGET_NR_fstatat64:
11347 #endif
11348 #ifdef TARGET_NR_newfstatat
11349     case TARGET_NR_newfstatat:
11350 #endif
11351         if (!(p = lock_user_string(arg2))) {
11352             return -TARGET_EFAULT;
11353         }
11354         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11355         unlock_user(p, arg2, 0);
11356         if (!is_error(ret))
11357             ret = host_to_target_stat64(cpu_env, arg3, &st);
11358         return ret;
11359 #endif
11360 #if defined(TARGET_NR_statx)
11361     case TARGET_NR_statx:
11362         {
11363             struct target_statx *target_stx;
11364             int dirfd = arg1;
11365             int flags = arg3;
11366 
11367             p = lock_user_string(arg2);
11368             if (p == NULL) {
11369                 return -TARGET_EFAULT;
11370             }
11371 #if defined(__NR_statx)
11372             {
11373                 /*
11374                  * It is assumed that struct statx is architecture independent.
11375                  */
11376                 struct target_statx host_stx;
11377                 int mask = arg4;
11378 
11379                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11380                 if (!is_error(ret)) {
11381                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11382                         unlock_user(p, arg2, 0);
11383                         return -TARGET_EFAULT;
11384                     }
11385                 }
11386 
11387                 if (ret != -TARGET_ENOSYS) {
11388                     unlock_user(p, arg2, 0);
11389                     return ret;
11390                 }
11391             }
11392 #endif
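                  /*
                   * Either the host lacks statx() or it returned ENOSYS:
                   * approximate it with fstatat() and fill in only the fields a
                   * plain struct stat can provide, leaving the rest zeroed.
                   */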
11393             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11394             unlock_user(p, arg2, 0);
11395 
11396             if (!is_error(ret)) {
11397                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11398                     return -TARGET_EFAULT;
11399                 }
11400                 memset(target_stx, 0, sizeof(*target_stx));
11401                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11402                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11403                 __put_user(st.st_ino, &target_stx->stx_ino);
11404                 __put_user(st.st_mode, &target_stx->stx_mode);
11405                 __put_user(st.st_uid, &target_stx->stx_uid);
11406                 __put_user(st.st_gid, &target_stx->stx_gid);
11407                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11408                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11409                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11410                 __put_user(st.st_size, &target_stx->stx_size);
11411                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11412                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11413                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11414                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11415                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11416                 unlock_user_struct(target_stx, arg5, 1);
11417             }
11418         }
11419         return ret;
11420 #endif
11421 #ifdef TARGET_NR_lchown
11422     case TARGET_NR_lchown:
11423         if (!(p = lock_user_string(arg1)))
11424             return -TARGET_EFAULT;
11425         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11426         unlock_user(p, arg1, 0);
11427         return ret;
11428 #endif
11429 #ifdef TARGET_NR_getuid
11430     case TARGET_NR_getuid:
11431         return get_errno(high2lowuid(getuid()));
11432 #endif
11433 #ifdef TARGET_NR_getgid
11434     case TARGET_NR_getgid:
11435         return get_errno(high2lowgid(getgid()));
11436 #endif
11437 #ifdef TARGET_NR_geteuid
11438     case TARGET_NR_geteuid:
11439         return get_errno(high2lowuid(geteuid()));
11440 #endif
11441 #ifdef TARGET_NR_getegid
11442     case TARGET_NR_getegid:
11443         return get_errno(high2lowgid(getegid()));
11444 #endif
11445     case TARGET_NR_setreuid:
11446         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11447     case TARGET_NR_setregid:
11448         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11449     case TARGET_NR_getgroups:
11450         {
11451             int gidsetsize = arg1;
11452             target_id *target_grouplist;
11453             gid_t *grouplist;
11454             int i;
11455 
11456             grouplist = alloca(gidsetsize * sizeof(gid_t));
11457             ret = get_errno(getgroups(gidsetsize, grouplist));
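                  /*
                   * With gidsetsize == 0, getgroups() only reports the number of
                   * supplementary groups, so there is nothing to copy back.
                   */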
11458             if (gidsetsize == 0)
11459                 return ret;
11460             if (!is_error(ret)) {
11461                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11462                 if (!target_grouplist)
11463                     return -TARGET_EFAULT;
11464                 for (i = 0; i < ret; i++)
11465                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11466                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11467             }
11468         }
11469         return ret;
11470     case TARGET_NR_setgroups:
11471         {
11472             int gidsetsize = arg1;
11473             target_id *target_grouplist;
11474             gid_t *grouplist = NULL;
11475             int i;
11476             if (gidsetsize) {
11477                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11478                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11479                 if (!target_grouplist) {
11480                     return -TARGET_EFAULT;
11481                 }
11482                 for (i = 0; i < gidsetsize; i++) {
11483                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11484                 }
11485                 unlock_user(target_grouplist, arg2, 0);
11486             }
11487             return get_errno(setgroups(gidsetsize, grouplist));
11488         }
11489     case TARGET_NR_fchown:
11490         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11491 #if defined(TARGET_NR_fchownat)
11492     case TARGET_NR_fchownat:
11493         if (!(p = lock_user_string(arg2)))
11494             return -TARGET_EFAULT;
11495         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11496                                  low2highgid(arg4), arg5));
11497         unlock_user(p, arg2, 0);
11498         return ret;
11499 #endif
11500 #ifdef TARGET_NR_setresuid
11501     case TARGET_NR_setresuid:
11502         return get_errno(sys_setresuid(low2highuid(arg1),
11503                                        low2highuid(arg2),
11504                                        low2highuid(arg3)));
11505 #endif
11506 #ifdef TARGET_NR_getresuid
11507     case TARGET_NR_getresuid:
11508         {
11509             uid_t ruid, euid, suid;
11510             ret = get_errno(getresuid(&ruid, &euid, &suid));
11511             if (!is_error(ret)) {
11512                 if (put_user_id(high2lowuid(ruid), arg1)
11513                     || put_user_id(high2lowuid(euid), arg2)
11514                     || put_user_id(high2lowuid(suid), arg3))
11515                     return -TARGET_EFAULT;
11516             }
11517         }
11518         return ret;
11519 #endif
11520 #ifdef TARGET_NR_setresgid
11521     case TARGET_NR_setresgid:
11522         return get_errno(sys_setresgid(low2highgid(arg1),
11523                                        low2highgid(arg2),
11524                                        low2highgid(arg3)));
11525 #endif
11526 #ifdef TARGET_NR_getresgid
11527     case TARGET_NR_getresgid:
11528         {
11529             gid_t rgid, egid, sgid;
11530             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11531             if (!is_error(ret)) {
11532                 if (put_user_id(high2lowgid(rgid), arg1)
11533                     || put_user_id(high2lowgid(egid), arg2)
11534                     || put_user_id(high2lowgid(sgid), arg3))
11535                     return -TARGET_EFAULT;
11536             }
11537         }
11538         return ret;
11539 #endif
11540 #ifdef TARGET_NR_chown
11541     case TARGET_NR_chown:
11542         if (!(p = lock_user_string(arg1)))
11543             return -TARGET_EFAULT;
11544         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11545         unlock_user(p, arg1, 0);
11546         return ret;
11547 #endif
11548     case TARGET_NR_setuid:
11549         return get_errno(sys_setuid(low2highuid(arg1)));
11550     case TARGET_NR_setgid:
11551         return get_errno(sys_setgid(low2highgid(arg1)));
11552     case TARGET_NR_setfsuid:
11553         return get_errno(setfsuid(arg1));
11554     case TARGET_NR_setfsgid:
11555         return get_errno(setfsgid(arg1));
11556 
11557 #ifdef TARGET_NR_lchown32
11558     case TARGET_NR_lchown32:
11559         if (!(p = lock_user_string(arg1)))
11560             return -TARGET_EFAULT;
11561         ret = get_errno(lchown(p, arg2, arg3));
11562         unlock_user(p, arg1, 0);
11563         return ret;
11564 #endif
11565 #ifdef TARGET_NR_getuid32
11566     case TARGET_NR_getuid32:
11567         return get_errno(getuid());
11568 #endif
11569 
11570 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11571     /* Alpha specific */
11572     case TARGET_NR_getxuid:
11573         {
11574             uid_t euid;
11575             euid = geteuid();
11576             cpu_env->ir[IR_A4] = euid;
11577         }
11578         return get_errno(getuid());
11579 #endif
11580 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11581     /* Alpha specific */
11582     case TARGET_NR_getxgid:
11583         {
11584             gid_t egid;
11585             egid = getegid();
11586             cpu_env->ir[IR_A4] = egid;
11587         }
11588         return get_errno(getgid());
11589 #endif
11590 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11591     /* Alpha specific */
11592     case TARGET_NR_osf_getsysinfo:
11593         ret = -TARGET_EOPNOTSUPP;
11594         switch (arg1) {
11595           case TARGET_GSI_IEEE_FP_CONTROL:
11596             {
11597                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11598                 uint64_t swcr = cpu_env->swcr;
11599 
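                      /*
                       * The accrued exception status lives in the FPCR (see the
                       * osf_setsysinfo handler below), so fold those bits back
                       * into the SWCR value reported to the guest.
                       */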
11600                 swcr &= ~SWCR_STATUS_MASK;
11601                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11602 
11603                 if (put_user_u64 (swcr, arg2))
11604                         return -TARGET_EFAULT;
11605                 ret = 0;
11606             }
11607             break;
11608 
11609           /* case GSI_IEEE_STATE_AT_SIGNAL:
11610              -- Not implemented in linux kernel.
11611              case GSI_UACPROC:
11612              -- Retrieves current unaligned access state; not much used.
11613              case GSI_PROC_TYPE:
11614              -- Retrieves implver information; surely not used.
11615              case GSI_GET_HWRPB:
11616              -- Grabs a copy of the HWRPB; surely not used.
11617           */
11618         }
11619         return ret;
11620 #endif
11621 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11622     /* Alpha specific */
11623     case TARGET_NR_osf_setsysinfo:
11624         ret = -TARGET_EOPNOTSUPP;
11625         switch (arg1) {
11626           case TARGET_SSI_IEEE_FP_CONTROL:
11627             {
11628                 uint64_t swcr, fpcr;
11629 
11630                 if (get_user_u64 (swcr, arg2)) {
11631                     return -TARGET_EFAULT;
11632                 }
11633 
11634                 /*
11635                  * The kernel calls swcr_update_status to update the
11636                  * status bits from the fpcr at every point that it
11637                  * could be queried.  Therefore, we store the status
11638                  * bits only in FPCR.
11639                  */
11640                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11641 
11642                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11643                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11644                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11645                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11646                 ret = 0;
11647             }
11648             break;
11649 
11650           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11651             {
11652                 uint64_t exc, fpcr, fex;
11653 
11654                 if (get_user_u64(exc, arg2)) {
11655                     return -TARGET_EFAULT;
11656                 }
11657                 exc &= SWCR_STATUS_MASK;
11658                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11659 
11660                 /* Old exceptions are not signaled.  */
11661                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11662                 fex = exc & ~fex;
11663                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11664                 fex &= (cpu_env)->swcr;
11665 
11666                 /* Update the hardware fpcr.  */
11667                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11668                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11669 
11670                 if (fex) {
11671                     int si_code = TARGET_FPE_FLTUNK;
11672                     target_siginfo_t info;
11673 
11674                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11675                         si_code = TARGET_FPE_FLTUND;
11676                     }
11677                     if (fex & SWCR_TRAP_ENABLE_INE) {
11678                         si_code = TARGET_FPE_FLTRES;
11679                     }
11680                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11681                         si_code = TARGET_FPE_FLTUND;
11682                     }
11683                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11684                         si_code = TARGET_FPE_FLTOVF;
11685                     }
11686                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11687                         si_code = TARGET_FPE_FLTDIV;
11688                     }
11689                     if (fex & SWCR_TRAP_ENABLE_INV) {
11690                         si_code = TARGET_FPE_FLTINV;
11691                     }
11692 
11693                     info.si_signo = SIGFPE;
11694                     info.si_errno = 0;
11695                     info.si_code = si_code;
11696                     info._sifields._sigfault._addr = (cpu_env)->pc;
11697                     queue_signal(cpu_env, info.si_signo,
11698                                  QEMU_SI_FAULT, &info);
11699                 }
11700                 ret = 0;
11701             }
11702             break;
11703 
11704           /* case SSI_NVPAIRS:
11705              -- Used with SSIN_UACPROC to enable unaligned accesses.
11706              case SSI_IEEE_STATE_AT_SIGNAL:
11707              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11708              -- Not implemented in linux kernel
11709           */
11710         }
11711         return ret;
11712 #endif
11713 #ifdef TARGET_NR_osf_sigprocmask
11714     /* Alpha specific.  */
11715     case TARGET_NR_osf_sigprocmask:
11716         {
11717             abi_ulong mask;
11718             int how;
11719             sigset_t set, oldset;
11720 
11721             switch (arg1) {
11722             case TARGET_SIG_BLOCK:
11723                 how = SIG_BLOCK;
11724                 break;
11725             case TARGET_SIG_UNBLOCK:
11726                 how = SIG_UNBLOCK;
11727                 break;
11728             case TARGET_SIG_SETMASK:
11729                 how = SIG_SETMASK;
11730                 break;
11731             default:
11732                 return -TARGET_EINVAL;
11733             }
11734             mask = arg2;
11735             target_to_host_old_sigset(&set, &mask);
11736             ret = do_sigprocmask(how, &set, &oldset);
11737             if (!ret) {
11738                 host_to_target_old_sigset(&mask, &oldset);
11739                 ret = mask;
11740             }
11741         }
11742         return ret;
11743 #endif
11744 
11745 #ifdef TARGET_NR_getgid32
11746     case TARGET_NR_getgid32:
11747         return get_errno(getgid());
11748 #endif
11749 #ifdef TARGET_NR_geteuid32
11750     case TARGET_NR_geteuid32:
11751         return get_errno(geteuid());
11752 #endif
11753 #ifdef TARGET_NR_getegid32
11754     case TARGET_NR_getegid32:
11755         return get_errno(getegid());
11756 #endif
11757 #ifdef TARGET_NR_setreuid32
11758     case TARGET_NR_setreuid32:
11759         return get_errno(setreuid(arg1, arg2));
11760 #endif
11761 #ifdef TARGET_NR_setregid32
11762     case TARGET_NR_setregid32:
11763         return get_errno(setregid(arg1, arg2));
11764 #endif
11765 #ifdef TARGET_NR_getgroups32
11766     case TARGET_NR_getgroups32:
11767         {
11768             int gidsetsize = arg1;
11769             uint32_t *target_grouplist;
11770             gid_t *grouplist;
11771             int i;
11772 
11773             grouplist = alloca(gidsetsize * sizeof(gid_t));
11774             ret = get_errno(getgroups(gidsetsize, grouplist));
11775             if (gidsetsize == 0)
11776                 return ret;
11777             if (!is_error(ret)) {
11778                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11779                 if (!target_grouplist) {
11780                     return -TARGET_EFAULT;
11781                 }
11782                 for (i = 0; i < ret; i++)
11783                     target_grouplist[i] = tswap32(grouplist[i]);
11784                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11785             }
11786         }
11787         return ret;
11788 #endif
11789 #ifdef TARGET_NR_setgroups32
11790     case TARGET_NR_setgroups32:
11791         {
11792             int gidsetsize = arg1;
11793             uint32_t *target_grouplist;
11794             gid_t *grouplist;
11795             int i;
11796 
11797             grouplist = alloca(gidsetsize * sizeof(gid_t));
11798             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11799             if (!target_grouplist) {
11800                 return -TARGET_EFAULT;
11801             }
11802             for (i = 0; i < gidsetsize; i++)
11803                 grouplist[i] = tswap32(target_grouplist[i]);
11804             unlock_user(target_grouplist, arg2, 0);
11805             return get_errno(setgroups(gidsetsize, grouplist));
11806         }
11807 #endif
11808 #ifdef TARGET_NR_fchown32
11809     case TARGET_NR_fchown32:
11810         return get_errno(fchown(arg1, arg2, arg3));
11811 #endif
11812 #ifdef TARGET_NR_setresuid32
11813     case TARGET_NR_setresuid32:
11814         return get_errno(sys_setresuid(arg1, arg2, arg3));
11815 #endif
11816 #ifdef TARGET_NR_getresuid32
11817     case TARGET_NR_getresuid32:
11818         {
11819             uid_t ruid, euid, suid;
11820             ret = get_errno(getresuid(&ruid, &euid, &suid));
11821             if (!is_error(ret)) {
11822                 if (put_user_u32(ruid, arg1)
11823                     || put_user_u32(euid, arg2)
11824                     || put_user_u32(suid, arg3))
11825                     return -TARGET_EFAULT;
11826             }
11827         }
11828         return ret;
11829 #endif
11830 #ifdef TARGET_NR_setresgid32
11831     case TARGET_NR_setresgid32:
11832         return get_errno(sys_setresgid(arg1, arg2, arg3));
11833 #endif
11834 #ifdef TARGET_NR_getresgid32
11835     case TARGET_NR_getresgid32:
11836         {
11837             gid_t rgid, egid, sgid;
11838             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11839             if (!is_error(ret)) {
11840                 if (put_user_u32(rgid, arg1)
11841                     || put_user_u32(egid, arg2)
11842                     || put_user_u32(sgid, arg3))
11843                     return -TARGET_EFAULT;
11844             }
11845         }
11846         return ret;
11847 #endif
11848 #ifdef TARGET_NR_chown32
11849     case TARGET_NR_chown32:
11850         if (!(p = lock_user_string(arg1)))
11851             return -TARGET_EFAULT;
11852         ret = get_errno(chown(p, arg2, arg3));
11853         unlock_user(p, arg1, 0);
11854         return ret;
11855 #endif
11856 #ifdef TARGET_NR_setuid32
11857     case TARGET_NR_setuid32:
11858         return get_errno(sys_setuid(arg1));
11859 #endif
11860 #ifdef TARGET_NR_setgid32
11861     case TARGET_NR_setgid32:
11862         return get_errno(sys_setgid(arg1));
11863 #endif
11864 #ifdef TARGET_NR_setfsuid32
11865     case TARGET_NR_setfsuid32:
11866         return get_errno(setfsuid(arg1));
11867 #endif
11868 #ifdef TARGET_NR_setfsgid32
11869     case TARGET_NR_setfsgid32:
11870         return get_errno(setfsgid(arg1));
11871 #endif
11872 #ifdef TARGET_NR_mincore
11873     case TARGET_NR_mincore:
11874         {
11875             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11876             if (!a) {
11877                 return -TARGET_ENOMEM;
11878             }
11879             p = lock_user_string(arg3);
11880             if (!p) {
11881                 ret = -TARGET_EFAULT;
11882             } else {
11883                 ret = get_errno(mincore(a, arg2, p));
11884                 unlock_user(p, arg3, ret);
11885             }
11886             unlock_user(a, arg1, 0);
11887         }
11888         return ret;
11889 #endif
11890 #ifdef TARGET_NR_arm_fadvise64_64
11891     case TARGET_NR_arm_fadvise64_64:
11892         /* arm_fadvise64_64 looks like fadvise64_64 but
11893          * with different argument order: fd, advice, offset, len
11894          * rather than the usual fd, offset, len, advice.
11895          * Note that offset and len are both 64-bit so appear as
11896          * pairs of 32-bit registers.
11897          */
11898         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11899                             target_offset64(arg5, arg6), arg2);
11900         return -host_to_target_errno(ret);
11901 #endif
11902 
11903 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11904 
11905 #ifdef TARGET_NR_fadvise64_64
11906     case TARGET_NR_fadvise64_64:
11907 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11908         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11909         ret = arg2;
11910         arg2 = arg3;
11911         arg3 = arg4;
11912         arg4 = arg5;
11913         arg5 = arg6;
11914         arg6 = ret;
11915 #else
11916         /* 6 args: fd, offset (high, low), len (high, low), advice */
11917         if (regpairs_aligned(cpu_env, num)) {
11918             /* offset is in (3,4), len in (5,6) and advice in 7 */
11919             arg2 = arg3;
11920             arg3 = arg4;
11921             arg4 = arg5;
11922             arg5 = arg6;
11923             arg6 = arg7;
11924         }
11925 #endif
11926         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11927                             target_offset64(arg4, arg5), arg6);
11928         return -host_to_target_errno(ret);
11929 #endif
11930 
11931 #ifdef TARGET_NR_fadvise64
11932     case TARGET_NR_fadvise64:
11933         /* 5 args: fd, offset (high, low), len, advice */
11934         if (regpairs_aligned(cpu_env, num)) {
11935             /* offset is in (3,4), len in 5 and advice in 6 */
11936             arg2 = arg3;
11937             arg3 = arg4;
11938             arg4 = arg5;
11939             arg5 = arg6;
11940         }
11941         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11942         return -host_to_target_errno(ret);
11943 #endif
11944 
11945 #else /* not a 32-bit ABI */
11946 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11947 #ifdef TARGET_NR_fadvise64_64
11948     case TARGET_NR_fadvise64_64:
11949 #endif
11950 #ifdef TARGET_NR_fadvise64
11951     case TARGET_NR_fadvise64:
11952 #endif
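              /*
               * s390x defines POSIX_FADV_DONTNEED/NOREUSE as 6/7 rather than
               * the generic 4/5, so remap the guest's advice; guest values 4
               * and 5 are mapped to out-of-range host values so that the host
               * kernel rejects them.
               */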
11953 #ifdef TARGET_S390X
11954         switch (arg4) {
11955         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11956         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11957         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11958         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11959         default: break;
11960         }
11961 #endif
11962         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11963 #endif
11964 #endif /* end of 64-bit ABI fadvise handling */
11965 
11966 #ifdef TARGET_NR_madvise
11967     case TARGET_NR_madvise:
11968         return target_madvise(arg1, arg2, arg3);
11969 #endif
11970 #ifdef TARGET_NR_fcntl64
11971     case TARGET_NR_fcntl64:
11972     {
11973         int cmd;
11974         struct flock64 fl;
11975         from_flock64_fn *copyfrom = copy_from_user_flock64;
11976         to_flock64_fn *copyto = copy_to_user_flock64;
11977 
11978 #ifdef TARGET_ARM
11979         if (!cpu_env->eabi) {
11980             copyfrom = copy_from_user_oabi_flock64;
11981             copyto = copy_to_user_oabi_flock64;
11982         }
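              /*
               * The assumption here is that the old ARM OABI lays out struct
               * flock64 without the alignment padding that EABI inserts before
               * its 64-bit members, hence the separate copy helpers above.
               */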
11983 #endif
11984 
11985         cmd = target_to_host_fcntl_cmd(arg2);
11986         if (cmd == -TARGET_EINVAL) {
11987             return cmd;
11988         }
11989 
11990         switch (arg2) {
11991         case TARGET_F_GETLK64:
11992             ret = copyfrom(&fl, arg3);
11993             if (ret) {
11994                 break;
11995             }
11996             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11997             if (ret == 0) {
11998                 ret = copyto(arg3, &fl);
11999             }
12000             break;
12001 
12002         case TARGET_F_SETLK64:
12003         case TARGET_F_SETLKW64:
12004             ret = copyfrom(&fl, arg3);
12005             if (ret) {
12006                 break;
12007             }
12008             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12009             break;
12010         default:
12011             ret = do_fcntl(arg1, arg2, arg3);
12012             break;
12013         }
12014         return ret;
12015     }
12016 #endif
12017 #ifdef TARGET_NR_cacheflush
12018     case TARGET_NR_cacheflush:
12019         /* self-modifying code is handled automatically, so nothing needed */
12020         return 0;
12021 #endif
12022 #ifdef TARGET_NR_getpagesize
12023     case TARGET_NR_getpagesize:
12024         return TARGET_PAGE_SIZE;
12025 #endif
12026     case TARGET_NR_gettid:
12027         return get_errno(sys_gettid());
12028 #ifdef TARGET_NR_readahead
12029     case TARGET_NR_readahead:
12030 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12031         if (regpairs_aligned(cpu_env, num)) {
12032             arg2 = arg3;
12033             arg3 = arg4;
12034             arg4 = arg5;
12035         }
12036         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12037 #else
12038         ret = get_errno(readahead(arg1, arg2, arg3));
12039 #endif
12040         return ret;
12041 #endif
12042 #ifdef CONFIG_ATTR
12043 #ifdef TARGET_NR_setxattr
12044     case TARGET_NR_listxattr:
12045     case TARGET_NR_llistxattr:
12046     {
12047         void *p, *b = 0;
12048         if (arg2) {
12049             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12050             if (!b) {
12051                 return -TARGET_EFAULT;
12052             }
12053         }
12054         p = lock_user_string(arg1);
12055         if (p) {
12056             if (num == TARGET_NR_listxattr) {
12057                 ret = get_errno(listxattr(p, b, arg3));
12058             } else {
12059                 ret = get_errno(llistxattr(p, b, arg3));
12060             }
12061         } else {
12062             ret = -TARGET_EFAULT;
12063         }
12064         unlock_user(p, arg1, 0);
12065         unlock_user(b, arg2, arg3);
12066         return ret;
12067     }
12068     case TARGET_NR_flistxattr:
12069     {
12070         void *b = 0;
12071         if (arg2) {
12072             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12073             if (!b) {
12074                 return -TARGET_EFAULT;
12075             }
12076         }
12077         ret = get_errno(flistxattr(arg1, b, arg3));
12078         unlock_user(b, arg2, arg3);
12079         return ret;
12080     }
12081     case TARGET_NR_setxattr:
12082     case TARGET_NR_lsetxattr:
12083         {
12084             void *p, *n, *v = 0;
12085             if (arg3) {
12086                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12087                 if (!v) {
12088                     return -TARGET_EFAULT;
12089                 }
12090             }
12091             p = lock_user_string(arg1);
12092             n = lock_user_string(arg2);
12093             if (p && n) {
12094                 if (num == TARGET_NR_setxattr) {
12095                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12096                 } else {
12097                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12098                 }
12099             } else {
12100                 ret = -TARGET_EFAULT;
12101             }
12102             unlock_user(p, arg1, 0);
12103             unlock_user(n, arg2, 0);
12104             unlock_user(v, arg3, 0);
12105         }
12106         return ret;
12107     case TARGET_NR_fsetxattr:
12108         {
12109             void *n, *v = 0;
12110             if (arg3) {
12111                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12112                 if (!v) {
12113                     return -TARGET_EFAULT;
12114                 }
12115             }
12116             n = lock_user_string(arg2);
12117             if (n) {
12118                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12119             } else {
12120                 ret = -TARGET_EFAULT;
12121             }
12122             unlock_user(n, arg2, 0);
12123             unlock_user(v, arg3, 0);
12124         }
12125         return ret;
12126     case TARGET_NR_getxattr:
12127     case TARGET_NR_lgetxattr:
12128         {
12129             void *p, *n, *v = 0;
12130             if (arg3) {
12131                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12132                 if (!v) {
12133                     return -TARGET_EFAULT;
12134                 }
12135             }
12136             p = lock_user_string(arg1);
12137             n = lock_user_string(arg2);
12138             if (p && n) {
12139                 if (num == TARGET_NR_getxattr) {
12140                     ret = get_errno(getxattr(p, n, v, arg4));
12141                 } else {
12142                     ret = get_errno(lgetxattr(p, n, v, arg4));
12143                 }
12144             } else {
12145                 ret = -TARGET_EFAULT;
12146             }
12147             unlock_user(p, arg1, 0);
12148             unlock_user(n, arg2, 0);
12149             unlock_user(v, arg3, arg4);
12150         }
12151         return ret;
12152     case TARGET_NR_fgetxattr:
12153         {
12154             void *n, *v = 0;
12155             if (arg3) {
12156                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12157                 if (!v) {
12158                     return -TARGET_EFAULT;
12159                 }
12160             }
12161             n = lock_user_string(arg2);
12162             if (n) {
12163                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12164             } else {
12165                 ret = -TARGET_EFAULT;
12166             }
12167             unlock_user(n, arg2, 0);
12168             unlock_user(v, arg3, arg4);
12169         }
12170         return ret;
12171     case TARGET_NR_removexattr:
12172     case TARGET_NR_lremovexattr:
12173         {
12174             void *p, *n;
12175             p = lock_user_string(arg1);
12176             n = lock_user_string(arg2);
12177             if (p && n) {
12178                 if (num == TARGET_NR_removexattr) {
12179                     ret = get_errno(removexattr(p, n));
12180                 } else {
12181                     ret = get_errno(lremovexattr(p, n));
12182                 }
12183             } else {
12184                 ret = -TARGET_EFAULT;
12185             }
12186             unlock_user(p, arg1, 0);
12187             unlock_user(n, arg2, 0);
12188         }
12189         return ret;
12190     case TARGET_NR_fremovexattr:
12191         {
12192             void *n;
12193             n = lock_user_string(arg2);
12194             if (n) {
12195                 ret = get_errno(fremovexattr(arg1, n));
12196             } else {
12197                 ret = -TARGET_EFAULT;
12198             }
12199             unlock_user(n, arg2, 0);
12200         }
12201         return ret;
12202 #endif
12203 #endif /* CONFIG_ATTR */
12204 #ifdef TARGET_NR_set_thread_area
12205     case TARGET_NR_set_thread_area:
12206 #if defined(TARGET_MIPS)
12207       cpu_env->active_tc.CP0_UserLocal = arg1;
12208       return 0;
12209 #elif defined(TARGET_CRIS)
12210       if (arg1 & 0xff) {
12211           ret = -TARGET_EINVAL;
12212       } else {
12213           cpu_env->pregs[PR_PID] = arg1;
12214           ret = 0;
12215       }
12216       return ret;
12217 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12218       return do_set_thread_area(cpu_env, arg1);
12219 #elif defined(TARGET_M68K)
12220       {
12221           TaskState *ts = cpu->opaque;
12222           ts->tp_value = arg1;
12223           return 0;
12224       }
12225 #else
12226       return -TARGET_ENOSYS;
12227 #endif
12228 #endif
12229 #ifdef TARGET_NR_get_thread_area
12230     case TARGET_NR_get_thread_area:
12231 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12232         return do_get_thread_area(cpu_env, arg1);
12233 #elif defined(TARGET_M68K)
12234         {
12235             TaskState *ts = cpu->opaque;
12236             return ts->tp_value;
12237         }
12238 #else
12239         return -TARGET_ENOSYS;
12240 #endif
12241 #endif
12242 #ifdef TARGET_NR_getdomainname
12243     case TARGET_NR_getdomainname:
12244         return -TARGET_ENOSYS;
12245 #endif
12246 
12247 #ifdef TARGET_NR_clock_settime
12248     case TARGET_NR_clock_settime:
12249     {
12250         struct timespec ts;
12251 
12252         ret = target_to_host_timespec(&ts, arg2);
12253         if (!is_error(ret)) {
12254             ret = get_errno(clock_settime(arg1, &ts));
12255         }
12256         return ret;
12257     }
12258 #endif
12259 #ifdef TARGET_NR_clock_settime64
12260     case TARGET_NR_clock_settime64:
12261     {
12262         struct timespec ts;
12263 
12264         ret = target_to_host_timespec64(&ts, arg2);
12265         if (!is_error(ret)) {
12266             ret = get_errno(clock_settime(arg1, &ts));
12267         }
12268         return ret;
12269     }
12270 #endif
12271 #ifdef TARGET_NR_clock_gettime
12272     case TARGET_NR_clock_gettime:
12273     {
12274         struct timespec ts;
12275         ret = get_errno(clock_gettime(arg1, &ts));
12276         if (!is_error(ret)) {
12277             ret = host_to_target_timespec(arg2, &ts);
12278         }
12279         return ret;
12280     }
12281 #endif
12282 #ifdef TARGET_NR_clock_gettime64
12283     case TARGET_NR_clock_gettime64:
12284     {
12285         struct timespec ts;
12286         ret = get_errno(clock_gettime(arg1, &ts));
12287         if (!is_error(ret)) {
12288             ret = host_to_target_timespec64(arg2, &ts);
12289         }
12290         return ret;
12291     }
12292 #endif
12293 #ifdef TARGET_NR_clock_getres
12294     case TARGET_NR_clock_getres:
12295     {
12296         struct timespec ts;
12297         ret = get_errno(clock_getres(arg1, &ts));
12298         if (!is_error(ret) && host_to_target_timespec(arg2, &ts)) {
12299             return -TARGET_EFAULT;
12300         }
12301         return ret;
12302     }
12303 #endif
12304 #ifdef TARGET_NR_clock_getres_time64
12305     case TARGET_NR_clock_getres_time64:
12306     {
12307         struct timespec ts;
12308         ret = get_errno(clock_getres(arg1, &ts));
12309         if (!is_error(ret) && host_to_target_timespec64(arg2, &ts)) {
12310             return -TARGET_EFAULT;
12311         }
12312         return ret;
12313     }
12314 #endif
12315 #ifdef TARGET_NR_clock_nanosleep
12316     case TARGET_NR_clock_nanosleep:
12317     {
12318         struct timespec ts;
12319         if (target_to_host_timespec(&ts, arg3)) {
12320             return -TARGET_EFAULT;
12321         }
12322         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12323                                              &ts, arg4 ? &ts : NULL));
12324         /*
12325          * If the call is interrupted by a signal handler, it fails with
12326          * -TARGET_EINTR.  If arg4 is not NULL and arg2 is not TIMER_ABSTIME,
12327          * the remaining unslept time is written back to arg4.
12328          */
12329         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12330             host_to_target_timespec(arg4, &ts)) {
12331               return -TARGET_EFAULT;
12332         }
12333 
12334         return ret;
12335     }
12336 #endif
12337 #ifdef TARGET_NR_clock_nanosleep_time64
12338     case TARGET_NR_clock_nanosleep_time64:
12339     {
12340         struct timespec ts;
12341 
12342         if (target_to_host_timespec64(&ts, arg3)) {
12343             return -TARGET_EFAULT;
12344         }
12345 
12346         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12347                                              &ts, arg4 ? &ts : NULL));
12348 
12349         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12350             host_to_target_timespec64(arg4, &ts)) {
12351             return -TARGET_EFAULT;
12352         }
12353         return ret;
12354     }
12355 #endif
12356 
12357 #if defined(TARGET_NR_set_tid_address)
12358     case TARGET_NR_set_tid_address:
12359     {
12360         TaskState *ts = cpu->opaque;
12361         ts->child_tidptr = arg1;
12362         /* don't call host set_tid_address(); just return the tid */
12363         return get_errno(sys_gettid());
12364     }
12365 #endif
12366 
12367     case TARGET_NR_tkill:
12368         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12369 
12370     case TARGET_NR_tgkill:
12371         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12372                          target_to_host_signal(arg3)));
12373 
12374 #ifdef TARGET_NR_set_robust_list
12375     case TARGET_NR_set_robust_list:
12376     case TARGET_NR_get_robust_list:
12377         /* The ABI for supporting robust futexes has userspace pass
12378          * the kernel a pointer to a linked list which is updated by
12379          * userspace after the syscall; the list is walked by the kernel
12380          * when the thread exits. Since the linked list in QEMU guest
12381          * memory isn't a valid linked list for the host and we have
12382          * no way to reliably intercept the thread-death event, we can't
12383          * support these. Silently return ENOSYS so that guest userspace
12384          * falls back to a non-robust futex implementation (which should
12385          * be OK except in the corner case of the guest crashing while
12386          * holding a mutex that is shared with another process via
12387          * shared memory).
12388          */
12389         return -TARGET_ENOSYS;
12390 #endif
12391 
12392 #if defined(TARGET_NR_utimensat)
12393     case TARGET_NR_utimensat:
12394         {
12395             struct timespec *tsp, ts[2];
12396             if (!arg3) {
12397                 tsp = NULL;
12398             } else {
12399                 if (target_to_host_timespec(ts, arg3)) {
12400                     return -TARGET_EFAULT;
12401                 }
12402                 if (target_to_host_timespec(ts + 1, arg3 +
12403                                             sizeof(struct target_timespec))) {
12404                     return -TARGET_EFAULT;
12405                 }
12406                 tsp = ts;
12407             }
12408             if (!arg2) {
12409                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12410             } else {
12411                 if (!(p = lock_user_string(arg2))) {
12412                     return -TARGET_EFAULT;
12413                 }
12414                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12415                 unlock_user(p, arg2, 0);
12416             }
12417         }
12418         return ret;
12419 #endif
12420 #ifdef TARGET_NR_utimensat_time64
12421     case TARGET_NR_utimensat_time64:
12422         {
12423             struct timespec *tsp, ts[2];
12424             if (!arg3) {
12425                 tsp = NULL;
12426             } else {
12427                 if (target_to_host_timespec64(ts, arg3)) {
12428                     return -TARGET_EFAULT;
12429                 }
12430                 if (target_to_host_timespec64(ts + 1, arg3 +
12431                                      sizeof(struct target__kernel_timespec))) {
12432                     return -TARGET_EFAULT;
12433                 }
12434                 tsp = ts;
12435             }
12436             if (!arg2) {
12437                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12438             } else {
12439                 p = lock_user_string(arg2);
12440                 if (!p) {
12441                     return -TARGET_EFAULT;
12442                 }
12443                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12444                 unlock_user(p, arg2, 0);
12445             }
12446         }
12447         return ret;
12448 #endif
12449 #ifdef TARGET_NR_futex
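          /*
           * The boolean argument tells do_futex() whether the timeout uses the
           * 64-bit time layout (false here, true for futex_time64 below).
           */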
12450     case TARGET_NR_futex:
12451         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12452 #endif
12453 #ifdef TARGET_NR_futex_time64
12454     case TARGET_NR_futex_time64:
12455         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12456 #endif
12457 #ifdef CONFIG_INOTIFY
12458 #if defined(TARGET_NR_inotify_init)
12459     case TARGET_NR_inotify_init:
12460         ret = get_errno(inotify_init());
12461         if (ret >= 0) {
12462             fd_trans_register(ret, &target_inotify_trans);
12463         }
12464         return ret;
12465 #endif
12466 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12467     case TARGET_NR_inotify_init1:
12468         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12469                                           fcntl_flags_tbl)));
12470         if (ret >= 0) {
12471             fd_trans_register(ret, &target_inotify_trans);
12472         }
12473         return ret;
12474 #endif
12475 #if defined(TARGET_NR_inotify_add_watch)
12476     case TARGET_NR_inotify_add_watch:
12477         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
12478         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12479         unlock_user(p, arg2, 0);
12480         return ret;
12481 #endif
12482 #if defined(TARGET_NR_inotify_rm_watch)
12483     case TARGET_NR_inotify_rm_watch:
12484         return get_errno(inotify_rm_watch(arg1, arg2));
12485 #endif
12486 #endif
12487 
12488 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12489     case TARGET_NR_mq_open:
12490         {
12491             struct mq_attr posix_mq_attr;
12492             struct mq_attr *pposix_mq_attr;
12493             int host_flags;
12494 
12495             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12496             pposix_mq_attr = NULL;
12497             if (arg4) {
12498                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12499                     return -TARGET_EFAULT;
12500                 }
12501                 pposix_mq_attr = &posix_mq_attr;
12502             }
12503             p = lock_user_string(arg1 - 1);
12504             if (!p) {
12505                 return -TARGET_EFAULT;
12506             }
12507             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12508             unlock_user(p, arg1, 0);
12509         }
12510         return ret;
12511 
12512     case TARGET_NR_mq_unlink:
12513         p = lock_user_string(arg1 - 1);
12514         if (!p) {
12515             return -TARGET_EFAULT;
12516         }
12517         ret = get_errno(mq_unlink(p));
12518         unlock_user(p, arg1, 0);
12519         return ret;
12520 
12521 #ifdef TARGET_NR_mq_timedsend
12522     case TARGET_NR_mq_timedsend:
12523         {
12524             struct timespec ts;
12525 
12526             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12527             if (arg5 != 0) {
12528                 if (target_to_host_timespec(&ts, arg5)) {
12529                     return -TARGET_EFAULT;
12530                 }
12531                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12532                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12533                     return -TARGET_EFAULT;
12534                 }
12535             } else {
12536                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12537             }
12538             unlock_user(p, arg2, arg3);
12539         }
12540         return ret;
12541 #endif
12542 #ifdef TARGET_NR_mq_timedsend_time64
12543     case TARGET_NR_mq_timedsend_time64:
12544         {
12545             struct timespec ts;
12546 
12547             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12548             if (arg5 != 0) {
12549                 if (target_to_host_timespec64(&ts, arg5)) {
12550                     return -TARGET_EFAULT;
12551                 }
12552                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12553                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12554                     return -TARGET_EFAULT;
12555                 }
12556             } else {
12557                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12558             }
12559             unlock_user(p, arg2, arg3);
12560         }
12561         return ret;
12562 #endif
12563 
12564 #ifdef TARGET_NR_mq_timedreceive
12565     case TARGET_NR_mq_timedreceive:
12566         {
12567             struct timespec ts;
12568             unsigned int prio;
12569 
12570             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12571             if (arg5 != 0) {
12572                 if (target_to_host_timespec(&ts, arg5)) {
12573                     return -TARGET_EFAULT;
12574                 }
12575                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12576                                                      &prio, &ts));
12577                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12578                     return -TARGET_EFAULT;
12579                 }
12580             } else {
12581                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12582                                                      &prio, NULL));
12583             }
12584             unlock_user(p, arg2, arg3);
12585             if (arg4 != 0) {
12586                 put_user_u32(prio, arg4);
                  }
12587         }
12588         return ret;
12589 #endif
12590 #ifdef TARGET_NR_mq_timedreceive_time64
12591     case TARGET_NR_mq_timedreceive_time64:
12592         {
12593             struct timespec ts;
12594             unsigned int prio;
12595 
12596             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12597             if (arg5 != 0) {
12598                 if (target_to_host_timespec64(&ts, arg5)) {
12599                     return -TARGET_EFAULT;
12600                 }
12601                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12602                                                      &prio, &ts));
12603                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12604                     return -TARGET_EFAULT;
12605                 }
12606             } else {
12607                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12608                                                      &prio, NULL));
12609             }
12610             unlock_user(p, arg2, arg3);
12611             if (arg4 != 0) {
12612                 put_user_u32(prio, arg4);
12613             }
12614         }
12615         return ret;
12616 #endif
12617 
12618     /* Not implemented for now... */
12619 /*     case TARGET_NR_mq_notify: */
12620 /*         break; */
12621 
12622     case TARGET_NR_mq_getsetattr:
12623         {
12624             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12625             ret = 0;
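                  /*
                   * mq_setattr() also reports the previous attributes, so
                   * mq_getattr() is only needed when the guest asks for the
                   * old attributes without setting new ones.
                   */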
12626             if (arg2 != 0) {
12627                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
12628                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12629                                            &posix_mq_attr_out));
12630             } else if (arg3 != 0) {
12631                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12632             }
12633             if (ret == 0 && arg3 != 0) {
12634                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12635             }
12636         }
12637         return ret;
12638 #endif
12639 
12640 #ifdef CONFIG_SPLICE
12641 #ifdef TARGET_NR_tee
12642     case TARGET_NR_tee:
12643         {
12644             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12645         }
12646         return ret;
12647 #endif
12648 #ifdef TARGET_NR_splice
12649     case TARGET_NR_splice:
12650         {
12651             loff_t loff_in, loff_out;
12652             loff_t *ploff_in = NULL, *ploff_out = NULL;
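                  /*
                   * Read any guest-supplied offsets up front; the updated
                   * values are written back after the host splice() call.
                   */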
12653             if (arg2) {
12654                 if (get_user_u64(loff_in, arg2)) {
12655                     return -TARGET_EFAULT;
12656                 }
12657                 ploff_in = &loff_in;
12658             }
12659             if (arg4) {
12660                 if (get_user_u64(loff_out, arg4)) {
12661                     return -TARGET_EFAULT;
12662                 }
12663                 ploff_out = &loff_out;
12664             }
12665             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12666             if (arg2) {
12667                 if (put_user_u64(loff_in, arg2)) {
12668                     return -TARGET_EFAULT;
12669                 }
12670             }
12671             if (arg4) {
12672                 if (put_user_u64(loff_out, arg4)) {
12673                     return -TARGET_EFAULT;
12674                 }
12675             }
12676         }
12677         return ret;
12678 #endif
12679 #ifdef TARGET_NR_vmsplice
12680     case TARGET_NR_vmsplice:
12681         {
12682             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12683             if (vec != NULL) {
12684                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12685                 unlock_iovec(vec, arg2, arg3, 0);
12686             } else {
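                      /* lock_iovec() reports failure through errno, hence the
                         explicit conversion instead of get_errno() */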
12687                 ret = -host_to_target_errno(errno);
12688             }
12689         }
12690         return ret;
12691 #endif
12692 #endif /* CONFIG_SPLICE */
12693 #ifdef CONFIG_EVENTFD
12694 #if defined(TARGET_NR_eventfd)
12695     case TARGET_NR_eventfd:
12696         ret = get_errno(eventfd(arg1, 0));
12697         if (ret >= 0) {
12698             fd_trans_register(ret, &target_eventfd_trans);
12699         }
12700         return ret;
12701 #endif
12702 #if defined(TARGET_NR_eventfd2)
12703     case TARGET_NR_eventfd2:
12704     {
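              /*
               * The TARGET_O_* flag values may differ from the host's, so
               * translate them by hand; the host EFD_NONBLOCK/EFD_CLOEXEC
               * flags are defined to the O_* values, so passing O_NONBLOCK
               * and O_CLOEXEC to eventfd() here is correct.
               */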
12705         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12706         if (arg2 & TARGET_O_NONBLOCK) {
12707             host_flags |= O_NONBLOCK;
12708         }
12709         if (arg2 & TARGET_O_CLOEXEC) {
12710             host_flags |= O_CLOEXEC;
12711         }
12712         ret = get_errno(eventfd(arg1, host_flags));
12713         if (ret >= 0) {
12714             fd_trans_register(ret, &target_eventfd_trans);
12715         }
12716         return ret;
12717     }
12718 #endif
12719 #endif /* CONFIG_EVENTFD  */
12720 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12721     case TARGET_NR_fallocate:
12722 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
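              /*
               * On 32-bit ABIs (other than MIPS N32) the 64-bit offset and
               * length each arrive split across two registers and are
               * reassembled with target_offset64().
               */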
12723         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12724                                   target_offset64(arg5, arg6)));
12725 #else
12726         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12727 #endif
12728         return ret;
12729 #endif
12730 #if defined(CONFIG_SYNC_FILE_RANGE)
12731 #if defined(TARGET_NR_sync_file_range)
12732     case TARGET_NR_sync_file_range:
12733 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12734 #if defined(TARGET_MIPS)
12735         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12736                                         target_offset64(arg5, arg6), arg7));
12737 #else
12738         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12739                                         target_offset64(arg4, arg5), arg6));
12740 #endif /* !TARGET_MIPS */
12741 #else
12742         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12743 #endif
12744         return ret;
12745 #endif
12746 #if defined(TARGET_NR_sync_file_range2) || \
12747     defined(TARGET_NR_arm_sync_file_range)
12748 #if defined(TARGET_NR_sync_file_range2)
12749     case TARGET_NR_sync_file_range2:
12750 #endif
12751 #if defined(TARGET_NR_arm_sync_file_range)
12752     case TARGET_NR_arm_sync_file_range:
12753 #endif
12754         /* This is like sync_file_range but the arguments are reordered */
12755 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12756         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12757                                         target_offset64(arg5, arg6), arg2));
12758 #else
12759         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12760 #endif
12761         return ret;
12762 #endif
12763 #endif
12764 #if defined(TARGET_NR_signalfd4)
12765     case TARGET_NR_signalfd4:
12766         return do_signalfd4(arg1, arg2, arg4);
12767 #endif
12768 #if defined(TARGET_NR_signalfd)
12769     case TARGET_NR_signalfd:
12770         return do_signalfd4(arg1, arg2, 0);
12771 #endif
12772 #if defined(CONFIG_EPOLL)
12773 #if defined(TARGET_NR_epoll_create)
12774     case TARGET_NR_epoll_create:
12775         return get_errno(epoll_create(arg1));
12776 #endif
12777 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12778     case TARGET_NR_epoll_create1:
12779         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12780 #endif
12781 #if defined(TARGET_NR_epoll_ctl)
12782     case TARGET_NR_epoll_ctl:
12783     {
12784         struct epoll_event ep;
12785         struct epoll_event *epp = 0;
12786         if (arg4) {
12787             if (arg2 != EPOLL_CTL_DEL) {
12788                 struct target_epoll_event *target_ep;
12789                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12790                     return -TARGET_EFAULT;
12791                 }
12792                 ep.events = tswap32(target_ep->events);
12793                 /*
12794                  * The epoll_data_t union is just opaque data to the kernel,
12795                  * so we transfer all 64 bits across and need not worry what
12796                  * actual data type it is.
12797                  */
12798                 ep.data.u64 = tswap64(target_ep->data.u64);
12799                 unlock_user_struct(target_ep, arg4, 0);
12800             }
12801             /*
12802              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12803              * non-null pointer, even though this argument is ignored.
12805              */
12806             epp = &ep;
12807         }
12808         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12809     }
12810 #endif
12811 
12812 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12813 #if defined(TARGET_NR_epoll_wait)
12814     case TARGET_NR_epoll_wait:
12815 #endif
12816 #if defined(TARGET_NR_epoll_pwait)
12817     case TARGET_NR_epoll_pwait:
12818 #endif
12819     {
12820         struct target_epoll_event *target_ep;
12821         struct epoll_event *ep;
12822         int epfd = arg1;
12823         int maxevents = arg3;
12824         int timeout = arg4;
12825 
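              /*
               * Reject out-of-range event counts up front, mirroring the
               * kernel's own EP_MAX_EVENTS limit on epoll_wait().
               */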
12826         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12827             return -TARGET_EINVAL;
12828         }
12829 
12830         target_ep = lock_user(VERIFY_WRITE, arg2,
12831                               maxevents * sizeof(struct target_epoll_event), 1);
12832         if (!target_ep) {
12833             return -TARGET_EFAULT;
12834         }
12835 
12836         ep = g_try_new(struct epoll_event, maxevents);
12837         if (!ep) {
12838             unlock_user(target_ep, arg2, 0);
12839             return -TARGET_ENOMEM;
12840         }
12841 
12842         switch (num) {
12843 #if defined(TARGET_NR_epoll_pwait)
12844         case TARGET_NR_epoll_pwait:
12845         {
12846             sigset_t *set = NULL;
12847 
12848             if (arg5) {
12849                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12850                 if (ret != 0) {
12851                     break;
12852                 }
12853             }
12854 
12855             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12856                                              set, SIGSET_T_SIZE));
12857 
12858             if (set) {
12859                 finish_sigsuspend_mask(ret);
12860             }
12861             break;
12862         }
12863 #endif
12864 #if defined(TARGET_NR_epoll_wait)
12865         case TARGET_NR_epoll_wait:
12866             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12867                                              NULL, 0));
12868             break;
12869 #endif
12870         default:
12871             ret = -TARGET_ENOSYS;
12872         }
12873         if (!is_error(ret)) {
12874             int i;
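                  /*
                   * On success ret is the number of ready events; convert each
                   * one back to the guest layout and byte order, and only mark
                   * that part of the guest buffer as written.
                   */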
12875             for (i = 0; i < ret; i++) {
12876                 target_ep[i].events = tswap32(ep[i].events);
12877                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12878             }
12879             unlock_user(target_ep, arg2,
12880                         ret * sizeof(struct target_epoll_event));
12881         } else {
12882             unlock_user(target_ep, arg2, 0);
12883         }
12884         g_free(ep);
12885         return ret;
12886     }
12887 #endif
12888 #endif
12889 #ifdef TARGET_NR_prlimit64
12890     case TARGET_NR_prlimit64:
12891     {
12892         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12893         struct target_rlimit64 *target_rnew, *target_rold;
12894         struct host_rlimit64 rnew, rold, *rnewp = 0;
12895         int resource = target_to_host_resource(arg2);
12896 
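              /*
               * Note: RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
               * forwarded to the host here, presumably because tightening
               * them would constrain QEMU itself rather than just the guest.
               */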
12897         if (arg3 && (resource != RLIMIT_AS &&
12898                      resource != RLIMIT_DATA &&
12899                      resource != RLIMIT_STACK)) {
12900             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12901                 return -TARGET_EFAULT;
12902             }
12903             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12904             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12905             unlock_user_struct(target_rnew, arg3, 0);
12906             rnewp = &rnew;
12907         }
12908 
12909         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12910         if (!is_error(ret) && arg4) {
12911             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12912                 return -TARGET_EFAULT;
12913             }
12914             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12915             target_rold->rlim_max = tswap64(rold.rlim_max);
12916             unlock_user_struct(target_rold, arg4, 1);
12917         }
12918         return ret;
12919     }
12920 #endif
12921 #ifdef TARGET_NR_gethostname
12922     case TARGET_NR_gethostname:
12923     {
12924         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12925         if (name) {
12926             ret = get_errno(gethostname(name, arg2));
12927             unlock_user(name, arg1, arg2);
12928         } else {
12929             ret = -TARGET_EFAULT;
12930         }
12931         return ret;
12932     }
12933 #endif
12934 #ifdef TARGET_NR_atomic_cmpxchg_32
12935     case TARGET_NR_atomic_cmpxchg_32:
12936     {
12937         /* should use start_exclusive from main.c */
12938         abi_ulong mem_value;
12939         if (get_user_u32(mem_value, arg6)) {
12940             target_siginfo_t info;
12941             info.si_signo = SIGSEGV;
12942             info.si_errno = 0;
12943             info.si_code = TARGET_SEGV_MAPERR;
12944             info._sifields._sigfault._addr = arg6;
12945             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12946             return 0xdeadbeef;
12948         }
12949         if (mem_value == arg2)
12950             put_user_u32(arg1, arg6);
12951         return mem_value;
12952     }
12953 #endif
12954 #ifdef TARGET_NR_atomic_barrier
12955     case TARGET_NR_atomic_barrier:
12956         /* Like the kernel implementation and the
12957            QEMU arm barrier, this is a no-op. */
12958         return 0;
12959 #endif
12960 
12961 #ifdef TARGET_NR_timer_create
12962     case TARGET_NR_timer_create:
12963     {
12964         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12965 
12966         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12967 
12968         int clkid = arg1;
12969         int timer_index = next_free_host_timer();
12970 
12971         if (timer_index < 0) {
12972             ret = -TARGET_EAGAIN;
12973         } else {
12974             timer_t *phtimer = g_posix_timers + timer_index;
12975 
12976             if (arg2) {
12977                 phost_sevp = &host_sevp;
12978                 ret = target_to_host_sigevent(phost_sevp, arg2);
12979                 if (ret != 0) {
12980                     free_host_timer_slot(timer_index);
12981                     return ret;
12982                 }
12983             }
12984 
12985             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12986             if (ret) {
12987                 free_host_timer_slot(timer_index);
12988             } else {
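                      /*
                       * The guest-visible timer id encodes the host slot index
                       * together with TIMER_MAGIC, which get_timer_id() checks
                       * when the id is passed back in later calls.
                       */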
12989                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12990                     timer_delete(*phtimer);
12991                     free_host_timer_slot(timer_index);
12992                     return -TARGET_EFAULT;
12993                 }
12994             }
12995         }
12996         return ret;
12997     }
12998 #endif
12999 
13000 #ifdef TARGET_NR_timer_settime
13001     case TARGET_NR_timer_settime:
13002     {
13003         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13004          * struct itimerspec * old_value */
13005         target_timer_t timerid = get_timer_id(arg1);
13006 
13007         if (timerid < 0) {
13008             ret = timerid;
13009         } else if (arg3 == 0) {
13010             ret = -TARGET_EINVAL;
13011         } else {
13012             timer_t htimer = g_posix_timers[timerid];
13013             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13014 
13015             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13016                 return -TARGET_EFAULT;
13017             }
13018             ret = get_errno(
13019                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13020             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13021                 return -TARGET_EFAULT;
13022             }
13023         }
13024         return ret;
13025     }
13026 #endif
13027 
13028 #ifdef TARGET_NR_timer_settime64
13029     case TARGET_NR_timer_settime64:
13030     {
13031         target_timer_t timerid = get_timer_id(arg1);
13032 
13033         if (timerid < 0) {
13034             ret = timerid;
13035         } else if (arg3 == 0) {
13036             ret = -TARGET_EINVAL;
13037         } else {
13038             timer_t htimer = g_posix_timers[timerid];
13039             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13040 
13041             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13042                 return -TARGET_EFAULT;
13043             }
13044             ret = get_errno(
13045                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13046             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13047                 return -TARGET_EFAULT;
13048             }
13049         }
13050         return ret;
13051     }
13052 #endif
13053 
13054 #ifdef TARGET_NR_timer_gettime
13055     case TARGET_NR_timer_gettime:
13056     {
13057         /* args: timer_t timerid, struct itimerspec *curr_value */
13058         target_timer_t timerid = get_timer_id(arg1);
13059 
13060         if (timerid < 0) {
13061             ret = timerid;
13062         } else if (!arg2) {
13063             ret = -TARGET_EFAULT;
13064         } else {
13065             timer_t htimer = g_posix_timers[timerid];
13066             struct itimerspec hspec;
13067             ret = get_errno(timer_gettime(htimer, &hspec));
13068 
13069             if (host_to_target_itimerspec(arg2, &hspec)) {
13070                 ret = -TARGET_EFAULT;
13071             }
13072         }
13073         return ret;
13074     }
13075 #endif
13076 
13077 #ifdef TARGET_NR_timer_gettime64
13078     case TARGET_NR_timer_gettime64:
13079     {
13080         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13081         target_timer_t timerid = get_timer_id(arg1);
13082 
13083         if (timerid < 0) {
13084             ret = timerid;
13085         } else if (!arg2) {
13086             ret = -TARGET_EFAULT;
13087         } else {
13088             timer_t htimer = g_posix_timers[timerid];
13089             struct itimerspec hspec;
13090             ret = get_errno(timer_gettime(htimer, &hspec));
13091 
13092             if (host_to_target_itimerspec64(arg2, &hspec)) {
13093                 ret = -TARGET_EFAULT;
13094             }
13095         }
13096         return ret;
13097     }
13098 #endif
13099 
13100 #ifdef TARGET_NR_timer_getoverrun
13101     case TARGET_NR_timer_getoverrun:
13102     {
13103         /* args: timer_t timerid */
13104         target_timer_t timerid = get_timer_id(arg1);
13105 
13106         if (timerid < 0) {
13107             ret = timerid;
13108         } else {
13109             timer_t htimer = g_posix_timers[timerid];
13110             ret = get_errno(timer_getoverrun(htimer));
13111         }
13112         return ret;
13113     }
13114 #endif
13115 
13116 #ifdef TARGET_NR_timer_delete
13117     case TARGET_NR_timer_delete:
13118     {
13119         /* args: timer_t timerid */
13120         target_timer_t timerid = get_timer_id(arg1);
13121 
13122         if (timerid < 0) {
13123             ret = timerid;
13124         } else {
13125             timer_t htimer = g_posix_timers[timerid];
13126             ret = get_errno(timer_delete(htimer));
13127             free_host_timer_slot(timerid);
13128         }
13129         return ret;
13130     }
13131 #endif
13132 
13133 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13134     case TARGET_NR_timerfd_create:
13135         return get_errno(timerfd_create(arg1,
13136                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13137 #endif
13138 
13139 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13140     case TARGET_NR_timerfd_gettime:
13141         {
13142             struct itimerspec its_curr;
13143 
13144             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13145 
13146             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13147                 return -TARGET_EFAULT;
13148             }
13149         }
13150         return ret;
13151 #endif
13152 
13153 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13154     case TARGET_NR_timerfd_gettime64:
13155         {
13156             struct itimerspec its_curr;
13157 
13158             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13159 
13160             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13161                 return -TARGET_EFAULT;
13162             }
13163         }
13164         return ret;
13165 #endif
13166 
13167 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13168     case TARGET_NR_timerfd_settime:
13169         {
13170             struct itimerspec its_new, its_old, *p_new;
13171 
13172             if (arg3) {
13173                 if (target_to_host_itimerspec(&its_new, arg3)) {
13174                     return -TARGET_EFAULT;
13175                 }
13176                 p_new = &its_new;
13177             } else {
13178                 p_new = NULL;
13179             }
13180 
13181             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13182 
13183             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13184                 return -TARGET_EFAULT;
13185             }
13186         }
13187         return ret;
13188 #endif
13189 
13190 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13191     case TARGET_NR_timerfd_settime64:
13192         {
13193             struct itimerspec its_new, its_old, *p_new;
13194 
13195             if (arg3) {
13196                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13197                     return -TARGET_EFAULT;
13198                 }
13199                 p_new = &its_new;
13200             } else {
13201                 p_new = NULL;
13202             }
13203 
13204             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13205 
13206             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13207                 return -TARGET_EFAULT;
13208             }
13209         }
13210         return ret;
13211 #endif
13212 
13213 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13214     case TARGET_NR_ioprio_get:
13215         return get_errno(ioprio_get(arg1, arg2));
13216 #endif
13217 
13218 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13219     case TARGET_NR_ioprio_set:
13220         return get_errno(ioprio_set(arg1, arg2, arg3));
13221 #endif
13222 
13223 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13224     case TARGET_NR_setns:
13225         return get_errno(setns(arg1, arg2));
13226 #endif
13227 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13228     case TARGET_NR_unshare:
13229         return get_errno(unshare(arg1));
13230 #endif
13231 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13232     case TARGET_NR_kcmp:
13233         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13234 #endif
13235 #ifdef TARGET_NR_swapcontext
13236     case TARGET_NR_swapcontext:
13237         /* PowerPC specific.  */
13238         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13239 #endif
13240 #ifdef TARGET_NR_memfd_create
13241     case TARGET_NR_memfd_create:
13242         p = lock_user_string(arg1);
13243         if (!p) {
13244             return -TARGET_EFAULT;
13245         }
13246         ret = get_errno(memfd_create(p, arg2));
13247         fd_trans_unregister(ret);
13248         unlock_user(p, arg1, 0);
13249         return ret;
13250 #endif
13251 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13252     case TARGET_NR_membarrier:
13253         return get_errno(membarrier(arg1, arg2));
13254 #endif
13255 
13256 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13257     case TARGET_NR_copy_file_range:
13258         {
13259             loff_t inoff, outoff;
13260             loff_t *pinoff = NULL, *poutoff = NULL;
13261 
13262             if (arg2) {
13263                 if (get_user_u64(inoff, arg2)) {
13264                     return -TARGET_EFAULT;
13265                 }
13266                 pinoff = &inoff;
13267             }
13268             if (arg4) {
13269                 if (get_user_u64(outoff, arg4)) {
13270                     return -TARGET_EFAULT;
13271                 }
13272                 poutoff = &outoff;
13273             }
13274             /* Do not sign-extend the count parameter. */
13275             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13276                                                  (abi_ulong)arg5, arg6));
13277             if (!is_error(ret) && ret > 0) {
13278                 if (arg2) {
13279                     if (put_user_u64(inoff, arg2)) {
13280                         return -TARGET_EFAULT;
13281                     }
13282                 }
13283                 if (arg4) {
13284                     if (put_user_u64(outoff, arg4)) {
13285                         return -TARGET_EFAULT;
13286                     }
13287                 }
13288             }
13289         }
13290         return ret;
13291 #endif
13292 
13293 #if defined(TARGET_NR_pivot_root)
13294     case TARGET_NR_pivot_root:
13295         {
13296             void *p2;
13297             p = lock_user_string(arg1); /* new_root */
13298             p2 = lock_user_string(arg2); /* put_old */
13299             if (!p || !p2) {
13300                 ret = -TARGET_EFAULT;
13301             } else {
13302                 ret = get_errno(pivot_root(p, p2));
13303             }
13304             unlock_user(p2, arg2, 0);
13305             unlock_user(p, arg1, 0);
13306         }
13307         return ret;
13308 #endif
13309 
13310     default:
13311         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13312         return -TARGET_ENOSYS;
13313     }
13314     return ret;
13315 }
13316 
13317 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13318                     abi_long arg2, abi_long arg3, abi_long arg4,
13319                     abi_long arg5, abi_long arg6, abi_long arg7,
13320                     abi_long arg8)
13321 {
13322     CPUState *cpu = env_cpu(cpu_env);
13323     abi_long ret;
13324 
13325 #ifdef DEBUG_ERESTARTSYS
13326     /* Debug-only code for exercising the syscall-restart code paths
13327      * in the per-architecture cpu main loops: restart every syscall
13328      * the guest makes once before letting it through.
13329      */
13330     {
13331         static bool flag;
13332         flag = !flag;
13333         if (flag) {
13334             return -QEMU_ERESTARTSYS;
13335         }
13336     }
13337 #endif
13338 
13339     record_syscall_start(cpu, num, arg1,
13340                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13341 
13342     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13343         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13344     }
13345 
13346     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13347                       arg5, arg6, arg7, arg8);
13348 
13349     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13350         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13351                           arg3, arg4, arg5, arg6);
13352     }
13353 
13354     record_syscall_return(cpu, num, ret);
13355     return ret;
13356 }
13357