xref: /openbmc/qemu/linux-user/syscall.c (revision 701bff24)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 
99 #ifdef HAVE_SYS_MOUNT_FSCONFIG
100 /*
101  * glibc >= 2.36 linux/mount.h conflicts with sys/mount.h,
102  * which in turn prevents use of linux/fs.h. So we have to
103  * define the constants ourselves for now.
104  */
105 #define FS_IOC_GETFLAGS                _IOR('f', 1, long)
106 #define FS_IOC_SETFLAGS                _IOW('f', 2, long)
107 #define FS_IOC_GETVERSION              _IOR('v', 1, long)
108 #define FS_IOC_SETVERSION              _IOW('v', 2, long)
109 #define FS_IOC_FIEMAP                  _IOWR('f', 11, struct fiemap)
110 #define FS_IOC32_GETFLAGS              _IOR('f', 1, int)
111 #define FS_IOC32_SETFLAGS              _IOW('f', 2, int)
112 #define FS_IOC32_GETVERSION            _IOR('v', 1, int)
113 #define FS_IOC32_SETVERSION            _IOW('v', 2, int)
114 
115 #define BLKGETSIZE64 _IOR(0x12,114,size_t)
116 #define BLKDISCARD _IO(0x12,119)
117 #define BLKIOMIN _IO(0x12,120)
118 #define BLKIOOPT _IO(0x12,121)
119 #define BLKALIGNOFF _IO(0x12,122)
120 #define BLKPBSZGET _IO(0x12,123)
121 #define BLKDISCARDZEROES _IO(0x12,124)
122 #define BLKSECDISCARD _IO(0x12,125)
123 #define BLKROTATIONAL _IO(0x12,126)
124 #define BLKZEROOUT _IO(0x12,127)
125 
126 #define FIBMAP     _IO(0x00,1)
127 #define FIGETBSZ   _IO(0x00,2)
128 
129 struct file_clone_range {
130         __s64 src_fd;
131         __u64 src_offset;
132         __u64 src_length;
133         __u64 dest_offset;
134 };
135 
136 #define FICLONE         _IOW(0x94, 9, int)
137 #define FICLONERANGE    _IOW(0x94, 13, struct file_clone_range)
138 
139 #else
140 #include <linux/fs.h>
141 #endif
142 #include <linux/fd.h>
143 #if defined(CONFIG_FIEMAP)
144 #include <linux/fiemap.h>
145 #endif
146 #include <linux/fb.h>
147 #if defined(CONFIG_USBFS)
148 #include <linux/usbdevice_fs.h>
149 #include <linux/usb/ch9.h>
150 #endif
151 #include <linux/vt.h>
152 #include <linux/dm-ioctl.h>
153 #include <linux/reboot.h>
154 #include <linux/route.h>
155 #include <linux/filter.h>
156 #include <linux/blkpg.h>
157 #include <netpacket/packet.h>
158 #include <linux/netlink.h>
159 #include <linux/if_alg.h>
160 #include <linux/rtc.h>
161 #include <sound/asound.h>
162 #ifdef HAVE_BTRFS_H
163 #include <linux/btrfs.h>
164 #endif
165 #ifdef HAVE_DRM_H
166 #include <libdrm/drm.h>
167 #include <libdrm/i915_drm.h>
168 #endif
169 #include "linux_loop.h"
170 #include "uname.h"
171 
172 #include "qemu.h"
173 #include "user-internals.h"
174 #include "strace.h"
175 #include "signal-common.h"
176 #include "loader.h"
177 #include "user-mmap.h"
178 #include "user/safe-syscall.h"
179 #include "qemu/guest-random.h"
180 #include "qemu/selfmap.h"
181 #include "user/syscall-trace.h"
182 #include "special-errno.h"
183 #include "qapi/error.h"
184 #include "fd-trans.h"
185 #include "tcg/tcg.h"
186 #include "cpu_loop-common.h"
187 
188 #ifndef CLONE_IO
189 #define CLONE_IO                0x80000000      /* Clone io context */
190 #endif
191 
192 /* We can't directly call the host clone syscall, because this will
193  * badly confuse libc (breaking mutexes, for example). So we must
194  * divide clone flags into:
195  *  * flag combinations that look like pthread_create()
196  *  * flag combinations that look like fork()
197  *  * flags we can implement within QEMU itself
198  *  * flags we can't support and will return an error for
199  */
200 /* For thread creation, all these flags must be present; for
201  * fork, none must be present.
202  */
203 #define CLONE_THREAD_FLAGS                              \
204     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
205      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
206 
207 /* These flags are ignored:
208  * CLONE_DETACHED is now ignored by the kernel;
209  * CLONE_IO is just an optimisation hint to the I/O scheduler
210  */
211 #define CLONE_IGNORED_FLAGS                     \
212     (CLONE_DETACHED | CLONE_IO)
213 
214 /* Flags for fork which we can implement within QEMU itself */
215 #define CLONE_OPTIONAL_FORK_FLAGS               \
216     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
217      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
218 
219 /* Flags for thread creation which we can implement within QEMU itself */
220 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
221     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
222      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
223 
224 #define CLONE_INVALID_FORK_FLAGS                                        \
225     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
226 
227 #define CLONE_INVALID_THREAD_FLAGS                                      \
228     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
229        CLONE_IGNORED_FLAGS))
230 
231 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
232  * have almost all been allocated. We cannot support any of
233  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
234  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
235  * The checks against the invalid thread masks above will catch these.
236  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
237  */
238 
239 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
240  * once. This exercises the codepaths for restart.
241  */
242 //#define DEBUG_ERESTARTSYS
243 
244 //#include <linux/msdos_fs.h>
245 #define VFAT_IOCTL_READDIR_BOTH \
246     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
247 #define VFAT_IOCTL_READDIR_SHORT \
248     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
249 
250 #undef _syscall0
251 #undef _syscall1
252 #undef _syscall2
253 #undef _syscall3
254 #undef _syscall4
255 #undef _syscall5
256 #undef _syscall6
257 
258 #define _syscall0(type,name)		\
259 static type name (void)			\
260 {					\
261 	return syscall(__NR_##name);	\
262 }
263 
264 #define _syscall1(type,name,type1,arg1)		\
265 static type name (type1 arg1)			\
266 {						\
267 	return syscall(__NR_##name, arg1);	\
268 }
269 
270 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
271 static type name (type1 arg1,type2 arg2)		\
272 {							\
273 	return syscall(__NR_##name, arg1, arg2);	\
274 }
275 
276 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
277 static type name (type1 arg1,type2 arg2,type3 arg3)		\
278 {								\
279 	return syscall(__NR_##name, arg1, arg2, arg3);		\
280 }
281 
282 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
283 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
284 {										\
285 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
286 }
287 
288 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
289 		  type5,arg5)							\
290 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
291 {										\
292 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
293 }
294 
295 
296 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
297 		  type5,arg5,type6,arg6)					\
298 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
299                   type6 arg6)							\
300 {										\
301 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
302 }
303 
304 
305 #define __NR_sys_uname __NR_uname
306 #define __NR_sys_getcwd1 __NR_getcwd
307 #define __NR_sys_getdents __NR_getdents
308 #define __NR_sys_getdents64 __NR_getdents64
309 #define __NR_sys_getpriority __NR_getpriority
310 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
311 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
312 #define __NR_sys_syslog __NR_syslog
313 #if defined(__NR_futex)
314 # define __NR_sys_futex __NR_futex
315 #endif
316 #if defined(__NR_futex_time64)
317 # define __NR_sys_futex_time64 __NR_futex_time64
318 #endif
319 #define __NR_sys_statx __NR_statx
320 
321 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
322 #define __NR__llseek __NR_lseek
323 #endif
324 
325 /* Newer kernel ports have llseek() instead of _llseek() */
326 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
327 #define TARGET_NR__llseek TARGET_NR_llseek
328 #endif
329 
330 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
331 #ifndef TARGET_O_NONBLOCK_MASK
332 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
333 #endif
334 
335 #define __NR_sys_gettid __NR_gettid
336 _syscall0(int, sys_gettid)
337 
338 /* For the 64-bit guest on 32-bit host case we must emulate
339  * getdents using getdents64, because otherwise the host
340  * might hand us back more dirent records than we can fit
341  * into the guest buffer after structure format conversion.
342  * Otherwise we emulate getdents with getdents if the host has it.
343  */
344 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
345 #define EMULATE_GETDENTS_WITH_GETDENTS
346 #endif
347 
348 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
349 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
350 #endif
351 #if (defined(TARGET_NR_getdents) && \
352       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
353     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
354 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
355 #endif
356 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
357 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
358           loff_t *, res, uint, wh);
359 #endif
360 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
361 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
362           siginfo_t *, uinfo)
363 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
364 #ifdef __NR_exit_group
365 _syscall1(int,exit_group,int,error_code)
366 #endif
367 #if defined(__NR_futex)
368 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
369           const struct timespec *,timeout,int *,uaddr2,int,val3)
370 #endif
371 #if defined(__NR_futex_time64)
372 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
373           const struct timespec *,timeout,int *,uaddr2,int,val3)
374 #endif
375 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
376 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
377 #endif
378 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
379 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
380                              unsigned int, flags);
381 #endif
382 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
383 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
384 #endif
385 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
386 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
387           unsigned long *, user_mask_ptr);
388 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
389 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
390           unsigned long *, user_mask_ptr);
391 /* sched_attr is not defined in glibc */
392 struct sched_attr {
393     uint32_t size;
394     uint32_t sched_policy;
395     uint64_t sched_flags;
396     int32_t sched_nice;
397     uint32_t sched_priority;
398     uint64_t sched_runtime;
399     uint64_t sched_deadline;
400     uint64_t sched_period;
401     uint32_t sched_util_min;
402     uint32_t sched_util_max;
403 };
404 #define __NR_sys_sched_getattr __NR_sched_getattr
405 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
406           unsigned int, size, unsigned int, flags);
407 #define __NR_sys_sched_setattr __NR_sched_setattr
408 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
409           unsigned int, flags);
410 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
411 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
412 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
413 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
414           const struct sched_param *, param);
415 #define __NR_sys_sched_getparam __NR_sched_getparam
416 _syscall2(int, sys_sched_getparam, pid_t, pid,
417           struct sched_param *, param);
418 #define __NR_sys_sched_setparam __NR_sched_setparam
419 _syscall2(int, sys_sched_setparam, pid_t, pid,
420           const struct sched_param *, param);
421 #define __NR_sys_getcpu __NR_getcpu
422 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
423 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
424           void *, arg);
425 _syscall2(int, capget, struct __user_cap_header_struct *, header,
426           struct __user_cap_data_struct *, data);
427 _syscall2(int, capset, struct __user_cap_header_struct *, header,
428           struct __user_cap_data_struct *, data);
429 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
430 _syscall2(int, ioprio_get, int, which, int, who)
431 #endif
432 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
433 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
434 #endif
435 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
436 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
437 #endif
438 
439 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
440 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
441           unsigned long, idx1, unsigned long, idx2)
442 #endif
443 
444 /*
445  * It is assumed that struct statx is architecture independent.
446  */
447 #if defined(TARGET_NR_statx) && defined(__NR_statx)
448 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
449           unsigned int, mask, struct target_statx *, statxbuf)
450 #endif
451 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
452 _syscall2(int, membarrier, int, cmd, int, flags)
453 #endif
454 
455 static const bitmask_transtbl fcntl_flags_tbl[] = {
456   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
457   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
458   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
459   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
460   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
461   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
462   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
463   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
464   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
465   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
466   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
467   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
468   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
469 #if defined(O_DIRECT)
470   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
471 #endif
472 #if defined(O_NOATIME)
473   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
474 #endif
475 #if defined(O_CLOEXEC)
476   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
477 #endif
478 #if defined(O_PATH)
479   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
480 #endif
481 #if defined(O_TMPFILE)
482   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
483 #endif
484   /* Don't terminate the list prematurely on 64-bit host+guest.  */
485 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
486   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
487 #endif
488   { 0, 0, 0, 0 }
489 };
490 
491 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
492 
493 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
494 #if defined(__NR_utimensat)
495 #define __NR_sys_utimensat __NR_utimensat
496 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
497           const struct timespec *,tsp,int,flags)
498 #else
499 static int sys_utimensat(int dirfd, const char *pathname,
500                          const struct timespec times[2], int flags)
501 {
502     errno = ENOSYS;
503     return -1;
504 }
505 #endif
506 #endif /* TARGET_NR_utimensat */
507 
508 #ifdef TARGET_NR_renameat2
509 #if defined(__NR_renameat2)
510 #define __NR_sys_renameat2 __NR_renameat2
511 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
512           const char *, new, unsigned int, flags)
513 #else
514 static int sys_renameat2(int oldfd, const char *old,
515                          int newfd, const char *new, int flags)
516 {
517     if (flags == 0) {
518         return renameat(oldfd, old, newfd, new);
519     }
520     errno = ENOSYS;
521     return -1;
522 }
523 #endif
524 #endif /* TARGET_NR_renameat2 */
525 
526 #ifdef CONFIG_INOTIFY
527 #include <sys/inotify.h>
528 #else
529 /* Userspace can usually survive runtime without inotify */
530 #undef TARGET_NR_inotify_init
531 #undef TARGET_NR_inotify_init1
532 #undef TARGET_NR_inotify_add_watch
533 #undef TARGET_NR_inotify_rm_watch
534 #endif /* CONFIG_INOTIFY  */
535 
536 #if defined(TARGET_NR_prlimit64)
537 #ifndef __NR_prlimit64
538 # define __NR_prlimit64 -1
539 #endif
540 #define __NR_sys_prlimit64 __NR_prlimit64
541 /* The glibc rlimit structure may not be that used by the underlying syscall */
542 struct host_rlimit64 {
543     uint64_t rlim_cur;
544     uint64_t rlim_max;
545 };
546 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
547           const struct host_rlimit64 *, new_limit,
548           struct host_rlimit64 *, old_limit)
549 #endif
550 
551 
552 #if defined(TARGET_NR_timer_create)
553 /* Maximum of 32 active POSIX timers allowed at any one time. */
554 #define GUEST_TIMER_MAX 32
555 static timer_t g_posix_timers[GUEST_TIMER_MAX];
556 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
557 
558 static inline int next_free_host_timer(void)
559 {
560     int k;
561     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
562         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
563             return k;
564         }
565     }
566     return -1;
567 }
568 
569 static inline void free_host_timer_slot(int id)
570 {
571     qatomic_store_release(g_posix_timer_allocated + id, 0);
572 }
573 #endif
574 
575 static inline int host_to_target_errno(int host_errno)
576 {
577     switch (host_errno) {
578 #define E(X)  case X: return TARGET_##X;
579 #include "errnos.c.inc"
580 #undef E
581     default:
582         return host_errno;
583     }
584 }
585 
586 static inline int target_to_host_errno(int target_errno)
587 {
588     switch (target_errno) {
589 #define E(X)  case TARGET_##X: return X;
590 #include "errnos.c.inc"
591 #undef E
592     default:
593         return target_errno;
594     }
595 }
596 
597 abi_long get_errno(abi_long ret)
598 {
599     if (ret == -1)
600         return -host_to_target_errno(errno);
601     else
602         return ret;
603 }
604 
605 const char *target_strerror(int err)
606 {
607     if (err == QEMU_ERESTARTSYS) {
608         return "To be restarted";
609     }
610     if (err == QEMU_ESIGRETURN) {
611         return "Successful exit from sigreturn";
612     }
613 
614     return strerror(target_to_host_errno(err));
615 }
616 
617 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
618 {
619     int i;
620     uint8_t b;
621     if (usize <= ksize) {
622         return 1;
623     }
624     for (i = ksize; i < usize; i++) {
625         if (get_user_u8(b, addr + i)) {
626             return -TARGET_EFAULT;
627         }
628         if (b != 0) {
629             return 0;
630         }
631     }
632     return 1;
633 }
634 
635 #define safe_syscall0(type, name) \
636 static type safe_##name(void) \
637 { \
638     return safe_syscall(__NR_##name); \
639 }
640 
641 #define safe_syscall1(type, name, type1, arg1) \
642 static type safe_##name(type1 arg1) \
643 { \
644     return safe_syscall(__NR_##name, arg1); \
645 }
646 
647 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
648 static type safe_##name(type1 arg1, type2 arg2) \
649 { \
650     return safe_syscall(__NR_##name, arg1, arg2); \
651 }
652 
653 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
654 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
655 { \
656     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
657 }
658 
659 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
660     type4, arg4) \
661 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
662 { \
663     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
664 }
665 
666 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
667     type4, arg4, type5, arg5) \
668 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
669     type5 arg5) \
670 { \
671     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
672 }
673 
674 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
675     type4, arg4, type5, arg5, type6, arg6) \
676 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
677     type5 arg5, type6 arg6) \
678 { \
679     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
680 }
681 
682 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
683 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
684 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
685               int, flags, mode_t, mode)
686 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
687 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
688               struct rusage *, rusage)
689 #endif
690 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
691               int, options, struct rusage *, rusage)
692 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
693 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
694     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
695 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
696               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
697 #endif
698 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
699 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
700               struct timespec *, tsp, const sigset_t *, sigmask,
701               size_t, sigsetsize)
702 #endif
703 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
704               int, maxevents, int, timeout, const sigset_t *, sigmask,
705               size_t, sigsetsize)
706 #if defined(__NR_futex)
707 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
708               const struct timespec *,timeout,int *,uaddr2,int,val3)
709 #endif
710 #if defined(__NR_futex_time64)
711 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
712               const struct timespec *,timeout,int *,uaddr2,int,val3)
713 #endif
714 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
715 safe_syscall2(int, kill, pid_t, pid, int, sig)
716 safe_syscall2(int, tkill, int, tid, int, sig)
717 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
718 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
719 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
720 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
721               unsigned long, pos_l, unsigned long, pos_h)
722 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
723               unsigned long, pos_l, unsigned long, pos_h)
724 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
725               socklen_t, addrlen)
726 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
727               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
728 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
729               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
730 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
731 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
732 safe_syscall2(int, flock, int, fd, int, operation)
733 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
734 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
735               const struct timespec *, uts, size_t, sigsetsize)
736 #endif
737 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
738               int, flags)
739 #if defined(TARGET_NR_nanosleep)
740 safe_syscall2(int, nanosleep, const struct timespec *, req,
741               struct timespec *, rem)
742 #endif
743 #if defined(TARGET_NR_clock_nanosleep) || \
744     defined(TARGET_NR_clock_nanosleep_time64)
745 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
746               const struct timespec *, req, struct timespec *, rem)
747 #endif
748 #ifdef __NR_ipc
749 #ifdef __s390x__
750 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
751               void *, ptr)
752 #else
753 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
754               void *, ptr, long, fifth)
755 #endif
756 #endif
757 #ifdef __NR_msgsnd
758 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
759               int, flags)
760 #endif
761 #ifdef __NR_msgrcv
762 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
763               long, msgtype, int, flags)
764 #endif
765 #ifdef __NR_semtimedop
766 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
767               unsigned, nsops, const struct timespec *, timeout)
768 #endif
769 #if defined(TARGET_NR_mq_timedsend) || \
770     defined(TARGET_NR_mq_timedsend_time64)
771 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
772               size_t, len, unsigned, prio, const struct timespec *, timeout)
773 #endif
774 #if defined(TARGET_NR_mq_timedreceive) || \
775     defined(TARGET_NR_mq_timedreceive_time64)
776 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
777               size_t, len, unsigned *, prio, const struct timespec *, timeout)
778 #endif
779 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
780 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
781               int, outfd, loff_t *, poutoff, size_t, length,
782               unsigned int, flags)
783 #endif
784 
785 /* We do ioctl like this rather than via safe_syscall3 to preserve the
786  * "third argument might be integer or pointer or not present" behaviour of
787  * the libc function.
788  */
789 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
790 /* Similarly for fcntl. Note that callers must always:
791  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
792  *  use the flock64 struct rather than unsuffixed flock
793  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
794  */
795 #ifdef __NR_fcntl64
796 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
797 #else
798 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
799 #endif
800 
801 static inline int host_to_target_sock_type(int host_type)
802 {
803     int target_type;
804 
805     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
806     case SOCK_DGRAM:
807         target_type = TARGET_SOCK_DGRAM;
808         break;
809     case SOCK_STREAM:
810         target_type = TARGET_SOCK_STREAM;
811         break;
812     default:
813         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
814         break;
815     }
816 
817 #if defined(SOCK_CLOEXEC)
818     if (host_type & SOCK_CLOEXEC) {
819         target_type |= TARGET_SOCK_CLOEXEC;
820     }
821 #endif
822 
823 #if defined(SOCK_NONBLOCK)
824     if (host_type & SOCK_NONBLOCK) {
825         target_type |= TARGET_SOCK_NONBLOCK;
826     }
827 #endif
828 
829     return target_type;
830 }
831 
832 static abi_ulong target_brk;
833 static abi_ulong target_original_brk;
834 static abi_ulong brk_page;
835 
836 void target_set_brk(abi_ulong new_brk)
837 {
838     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
839     brk_page = HOST_PAGE_ALIGN(target_brk);
840 }
841 
842 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
843 #define DEBUGF_BRK(message, args...)
844 
845 /* do_brk() must return target values and target errnos. */
846 abi_long do_brk(abi_ulong new_brk)
847 {
848     abi_long mapped_addr;
849     abi_ulong new_alloc_size;
850 
851     /* brk pointers are always untagged */
852 
853     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
854 
855     if (!new_brk) {
856         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
857         return target_brk;
858     }
859     if (new_brk < target_original_brk) {
860         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
861                    target_brk);
862         return target_brk;
863     }
864 
865     /* If the new brk is less than the highest page reserved to the
866      * target heap allocation, set it and we're almost done...  */
867     if (new_brk <= brk_page) {
868         /* Heap contents are initialized to zero, as for anonymous
869          * mapped pages.  */
870         if (new_brk > target_brk) {
871             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
872         }
873 	target_brk = new_brk;
874         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
875 	return target_brk;
876     }
877 
878     /* We need to allocate more memory after the brk... Note that
879      * we don't use MAP_FIXED because that will map over the top of
880      * any existing mapping (like the one with the host libc or qemu
881      * itself); instead we treat "mapped but at wrong address" as
882      * a failure and unmap again.
883      */
884     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
885     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
886                                         PROT_READ|PROT_WRITE,
887                                         MAP_ANON|MAP_PRIVATE, 0, 0));
888 
889     if (mapped_addr == brk_page) {
890         /* Heap contents are initialized to zero, as for anonymous
891          * mapped pages.  Technically the new pages are already
892          * initialized to zero since they *are* anonymous mapped
893          * pages, however we have to take care with the contents that
894          * come from the remaining part of the previous page: it may
895          * contains garbage data due to a previous heap usage (grown
896          * then shrunken).  */
897         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
898 
899         target_brk = new_brk;
900         brk_page = HOST_PAGE_ALIGN(target_brk);
901         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
902             target_brk);
903         return target_brk;
904     } else if (mapped_addr != -1) {
905         /* Mapped but at wrong address, meaning there wasn't actually
906          * enough space for this brk.
907          */
908         target_munmap(mapped_addr, new_alloc_size);
909         mapped_addr = -1;
910         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
911     }
912     else {
913         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
914     }
915 
916 #if defined(TARGET_ALPHA)
917     /* We (partially) emulate OSF/1 on Alpha, which requires we
918        return a proper errno, not an unchanged brk value.  */
919     return -TARGET_ENOMEM;
920 #endif
921     /* For everything else, return the previous break. */
922     return target_brk;
923 }
924 
925 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
926     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
927 static inline abi_long copy_from_user_fdset(fd_set *fds,
928                                             abi_ulong target_fds_addr,
929                                             int n)
930 {
931     int i, nw, j, k;
932     abi_ulong b, *target_fds;
933 
934     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
935     if (!(target_fds = lock_user(VERIFY_READ,
936                                  target_fds_addr,
937                                  sizeof(abi_ulong) * nw,
938                                  1)))
939         return -TARGET_EFAULT;
940 
941     FD_ZERO(fds);
942     k = 0;
943     for (i = 0; i < nw; i++) {
944         /* grab the abi_ulong */
945         __get_user(b, &target_fds[i]);
946         for (j = 0; j < TARGET_ABI_BITS; j++) {
947             /* check the bit inside the abi_ulong */
948             if ((b >> j) & 1)
949                 FD_SET(k, fds);
950             k++;
951         }
952     }
953 
954     unlock_user(target_fds, target_fds_addr, 0);
955 
956     return 0;
957 }
958 
959 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
960                                                  abi_ulong target_fds_addr,
961                                                  int n)
962 {
963     if (target_fds_addr) {
964         if (copy_from_user_fdset(fds, target_fds_addr, n))
965             return -TARGET_EFAULT;
966         *fds_ptr = fds;
967     } else {
968         *fds_ptr = NULL;
969     }
970     return 0;
971 }
972 
973 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
974                                           const fd_set *fds,
975                                           int n)
976 {
977     int i, nw, j, k;
978     abi_long v;
979     abi_ulong *target_fds;
980 
981     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
982     if (!(target_fds = lock_user(VERIFY_WRITE,
983                                  target_fds_addr,
984                                  sizeof(abi_ulong) * nw,
985                                  0)))
986         return -TARGET_EFAULT;
987 
988     k = 0;
989     for (i = 0; i < nw; i++) {
990         v = 0;
991         for (j = 0; j < TARGET_ABI_BITS; j++) {
992             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
993             k++;
994         }
995         __put_user(v, &target_fds[i]);
996     }
997 
998     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
999 
1000     return 0;
1001 }
1002 #endif
1003 
1004 #if defined(__alpha__)
1005 #define HOST_HZ 1024
1006 #else
1007 #define HOST_HZ 100
1008 #endif
1009 
1010 static inline abi_long host_to_target_clock_t(long ticks)
1011 {
1012 #if HOST_HZ == TARGET_HZ
1013     return ticks;
1014 #else
1015     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1016 #endif
1017 }
1018 
1019 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1020                                              const struct rusage *rusage)
1021 {
1022     struct target_rusage *target_rusage;
1023 
1024     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1025         return -TARGET_EFAULT;
1026     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1027     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1028     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1029     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1030     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1031     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1032     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1033     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1034     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1035     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1036     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1037     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1038     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1039     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1040     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1041     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1042     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1043     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1044     unlock_user_struct(target_rusage, target_addr, 1);
1045 
1046     return 0;
1047 }
1048 
1049 #ifdef TARGET_NR_setrlimit
1050 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1051 {
1052     abi_ulong target_rlim_swap;
1053     rlim_t result;
1054 
1055     target_rlim_swap = tswapal(target_rlim);
1056     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1057         return RLIM_INFINITY;
1058 
1059     result = target_rlim_swap;
1060     if (target_rlim_swap != (rlim_t)result)
1061         return RLIM_INFINITY;
1062 
1063     return result;
1064 }
1065 #endif
1066 
1067 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1068 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1069 {
1070     abi_ulong target_rlim_swap;
1071     abi_ulong result;
1072 
1073     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1074         target_rlim_swap = TARGET_RLIM_INFINITY;
1075     else
1076         target_rlim_swap = rlim;
1077     result = tswapal(target_rlim_swap);
1078 
1079     return result;
1080 }
1081 #endif
1082 
1083 static inline int target_to_host_resource(int code)
1084 {
1085     switch (code) {
1086     case TARGET_RLIMIT_AS:
1087         return RLIMIT_AS;
1088     case TARGET_RLIMIT_CORE:
1089         return RLIMIT_CORE;
1090     case TARGET_RLIMIT_CPU:
1091         return RLIMIT_CPU;
1092     case TARGET_RLIMIT_DATA:
1093         return RLIMIT_DATA;
1094     case TARGET_RLIMIT_FSIZE:
1095         return RLIMIT_FSIZE;
1096     case TARGET_RLIMIT_LOCKS:
1097         return RLIMIT_LOCKS;
1098     case TARGET_RLIMIT_MEMLOCK:
1099         return RLIMIT_MEMLOCK;
1100     case TARGET_RLIMIT_MSGQUEUE:
1101         return RLIMIT_MSGQUEUE;
1102     case TARGET_RLIMIT_NICE:
1103         return RLIMIT_NICE;
1104     case TARGET_RLIMIT_NOFILE:
1105         return RLIMIT_NOFILE;
1106     case TARGET_RLIMIT_NPROC:
1107         return RLIMIT_NPROC;
1108     case TARGET_RLIMIT_RSS:
1109         return RLIMIT_RSS;
1110     case TARGET_RLIMIT_RTPRIO:
1111         return RLIMIT_RTPRIO;
1112 #ifdef RLIMIT_RTTIME
1113     case TARGET_RLIMIT_RTTIME:
1114         return RLIMIT_RTTIME;
1115 #endif
1116     case TARGET_RLIMIT_SIGPENDING:
1117         return RLIMIT_SIGPENDING;
1118     case TARGET_RLIMIT_STACK:
1119         return RLIMIT_STACK;
1120     default:
1121         return code;
1122     }
1123 }
1124 
1125 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1126                                               abi_ulong target_tv_addr)
1127 {
1128     struct target_timeval *target_tv;
1129 
1130     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1131         return -TARGET_EFAULT;
1132     }
1133 
1134     __get_user(tv->tv_sec, &target_tv->tv_sec);
1135     __get_user(tv->tv_usec, &target_tv->tv_usec);
1136 
1137     unlock_user_struct(target_tv, target_tv_addr, 0);
1138 
1139     return 0;
1140 }
1141 
1142 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1143                                             const struct timeval *tv)
1144 {
1145     struct target_timeval *target_tv;
1146 
1147     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1148         return -TARGET_EFAULT;
1149     }
1150 
1151     __put_user(tv->tv_sec, &target_tv->tv_sec);
1152     __put_user(tv->tv_usec, &target_tv->tv_usec);
1153 
1154     unlock_user_struct(target_tv, target_tv_addr, 1);
1155 
1156     return 0;
1157 }
1158 
1159 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1160 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1161                                                 abi_ulong target_tv_addr)
1162 {
1163     struct target__kernel_sock_timeval *target_tv;
1164 
1165     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1166         return -TARGET_EFAULT;
1167     }
1168 
1169     __get_user(tv->tv_sec, &target_tv->tv_sec);
1170     __get_user(tv->tv_usec, &target_tv->tv_usec);
1171 
1172     unlock_user_struct(target_tv, target_tv_addr, 0);
1173 
1174     return 0;
1175 }
1176 #endif
1177 
1178 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1179                                               const struct timeval *tv)
1180 {
1181     struct target__kernel_sock_timeval *target_tv;
1182 
1183     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1184         return -TARGET_EFAULT;
1185     }
1186 
1187     __put_user(tv->tv_sec, &target_tv->tv_sec);
1188     __put_user(tv->tv_usec, &target_tv->tv_usec);
1189 
1190     unlock_user_struct(target_tv, target_tv_addr, 1);
1191 
1192     return 0;
1193 }
1194 
1195 #if defined(TARGET_NR_futex) || \
1196     defined(TARGET_NR_rt_sigtimedwait) || \
1197     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1198     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1199     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1200     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1201     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1202     defined(TARGET_NR_timer_settime) || \
1203     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1204 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1205                                                abi_ulong target_addr)
1206 {
1207     struct target_timespec *target_ts;
1208 
1209     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1210         return -TARGET_EFAULT;
1211     }
1212     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1213     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1214     unlock_user_struct(target_ts, target_addr, 0);
1215     return 0;
1216 }
1217 #endif
1218 
1219 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1220     defined(TARGET_NR_timer_settime64) || \
1221     defined(TARGET_NR_mq_timedsend_time64) || \
1222     defined(TARGET_NR_mq_timedreceive_time64) || \
1223     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1224     defined(TARGET_NR_clock_nanosleep_time64) || \
1225     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1226     defined(TARGET_NR_utimensat) || \
1227     defined(TARGET_NR_utimensat_time64) || \
1228     defined(TARGET_NR_semtimedop_time64) || \
1229     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1230 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1231                                                  abi_ulong target_addr)
1232 {
1233     struct target__kernel_timespec *target_ts;
1234 
1235     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1236         return -TARGET_EFAULT;
1237     }
1238     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1239     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1240     /* in 32bit mode, this drops the padding */
1241     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1242     unlock_user_struct(target_ts, target_addr, 0);
1243     return 0;
1244 }
1245 #endif
1246 
1247 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1248                                                struct timespec *host_ts)
1249 {
1250     struct target_timespec *target_ts;
1251 
1252     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1253         return -TARGET_EFAULT;
1254     }
1255     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1256     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1257     unlock_user_struct(target_ts, target_addr, 1);
1258     return 0;
1259 }
1260 
1261 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1262                                                  struct timespec *host_ts)
1263 {
1264     struct target__kernel_timespec *target_ts;
1265 
1266     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1267         return -TARGET_EFAULT;
1268     }
1269     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1270     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1271     unlock_user_struct(target_ts, target_addr, 1);
1272     return 0;
1273 }
1274 
1275 #if defined(TARGET_NR_gettimeofday)
1276 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1277                                              struct timezone *tz)
1278 {
1279     struct target_timezone *target_tz;
1280 
1281     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1282         return -TARGET_EFAULT;
1283     }
1284 
1285     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1286     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1287 
1288     unlock_user_struct(target_tz, target_tz_addr, 1);
1289 
1290     return 0;
1291 }
1292 #endif
1293 
1294 #if defined(TARGET_NR_settimeofday)
1295 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1296                                                abi_ulong target_tz_addr)
1297 {
1298     struct target_timezone *target_tz;
1299 
1300     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1301         return -TARGET_EFAULT;
1302     }
1303 
1304     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1305     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1306 
1307     unlock_user_struct(target_tz, target_tz_addr, 0);
1308 
1309     return 0;
1310 }
1311 #endif
1312 
1313 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1314 #include <mqueue.h>
1315 
1316 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1317                                               abi_ulong target_mq_attr_addr)
1318 {
1319     struct target_mq_attr *target_mq_attr;
1320 
1321     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1322                           target_mq_attr_addr, 1))
1323         return -TARGET_EFAULT;
1324 
1325     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1326     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1327     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1328     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1329 
1330     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1331 
1332     return 0;
1333 }
1334 
1335 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1336                                             const struct mq_attr *attr)
1337 {
1338     struct target_mq_attr *target_mq_attr;
1339 
1340     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1341                           target_mq_attr_addr, 0))
1342         return -TARGET_EFAULT;
1343 
1344     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1345     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1346     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1347     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1348 
1349     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1350 
1351     return 0;
1352 }
1353 #endif
1354 
1355 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1356 /* do_select() must return target values and target errnos. */
1357 static abi_long do_select(int n,
1358                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1359                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1360 {
1361     fd_set rfds, wfds, efds;
1362     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1363     struct timeval tv;
1364     struct timespec ts, *ts_ptr;
1365     abi_long ret;
1366 
1367     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1368     if (ret) {
1369         return ret;
1370     }
1371     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1372     if (ret) {
1373         return ret;
1374     }
1375     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1376     if (ret) {
1377         return ret;
1378     }
1379 
1380     if (target_tv_addr) {
1381         if (copy_from_user_timeval(&tv, target_tv_addr))
1382             return -TARGET_EFAULT;
1383         ts.tv_sec = tv.tv_sec;
1384         ts.tv_nsec = tv.tv_usec * 1000;
1385         ts_ptr = &ts;
1386     } else {
1387         ts_ptr = NULL;
1388     }
1389 
1390     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1391                                   ts_ptr, NULL));
1392 
1393     if (!is_error(ret)) {
1394         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1395             return -TARGET_EFAULT;
1396         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1397             return -TARGET_EFAULT;
1398         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1399             return -TARGET_EFAULT;
1400 
1401         if (target_tv_addr) {
1402             tv.tv_sec = ts.tv_sec;
1403             tv.tv_usec = ts.tv_nsec / 1000;
1404             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1405                 return -TARGET_EFAULT;
1406             }
1407         }
1408     }
1409 
1410     return ret;
1411 }
1412 
1413 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1414 static abi_long do_old_select(abi_ulong arg1)
1415 {
1416     struct target_sel_arg_struct *sel;
1417     abi_ulong inp, outp, exp, tvp;
1418     long nsel;
1419 
1420     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1421         return -TARGET_EFAULT;
1422     }
1423 
1424     nsel = tswapal(sel->n);
1425     inp = tswapal(sel->inp);
1426     outp = tswapal(sel->outp);
1427     exp = tswapal(sel->exp);
1428     tvp = tswapal(sel->tvp);
1429 
1430     unlock_user_struct(sel, arg1, 0);
1431 
1432     return do_select(nsel, inp, outp, exp, tvp);
1433 }
1434 #endif
1435 #endif
1436 
1437 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1438 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1439                             abi_long arg4, abi_long arg5, abi_long arg6,
1440                             bool time64)
1441 {
1442     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1443     fd_set rfds, wfds, efds;
1444     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1445     struct timespec ts, *ts_ptr;
1446     abi_long ret;
1447 
1448     /*
1449      * The 6th arg is actually two args smashed together,
1450      * so we cannot use the C library.
1451      */
1452     struct {
1453         sigset_t *set;
1454         size_t size;
1455     } sig, *sig_ptr;
1456 
1457     abi_ulong arg_sigset, arg_sigsize, *arg7;
1458 
1459     n = arg1;
1460     rfd_addr = arg2;
1461     wfd_addr = arg3;
1462     efd_addr = arg4;
1463     ts_addr = arg5;
1464 
1465     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1466     if (ret) {
1467         return ret;
1468     }
1469     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1470     if (ret) {
1471         return ret;
1472     }
1473     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1474     if (ret) {
1475         return ret;
1476     }
1477 
1478     /*
1479      * This takes a timespec, and not a timeval, so we cannot
1480      * use the do_select() helper ...
1481      */
1482     if (ts_addr) {
1483         if (time64) {
1484             if (target_to_host_timespec64(&ts, ts_addr)) {
1485                 return -TARGET_EFAULT;
1486             }
1487         } else {
1488             if (target_to_host_timespec(&ts, ts_addr)) {
1489                 return -TARGET_EFAULT;
1490             }
1491         }
1492             ts_ptr = &ts;
1493     } else {
1494         ts_ptr = NULL;
1495     }
1496 
1497     /* Extract the two packed args for the sigset */
1498     sig_ptr = NULL;
1499     if (arg6) {
1500         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1501         if (!arg7) {
1502             return -TARGET_EFAULT;
1503         }
1504         arg_sigset = tswapal(arg7[0]);
1505         arg_sigsize = tswapal(arg7[1]);
1506         unlock_user(arg7, arg6, 0);
1507 
1508         if (arg_sigset) {
1509             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1510             if (ret != 0) {
1511                 return ret;
1512             }
1513             sig_ptr = &sig;
1514             sig.size = SIGSET_T_SIZE;
1515         }
1516     }
1517 
1518     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1519                                   ts_ptr, sig_ptr));
1520 
1521     if (sig_ptr) {
1522         finish_sigsuspend_mask(ret);
1523     }
1524 
1525     if (!is_error(ret)) {
1526         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1527             return -TARGET_EFAULT;
1528         }
1529         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1530             return -TARGET_EFAULT;
1531         }
1532         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1533             return -TARGET_EFAULT;
1534         }
1535         if (time64) {
1536             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1537                 return -TARGET_EFAULT;
1538             }
1539         } else {
1540             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1541                 return -TARGET_EFAULT;
1542             }
1543         }
1544     }
1545     return ret;
1546 }
1547 #endif
1548 
1549 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1550     defined(TARGET_NR_ppoll_time64)
1551 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1552                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1553 {
1554     struct target_pollfd *target_pfd;
1555     unsigned int nfds = arg2;
1556     struct pollfd *pfd;
1557     unsigned int i;
1558     abi_long ret;
1559 
1560     pfd = NULL;
1561     target_pfd = NULL;
1562     if (nfds) {
1563         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1564             return -TARGET_EINVAL;
1565         }
1566         target_pfd = lock_user(VERIFY_WRITE, arg1,
1567                                sizeof(struct target_pollfd) * nfds, 1);
1568         if (!target_pfd) {
1569             return -TARGET_EFAULT;
1570         }
1571 
1572         pfd = alloca(sizeof(struct pollfd) * nfds);
1573         for (i = 0; i < nfds; i++) {
1574             pfd[i].fd = tswap32(target_pfd[i].fd);
1575             pfd[i].events = tswap16(target_pfd[i].events);
1576         }
1577     }
1578     if (ppoll) {
1579         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1580         sigset_t *set = NULL;
1581 
1582         if (arg3) {
1583             if (time64) {
1584                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1585                     unlock_user(target_pfd, arg1, 0);
1586                     return -TARGET_EFAULT;
1587                 }
1588             } else {
1589                 if (target_to_host_timespec(timeout_ts, arg3)) {
1590                     unlock_user(target_pfd, arg1, 0);
1591                     return -TARGET_EFAULT;
1592                 }
1593             }
1594         } else {
1595             timeout_ts = NULL;
1596         }
1597 
1598         if (arg4) {
1599             ret = process_sigsuspend_mask(&set, arg4, arg5);
1600             if (ret != 0) {
1601                 unlock_user(target_pfd, arg1, 0);
1602                 return ret;
1603             }
1604         }
1605 
1606         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1607                                    set, SIGSET_T_SIZE));
1608 
1609         if (set) {
1610             finish_sigsuspend_mask(ret);
1611         }
1612         if (!is_error(ret) && arg3) {
1613             if (time64) {
1614                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1615                     return -TARGET_EFAULT;
1616                 }
1617             } else {
1618                 if (host_to_target_timespec(arg3, timeout_ts)) {
1619                     return -TARGET_EFAULT;
1620                 }
1621             }
1622         }
1623     } else {
1624           struct timespec ts, *pts;
1625 
1626           if (arg3 >= 0) {
1627               /* Convert ms to secs, ns */
1628               ts.tv_sec = arg3 / 1000;
1629               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1630               pts = &ts;
1631           } else {
1632               /* -ve poll() timeout means "infinite" */
1633               pts = NULL;
1634           }
1635           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1636     }
1637 
1638     if (!is_error(ret)) {
1639         for (i = 0; i < nfds; i++) {
1640             target_pfd[i].revents = tswap16(pfd[i].revents);
1641         }
1642     }
1643     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1644     return ret;
1645 }
1646 #endif
1647 
1648 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1649                         int flags, int is_pipe2)
1650 {
1651     int host_pipe[2];
1652     abi_long ret;
1653     ret = pipe2(host_pipe, flags);
1654 
1655     if (is_error(ret))
1656         return get_errno(ret);
1657 
1658     /* Several targets have special calling conventions for the original
1659        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1660     if (!is_pipe2) {
1661 #if defined(TARGET_ALPHA)
1662         cpu_env->ir[IR_A4] = host_pipe[1];
1663         return host_pipe[0];
1664 #elif defined(TARGET_MIPS)
1665         cpu_env->active_tc.gpr[3] = host_pipe[1];
1666         return host_pipe[0];
1667 #elif defined(TARGET_SH4)
1668         cpu_env->gregs[1] = host_pipe[1];
1669         return host_pipe[0];
1670 #elif defined(TARGET_SPARC)
1671         cpu_env->regwptr[1] = host_pipe[1];
1672         return host_pipe[0];
1673 #endif
1674     }
1675 
1676     if (put_user_s32(host_pipe[0], pipedes)
1677         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1678         return -TARGET_EFAULT;
1679     return get_errno(ret);
1680 }
1681 
1682 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1683                                               abi_ulong target_addr,
1684                                               socklen_t len)
1685 {
1686     struct target_ip_mreqn *target_smreqn;
1687 
1688     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1689     if (!target_smreqn)
1690         return -TARGET_EFAULT;
1691     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1692     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1693     if (len == sizeof(struct target_ip_mreqn))
1694         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1695     unlock_user(target_smreqn, target_addr, 0);
1696 
1697     return 0;
1698 }
1699 
1700 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1701                                                abi_ulong target_addr,
1702                                                socklen_t len)
1703 {
1704     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1705     sa_family_t sa_family;
1706     struct target_sockaddr *target_saddr;
1707 
1708     if (fd_trans_target_to_host_addr(fd)) {
1709         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1710     }
1711 
1712     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1713     if (!target_saddr)
1714         return -TARGET_EFAULT;
1715 
1716     sa_family = tswap16(target_saddr->sa_family);
1717 
1718     /* Oops. The caller might send a incomplete sun_path; sun_path
1719      * must be terminated by \0 (see the manual page), but
1720      * unfortunately it is quite common to specify sockaddr_un
1721      * length as "strlen(x->sun_path)" while it should be
1722      * "strlen(...) + 1". We'll fix that here if needed.
1723      * Linux kernel has a similar feature.
1724      */
1725 
1726     if (sa_family == AF_UNIX) {
1727         if (len < unix_maxlen && len > 0) {
1728             char *cp = (char*)target_saddr;
1729 
1730             if ( cp[len-1] && !cp[len] )
1731                 len++;
1732         }
1733         if (len > unix_maxlen)
1734             len = unix_maxlen;
1735     }
1736 
1737     memcpy(addr, target_saddr, len);
1738     addr->sa_family = sa_family;
1739     if (sa_family == AF_NETLINK) {
1740         struct sockaddr_nl *nladdr;
1741 
1742         nladdr = (struct sockaddr_nl *)addr;
1743         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1744         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1745     } else if (sa_family == AF_PACKET) {
1746 	struct target_sockaddr_ll *lladdr;
1747 
1748 	lladdr = (struct target_sockaddr_ll *)addr;
1749 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1750 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1751     }
1752     unlock_user(target_saddr, target_addr, 0);
1753 
1754     return 0;
1755 }
1756 
1757 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1758                                                struct sockaddr *addr,
1759                                                socklen_t len)
1760 {
1761     struct target_sockaddr *target_saddr;
1762 
1763     if (len == 0) {
1764         return 0;
1765     }
1766     assert(addr);
1767 
1768     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1769     if (!target_saddr)
1770         return -TARGET_EFAULT;
1771     memcpy(target_saddr, addr, len);
1772     if (len >= offsetof(struct target_sockaddr, sa_family) +
1773         sizeof(target_saddr->sa_family)) {
1774         target_saddr->sa_family = tswap16(addr->sa_family);
1775     }
1776     if (addr->sa_family == AF_NETLINK &&
1777         len >= sizeof(struct target_sockaddr_nl)) {
1778         struct target_sockaddr_nl *target_nl =
1779                (struct target_sockaddr_nl *)target_saddr;
1780         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1781         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1782     } else if (addr->sa_family == AF_PACKET) {
1783         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1784         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1785         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1786     } else if (addr->sa_family == AF_INET6 &&
1787                len >= sizeof(struct target_sockaddr_in6)) {
1788         struct target_sockaddr_in6 *target_in6 =
1789                (struct target_sockaddr_in6 *)target_saddr;
1790         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1791     }
1792     unlock_user(target_saddr, target_addr, len);
1793 
1794     return 0;
1795 }
1796 
1797 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1798                                            struct target_msghdr *target_msgh)
1799 {
1800     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1801     abi_long msg_controllen;
1802     abi_ulong target_cmsg_addr;
1803     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1804     socklen_t space = 0;
1805 
1806     msg_controllen = tswapal(target_msgh->msg_controllen);
1807     if (msg_controllen < sizeof (struct target_cmsghdr))
1808         goto the_end;
1809     target_cmsg_addr = tswapal(target_msgh->msg_control);
1810     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1811     target_cmsg_start = target_cmsg;
1812     if (!target_cmsg)
1813         return -TARGET_EFAULT;
1814 
1815     while (cmsg && target_cmsg) {
1816         void *data = CMSG_DATA(cmsg);
1817         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1818 
1819         int len = tswapal(target_cmsg->cmsg_len)
1820             - sizeof(struct target_cmsghdr);
1821 
1822         space += CMSG_SPACE(len);
1823         if (space > msgh->msg_controllen) {
1824             space -= CMSG_SPACE(len);
1825             /* This is a QEMU bug, since we allocated the payload
1826              * area ourselves (unlike overflow in host-to-target
1827              * conversion, which is just the guest giving us a buffer
1828              * that's too small). It can't happen for the payload types
1829              * we currently support; if it becomes an issue in future
1830              * we would need to improve our allocation strategy to
1831              * something more intelligent than "twice the size of the
1832              * target buffer we're reading from".
1833              */
1834             qemu_log_mask(LOG_UNIMP,
1835                           ("Unsupported ancillary data %d/%d: "
1836                            "unhandled msg size\n"),
1837                           tswap32(target_cmsg->cmsg_level),
1838                           tswap32(target_cmsg->cmsg_type));
1839             break;
1840         }
1841 
1842         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1843             cmsg->cmsg_level = SOL_SOCKET;
1844         } else {
1845             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1846         }
1847         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1848         cmsg->cmsg_len = CMSG_LEN(len);
1849 
1850         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1851             int *fd = (int *)data;
1852             int *target_fd = (int *)target_data;
1853             int i, numfds = len / sizeof(int);
1854 
1855             for (i = 0; i < numfds; i++) {
1856                 __get_user(fd[i], target_fd + i);
1857             }
1858         } else if (cmsg->cmsg_level == SOL_SOCKET
1859                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1860             struct ucred *cred = (struct ucred *)data;
1861             struct target_ucred *target_cred =
1862                 (struct target_ucred *)target_data;
1863 
1864             __get_user(cred->pid, &target_cred->pid);
1865             __get_user(cred->uid, &target_cred->uid);
1866             __get_user(cred->gid, &target_cred->gid);
1867         } else {
1868             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1869                           cmsg->cmsg_level, cmsg->cmsg_type);
1870             memcpy(data, target_data, len);
1871         }
1872 
1873         cmsg = CMSG_NXTHDR(msgh, cmsg);
1874         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1875                                          target_cmsg_start);
1876     }
1877     unlock_user(target_cmsg, target_cmsg_addr, 0);
1878  the_end:
1879     msgh->msg_controllen = space;
1880     return 0;
1881 }
1882 
1883 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1884                                            struct msghdr *msgh)
1885 {
1886     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1887     abi_long msg_controllen;
1888     abi_ulong target_cmsg_addr;
1889     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1890     socklen_t space = 0;
1891 
1892     msg_controllen = tswapal(target_msgh->msg_controllen);
1893     if (msg_controllen < sizeof (struct target_cmsghdr))
1894         goto the_end;
1895     target_cmsg_addr = tswapal(target_msgh->msg_control);
1896     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1897     target_cmsg_start = target_cmsg;
1898     if (!target_cmsg)
1899         return -TARGET_EFAULT;
1900 
1901     while (cmsg && target_cmsg) {
1902         void *data = CMSG_DATA(cmsg);
1903         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1904 
1905         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1906         int tgt_len, tgt_space;
1907 
1908         /* We never copy a half-header but may copy half-data;
1909          * this is Linux's behaviour in put_cmsg(). Note that
1910          * truncation here is a guest problem (which we report
1911          * to the guest via the CTRUNC bit), unlike truncation
1912          * in target_to_host_cmsg, which is a QEMU bug.
1913          */
1914         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1915             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1916             break;
1917         }
1918 
1919         if (cmsg->cmsg_level == SOL_SOCKET) {
1920             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1921         } else {
1922             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1923         }
1924         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1925 
1926         /* Payload types which need a different size of payload on
1927          * the target must adjust tgt_len here.
1928          */
1929         tgt_len = len;
1930         switch (cmsg->cmsg_level) {
1931         case SOL_SOCKET:
1932             switch (cmsg->cmsg_type) {
1933             case SO_TIMESTAMP:
1934                 tgt_len = sizeof(struct target_timeval);
1935                 break;
1936             default:
1937                 break;
1938             }
1939             break;
1940         default:
1941             break;
1942         }
1943 
1944         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1945             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1946             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1947         }
1948 
1949         /* We must now copy-and-convert len bytes of payload
1950          * into tgt_len bytes of destination space. Bear in mind
1951          * that in both source and destination we may be dealing
1952          * with a truncated value!
1953          */
1954         switch (cmsg->cmsg_level) {
1955         case SOL_SOCKET:
1956             switch (cmsg->cmsg_type) {
1957             case SCM_RIGHTS:
1958             {
1959                 int *fd = (int *)data;
1960                 int *target_fd = (int *)target_data;
1961                 int i, numfds = tgt_len / sizeof(int);
1962 
1963                 for (i = 0; i < numfds; i++) {
1964                     __put_user(fd[i], target_fd + i);
1965                 }
1966                 break;
1967             }
1968             case SO_TIMESTAMP:
1969             {
1970                 struct timeval *tv = (struct timeval *)data;
1971                 struct target_timeval *target_tv =
1972                     (struct target_timeval *)target_data;
1973 
1974                 if (len != sizeof(struct timeval) ||
1975                     tgt_len != sizeof(struct target_timeval)) {
1976                     goto unimplemented;
1977                 }
1978 
1979                 /* copy struct timeval to target */
1980                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1981                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1982                 break;
1983             }
1984             case SCM_CREDENTIALS:
1985             {
1986                 struct ucred *cred = (struct ucred *)data;
1987                 struct target_ucred *target_cred =
1988                     (struct target_ucred *)target_data;
1989 
1990                 __put_user(cred->pid, &target_cred->pid);
1991                 __put_user(cred->uid, &target_cred->uid);
1992                 __put_user(cred->gid, &target_cred->gid);
1993                 break;
1994             }
1995             default:
1996                 goto unimplemented;
1997             }
1998             break;
1999 
2000         case SOL_IP:
2001             switch (cmsg->cmsg_type) {
2002             case IP_TTL:
2003             {
2004                 uint32_t *v = (uint32_t *)data;
2005                 uint32_t *t_int = (uint32_t *)target_data;
2006 
2007                 if (len != sizeof(uint32_t) ||
2008                     tgt_len != sizeof(uint32_t)) {
2009                     goto unimplemented;
2010                 }
2011                 __put_user(*v, t_int);
2012                 break;
2013             }
2014             case IP_RECVERR:
2015             {
2016                 struct errhdr_t {
2017                    struct sock_extended_err ee;
2018                    struct sockaddr_in offender;
2019                 };
2020                 struct errhdr_t *errh = (struct errhdr_t *)data;
2021                 struct errhdr_t *target_errh =
2022                     (struct errhdr_t *)target_data;
2023 
2024                 if (len != sizeof(struct errhdr_t) ||
2025                     tgt_len != sizeof(struct errhdr_t)) {
2026                     goto unimplemented;
2027                 }
2028                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2029                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2030                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2031                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2032                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2033                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2034                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2035                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2036                     (void *) &errh->offender, sizeof(errh->offender));
2037                 break;
2038             }
2039             default:
2040                 goto unimplemented;
2041             }
2042             break;
2043 
2044         case SOL_IPV6:
2045             switch (cmsg->cmsg_type) {
2046             case IPV6_HOPLIMIT:
2047             {
2048                 uint32_t *v = (uint32_t *)data;
2049                 uint32_t *t_int = (uint32_t *)target_data;
2050 
2051                 if (len != sizeof(uint32_t) ||
2052                     tgt_len != sizeof(uint32_t)) {
2053                     goto unimplemented;
2054                 }
2055                 __put_user(*v, t_int);
2056                 break;
2057             }
2058             case IPV6_RECVERR:
2059             {
2060                 struct errhdr6_t {
2061                    struct sock_extended_err ee;
2062                    struct sockaddr_in6 offender;
2063                 };
2064                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2065                 struct errhdr6_t *target_errh =
2066                     (struct errhdr6_t *)target_data;
2067 
2068                 if (len != sizeof(struct errhdr6_t) ||
2069                     tgt_len != sizeof(struct errhdr6_t)) {
2070                     goto unimplemented;
2071                 }
2072                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2073                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2074                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2075                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2076                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2077                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2078                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2079                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2080                     (void *) &errh->offender, sizeof(errh->offender));
2081                 break;
2082             }
2083             default:
2084                 goto unimplemented;
2085             }
2086             break;
2087 
2088         default:
2089         unimplemented:
2090             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2091                           cmsg->cmsg_level, cmsg->cmsg_type);
2092             memcpy(target_data, data, MIN(len, tgt_len));
2093             if (tgt_len > len) {
2094                 memset(target_data + len, 0, tgt_len - len);
2095             }
2096         }
2097 
2098         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2099         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2100         if (msg_controllen < tgt_space) {
2101             tgt_space = msg_controllen;
2102         }
2103         msg_controllen -= tgt_space;
2104         space += tgt_space;
2105         cmsg = CMSG_NXTHDR(msgh, cmsg);
2106         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2107                                          target_cmsg_start);
2108     }
2109     unlock_user(target_cmsg, target_cmsg_addr, space);
2110  the_end:
2111     target_msgh->msg_controllen = tswapal(space);
2112     return 0;
2113 }
2114 
2115 /* do_setsockopt() Must return target values and target errnos. */
2116 static abi_long do_setsockopt(int sockfd, int level, int optname,
2117                               abi_ulong optval_addr, socklen_t optlen)
2118 {
2119     abi_long ret;
2120     int val;
2121     struct ip_mreqn *ip_mreq;
2122     struct ip_mreq_source *ip_mreq_source;
2123 
2124     switch(level) {
2125     case SOL_TCP:
2126     case SOL_UDP:
2127         /* TCP and UDP options all take an 'int' value.  */
2128         if (optlen < sizeof(uint32_t))
2129             return -TARGET_EINVAL;
2130 
2131         if (get_user_u32(val, optval_addr))
2132             return -TARGET_EFAULT;
2133         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2134         break;
2135     case SOL_IP:
2136         switch(optname) {
2137         case IP_TOS:
2138         case IP_TTL:
2139         case IP_HDRINCL:
2140         case IP_ROUTER_ALERT:
2141         case IP_RECVOPTS:
2142         case IP_RETOPTS:
2143         case IP_PKTINFO:
2144         case IP_MTU_DISCOVER:
2145         case IP_RECVERR:
2146         case IP_RECVTTL:
2147         case IP_RECVTOS:
2148 #ifdef IP_FREEBIND
2149         case IP_FREEBIND:
2150 #endif
2151         case IP_MULTICAST_TTL:
2152         case IP_MULTICAST_LOOP:
2153             val = 0;
2154             if (optlen >= sizeof(uint32_t)) {
2155                 if (get_user_u32(val, optval_addr))
2156                     return -TARGET_EFAULT;
2157             } else if (optlen >= 1) {
2158                 if (get_user_u8(val, optval_addr))
2159                     return -TARGET_EFAULT;
2160             }
2161             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2162             break;
2163         case IP_ADD_MEMBERSHIP:
2164         case IP_DROP_MEMBERSHIP:
2165             if (optlen < sizeof (struct target_ip_mreq) ||
2166                 optlen > sizeof (struct target_ip_mreqn))
2167                 return -TARGET_EINVAL;
2168 
2169             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2170             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2171             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2172             break;
2173 
2174         case IP_BLOCK_SOURCE:
2175         case IP_UNBLOCK_SOURCE:
2176         case IP_ADD_SOURCE_MEMBERSHIP:
2177         case IP_DROP_SOURCE_MEMBERSHIP:
2178             if (optlen != sizeof (struct target_ip_mreq_source))
2179                 return -TARGET_EINVAL;
2180 
2181             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2182             if (!ip_mreq_source) {
2183                 return -TARGET_EFAULT;
2184             }
2185             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2186             unlock_user (ip_mreq_source, optval_addr, 0);
2187             break;
2188 
2189         default:
2190             goto unimplemented;
2191         }
2192         break;
2193     case SOL_IPV6:
2194         switch (optname) {
2195         case IPV6_MTU_DISCOVER:
2196         case IPV6_MTU:
2197         case IPV6_V6ONLY:
2198         case IPV6_RECVPKTINFO:
2199         case IPV6_UNICAST_HOPS:
2200         case IPV6_MULTICAST_HOPS:
2201         case IPV6_MULTICAST_LOOP:
2202         case IPV6_RECVERR:
2203         case IPV6_RECVHOPLIMIT:
2204         case IPV6_2292HOPLIMIT:
2205         case IPV6_CHECKSUM:
2206         case IPV6_ADDRFORM:
2207         case IPV6_2292PKTINFO:
2208         case IPV6_RECVTCLASS:
2209         case IPV6_RECVRTHDR:
2210         case IPV6_2292RTHDR:
2211         case IPV6_RECVHOPOPTS:
2212         case IPV6_2292HOPOPTS:
2213         case IPV6_RECVDSTOPTS:
2214         case IPV6_2292DSTOPTS:
2215         case IPV6_TCLASS:
2216         case IPV6_ADDR_PREFERENCES:
2217 #ifdef IPV6_RECVPATHMTU
2218         case IPV6_RECVPATHMTU:
2219 #endif
2220 #ifdef IPV6_TRANSPARENT
2221         case IPV6_TRANSPARENT:
2222 #endif
2223 #ifdef IPV6_FREEBIND
2224         case IPV6_FREEBIND:
2225 #endif
2226 #ifdef IPV6_RECVORIGDSTADDR
2227         case IPV6_RECVORIGDSTADDR:
2228 #endif
2229             val = 0;
2230             if (optlen < sizeof(uint32_t)) {
2231                 return -TARGET_EINVAL;
2232             }
2233             if (get_user_u32(val, optval_addr)) {
2234                 return -TARGET_EFAULT;
2235             }
2236             ret = get_errno(setsockopt(sockfd, level, optname,
2237                                        &val, sizeof(val)));
2238             break;
2239         case IPV6_PKTINFO:
2240         {
2241             struct in6_pktinfo pki;
2242 
2243             if (optlen < sizeof(pki)) {
2244                 return -TARGET_EINVAL;
2245             }
2246 
2247             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2248                 return -TARGET_EFAULT;
2249             }
2250 
2251             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2252 
2253             ret = get_errno(setsockopt(sockfd, level, optname,
2254                                        &pki, sizeof(pki)));
2255             break;
2256         }
2257         case IPV6_ADD_MEMBERSHIP:
2258         case IPV6_DROP_MEMBERSHIP:
2259         {
2260             struct ipv6_mreq ipv6mreq;
2261 
2262             if (optlen < sizeof(ipv6mreq)) {
2263                 return -TARGET_EINVAL;
2264             }
2265 
2266             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2267                 return -TARGET_EFAULT;
2268             }
2269 
2270             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2271 
2272             ret = get_errno(setsockopt(sockfd, level, optname,
2273                                        &ipv6mreq, sizeof(ipv6mreq)));
2274             break;
2275         }
2276         default:
2277             goto unimplemented;
2278         }
2279         break;
2280     case SOL_ICMPV6:
2281         switch (optname) {
2282         case ICMPV6_FILTER:
2283         {
2284             struct icmp6_filter icmp6f;
2285 
2286             if (optlen > sizeof(icmp6f)) {
2287                 optlen = sizeof(icmp6f);
2288             }
2289 
2290             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2291                 return -TARGET_EFAULT;
2292             }
2293 
2294             for (val = 0; val < 8; val++) {
2295                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2296             }
2297 
2298             ret = get_errno(setsockopt(sockfd, level, optname,
2299                                        &icmp6f, optlen));
2300             break;
2301         }
2302         default:
2303             goto unimplemented;
2304         }
2305         break;
2306     case SOL_RAW:
2307         switch (optname) {
2308         case ICMP_FILTER:
2309         case IPV6_CHECKSUM:
2310             /* those take an u32 value */
2311             if (optlen < sizeof(uint32_t)) {
2312                 return -TARGET_EINVAL;
2313             }
2314 
2315             if (get_user_u32(val, optval_addr)) {
2316                 return -TARGET_EFAULT;
2317             }
2318             ret = get_errno(setsockopt(sockfd, level, optname,
2319                                        &val, sizeof(val)));
2320             break;
2321 
2322         default:
2323             goto unimplemented;
2324         }
2325         break;
2326 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2327     case SOL_ALG:
2328         switch (optname) {
2329         case ALG_SET_KEY:
2330         {
2331             char *alg_key = g_malloc(optlen);
2332 
2333             if (!alg_key) {
2334                 return -TARGET_ENOMEM;
2335             }
2336             if (copy_from_user(alg_key, optval_addr, optlen)) {
2337                 g_free(alg_key);
2338                 return -TARGET_EFAULT;
2339             }
2340             ret = get_errno(setsockopt(sockfd, level, optname,
2341                                        alg_key, optlen));
2342             g_free(alg_key);
2343             break;
2344         }
2345         case ALG_SET_AEAD_AUTHSIZE:
2346         {
2347             ret = get_errno(setsockopt(sockfd, level, optname,
2348                                        NULL, optlen));
2349             break;
2350         }
2351         default:
2352             goto unimplemented;
2353         }
2354         break;
2355 #endif
2356     case TARGET_SOL_SOCKET:
2357         switch (optname) {
2358         case TARGET_SO_RCVTIMEO:
2359         {
2360                 struct timeval tv;
2361 
2362                 optname = SO_RCVTIMEO;
2363 
2364 set_timeout:
2365                 if (optlen != sizeof(struct target_timeval)) {
2366                     return -TARGET_EINVAL;
2367                 }
2368 
2369                 if (copy_from_user_timeval(&tv, optval_addr)) {
2370                     return -TARGET_EFAULT;
2371                 }
2372 
2373                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2374                                 &tv, sizeof(tv)));
2375                 return ret;
2376         }
2377         case TARGET_SO_SNDTIMEO:
2378                 optname = SO_SNDTIMEO;
2379                 goto set_timeout;
2380         case TARGET_SO_ATTACH_FILTER:
2381         {
2382                 struct target_sock_fprog *tfprog;
2383                 struct target_sock_filter *tfilter;
2384                 struct sock_fprog fprog;
2385                 struct sock_filter *filter;
2386                 int i;
2387 
2388                 if (optlen != sizeof(*tfprog)) {
2389                     return -TARGET_EINVAL;
2390                 }
2391                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2392                     return -TARGET_EFAULT;
2393                 }
2394                 if (!lock_user_struct(VERIFY_READ, tfilter,
2395                                       tswapal(tfprog->filter), 0)) {
2396                     unlock_user_struct(tfprog, optval_addr, 1);
2397                     return -TARGET_EFAULT;
2398                 }
2399 
2400                 fprog.len = tswap16(tfprog->len);
2401                 filter = g_try_new(struct sock_filter, fprog.len);
2402                 if (filter == NULL) {
2403                     unlock_user_struct(tfilter, tfprog->filter, 1);
2404                     unlock_user_struct(tfprog, optval_addr, 1);
2405                     return -TARGET_ENOMEM;
2406                 }
2407                 for (i = 0; i < fprog.len; i++) {
2408                     filter[i].code = tswap16(tfilter[i].code);
2409                     filter[i].jt = tfilter[i].jt;
2410                     filter[i].jf = tfilter[i].jf;
2411                     filter[i].k = tswap32(tfilter[i].k);
2412                 }
2413                 fprog.filter = filter;
2414 
2415                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2416                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2417                 g_free(filter);
2418 
2419                 unlock_user_struct(tfilter, tfprog->filter, 1);
2420                 unlock_user_struct(tfprog, optval_addr, 1);
2421                 return ret;
2422         }
2423 	case TARGET_SO_BINDTODEVICE:
2424 	{
2425 		char *dev_ifname, *addr_ifname;
2426 
2427 		if (optlen > IFNAMSIZ - 1) {
2428 		    optlen = IFNAMSIZ - 1;
2429 		}
2430 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2431 		if (!dev_ifname) {
2432 		    return -TARGET_EFAULT;
2433 		}
2434 		optname = SO_BINDTODEVICE;
2435 		addr_ifname = alloca(IFNAMSIZ);
2436 		memcpy(addr_ifname, dev_ifname, optlen);
2437 		addr_ifname[optlen] = 0;
2438 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2439                                            addr_ifname, optlen));
2440 		unlock_user (dev_ifname, optval_addr, 0);
2441 		return ret;
2442 	}
2443         case TARGET_SO_LINGER:
2444         {
2445                 struct linger lg;
2446                 struct target_linger *tlg;
2447 
2448                 if (optlen != sizeof(struct target_linger)) {
2449                     return -TARGET_EINVAL;
2450                 }
2451                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2452                     return -TARGET_EFAULT;
2453                 }
2454                 __get_user(lg.l_onoff, &tlg->l_onoff);
2455                 __get_user(lg.l_linger, &tlg->l_linger);
2456                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2457                                 &lg, sizeof(lg)));
2458                 unlock_user_struct(tlg, optval_addr, 0);
2459                 return ret;
2460         }
2461             /* Options with 'int' argument.  */
2462         case TARGET_SO_DEBUG:
2463 		optname = SO_DEBUG;
2464 		break;
2465         case TARGET_SO_REUSEADDR:
2466 		optname = SO_REUSEADDR;
2467 		break;
2468 #ifdef SO_REUSEPORT
2469         case TARGET_SO_REUSEPORT:
2470                 optname = SO_REUSEPORT;
2471                 break;
2472 #endif
2473         case TARGET_SO_TYPE:
2474 		optname = SO_TYPE;
2475 		break;
2476         case TARGET_SO_ERROR:
2477 		optname = SO_ERROR;
2478 		break;
2479         case TARGET_SO_DONTROUTE:
2480 		optname = SO_DONTROUTE;
2481 		break;
2482         case TARGET_SO_BROADCAST:
2483 		optname = SO_BROADCAST;
2484 		break;
2485         case TARGET_SO_SNDBUF:
2486 		optname = SO_SNDBUF;
2487 		break;
2488         case TARGET_SO_SNDBUFFORCE:
2489                 optname = SO_SNDBUFFORCE;
2490                 break;
2491         case TARGET_SO_RCVBUF:
2492 		optname = SO_RCVBUF;
2493 		break;
2494         case TARGET_SO_RCVBUFFORCE:
2495                 optname = SO_RCVBUFFORCE;
2496                 break;
2497         case TARGET_SO_KEEPALIVE:
2498 		optname = SO_KEEPALIVE;
2499 		break;
2500         case TARGET_SO_OOBINLINE:
2501 		optname = SO_OOBINLINE;
2502 		break;
2503         case TARGET_SO_NO_CHECK:
2504 		optname = SO_NO_CHECK;
2505 		break;
2506         case TARGET_SO_PRIORITY:
2507 		optname = SO_PRIORITY;
2508 		break;
2509 #ifdef SO_BSDCOMPAT
2510         case TARGET_SO_BSDCOMPAT:
2511 		optname = SO_BSDCOMPAT;
2512 		break;
2513 #endif
2514         case TARGET_SO_PASSCRED:
2515 		optname = SO_PASSCRED;
2516 		break;
2517         case TARGET_SO_PASSSEC:
2518                 optname = SO_PASSSEC;
2519                 break;
2520         case TARGET_SO_TIMESTAMP:
2521 		optname = SO_TIMESTAMP;
2522 		break;
2523         case TARGET_SO_RCVLOWAT:
2524 		optname = SO_RCVLOWAT;
2525 		break;
2526         default:
2527             goto unimplemented;
2528         }
2529 	if (optlen < sizeof(uint32_t))
2530             return -TARGET_EINVAL;
2531 
2532 	if (get_user_u32(val, optval_addr))
2533             return -TARGET_EFAULT;
2534 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2535         break;
2536 #ifdef SOL_NETLINK
2537     case SOL_NETLINK:
2538         switch (optname) {
2539         case NETLINK_PKTINFO:
2540         case NETLINK_ADD_MEMBERSHIP:
2541         case NETLINK_DROP_MEMBERSHIP:
2542         case NETLINK_BROADCAST_ERROR:
2543         case NETLINK_NO_ENOBUFS:
2544 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2545         case NETLINK_LISTEN_ALL_NSID:
2546         case NETLINK_CAP_ACK:
2547 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2548 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2549         case NETLINK_EXT_ACK:
2550 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2551 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2552         case NETLINK_GET_STRICT_CHK:
2553 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2554             break;
2555         default:
2556             goto unimplemented;
2557         }
2558         val = 0;
2559         if (optlen < sizeof(uint32_t)) {
2560             return -TARGET_EINVAL;
2561         }
2562         if (get_user_u32(val, optval_addr)) {
2563             return -TARGET_EFAULT;
2564         }
2565         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2566                                    sizeof(val)));
2567         break;
2568 #endif /* SOL_NETLINK */
2569     default:
2570     unimplemented:
2571         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2572                       level, optname);
2573         ret = -TARGET_ENOPROTOOPT;
2574     }
2575     return ret;
2576 }
2577 
2578 /* do_getsockopt() Must return target values and target errnos. */
2579 static abi_long do_getsockopt(int sockfd, int level, int optname,
2580                               abi_ulong optval_addr, abi_ulong optlen)
2581 {
2582     abi_long ret;
2583     int len, val;
2584     socklen_t lv;
2585 
2586     switch(level) {
2587     case TARGET_SOL_SOCKET:
2588         level = SOL_SOCKET;
2589         switch (optname) {
2590         /* These don't just return a single integer */
2591         case TARGET_SO_PEERNAME:
2592             goto unimplemented;
2593         case TARGET_SO_RCVTIMEO: {
2594             struct timeval tv;
2595             socklen_t tvlen;
2596 
2597             optname = SO_RCVTIMEO;
2598 
2599 get_timeout:
2600             if (get_user_u32(len, optlen)) {
2601                 return -TARGET_EFAULT;
2602             }
2603             if (len < 0) {
2604                 return -TARGET_EINVAL;
2605             }
2606 
2607             tvlen = sizeof(tv);
2608             ret = get_errno(getsockopt(sockfd, level, optname,
2609                                        &tv, &tvlen));
2610             if (ret < 0) {
2611                 return ret;
2612             }
2613             if (len > sizeof(struct target_timeval)) {
2614                 len = sizeof(struct target_timeval);
2615             }
2616             if (copy_to_user_timeval(optval_addr, &tv)) {
2617                 return -TARGET_EFAULT;
2618             }
2619             if (put_user_u32(len, optlen)) {
2620                 return -TARGET_EFAULT;
2621             }
2622             break;
2623         }
2624         case TARGET_SO_SNDTIMEO:
2625             optname = SO_SNDTIMEO;
2626             goto get_timeout;
2627         case TARGET_SO_PEERCRED: {
2628             struct ucred cr;
2629             socklen_t crlen;
2630             struct target_ucred *tcr;
2631 
2632             if (get_user_u32(len, optlen)) {
2633                 return -TARGET_EFAULT;
2634             }
2635             if (len < 0) {
2636                 return -TARGET_EINVAL;
2637             }
2638 
2639             crlen = sizeof(cr);
2640             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2641                                        &cr, &crlen));
2642             if (ret < 0) {
2643                 return ret;
2644             }
2645             if (len > crlen) {
2646                 len = crlen;
2647             }
2648             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2649                 return -TARGET_EFAULT;
2650             }
2651             __put_user(cr.pid, &tcr->pid);
2652             __put_user(cr.uid, &tcr->uid);
2653             __put_user(cr.gid, &tcr->gid);
2654             unlock_user_struct(tcr, optval_addr, 1);
2655             if (put_user_u32(len, optlen)) {
2656                 return -TARGET_EFAULT;
2657             }
2658             break;
2659         }
2660         case TARGET_SO_PEERSEC: {
2661             char *name;
2662 
2663             if (get_user_u32(len, optlen)) {
2664                 return -TARGET_EFAULT;
2665             }
2666             if (len < 0) {
2667                 return -TARGET_EINVAL;
2668             }
2669             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2670             if (!name) {
2671                 return -TARGET_EFAULT;
2672             }
2673             lv = len;
2674             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2675                                        name, &lv));
2676             if (put_user_u32(lv, optlen)) {
2677                 ret = -TARGET_EFAULT;
2678             }
2679             unlock_user(name, optval_addr, lv);
2680             break;
2681         }
2682         case TARGET_SO_LINGER:
2683         {
2684             struct linger lg;
2685             socklen_t lglen;
2686             struct target_linger *tlg;
2687 
2688             if (get_user_u32(len, optlen)) {
2689                 return -TARGET_EFAULT;
2690             }
2691             if (len < 0) {
2692                 return -TARGET_EINVAL;
2693             }
2694 
2695             lglen = sizeof(lg);
2696             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2697                                        &lg, &lglen));
2698             if (ret < 0) {
2699                 return ret;
2700             }
2701             if (len > lglen) {
2702                 len = lglen;
2703             }
2704             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2705                 return -TARGET_EFAULT;
2706             }
2707             __put_user(lg.l_onoff, &tlg->l_onoff);
2708             __put_user(lg.l_linger, &tlg->l_linger);
2709             unlock_user_struct(tlg, optval_addr, 1);
2710             if (put_user_u32(len, optlen)) {
2711                 return -TARGET_EFAULT;
2712             }
2713             break;
2714         }
2715         /* Options with 'int' argument.  */
2716         case TARGET_SO_DEBUG:
2717             optname = SO_DEBUG;
2718             goto int_case;
2719         case TARGET_SO_REUSEADDR:
2720             optname = SO_REUSEADDR;
2721             goto int_case;
2722 #ifdef SO_REUSEPORT
2723         case TARGET_SO_REUSEPORT:
2724             optname = SO_REUSEPORT;
2725             goto int_case;
2726 #endif
2727         case TARGET_SO_TYPE:
2728             optname = SO_TYPE;
2729             goto int_case;
2730         case TARGET_SO_ERROR:
2731             optname = SO_ERROR;
2732             goto int_case;
2733         case TARGET_SO_DONTROUTE:
2734             optname = SO_DONTROUTE;
2735             goto int_case;
2736         case TARGET_SO_BROADCAST:
2737             optname = SO_BROADCAST;
2738             goto int_case;
2739         case TARGET_SO_SNDBUF:
2740             optname = SO_SNDBUF;
2741             goto int_case;
2742         case TARGET_SO_RCVBUF:
2743             optname = SO_RCVBUF;
2744             goto int_case;
2745         case TARGET_SO_KEEPALIVE:
2746             optname = SO_KEEPALIVE;
2747             goto int_case;
2748         case TARGET_SO_OOBINLINE:
2749             optname = SO_OOBINLINE;
2750             goto int_case;
2751         case TARGET_SO_NO_CHECK:
2752             optname = SO_NO_CHECK;
2753             goto int_case;
2754         case TARGET_SO_PRIORITY:
2755             optname = SO_PRIORITY;
2756             goto int_case;
2757 #ifdef SO_BSDCOMPAT
2758         case TARGET_SO_BSDCOMPAT:
2759             optname = SO_BSDCOMPAT;
2760             goto int_case;
2761 #endif
2762         case TARGET_SO_PASSCRED:
2763             optname = SO_PASSCRED;
2764             goto int_case;
2765         case TARGET_SO_TIMESTAMP:
2766             optname = SO_TIMESTAMP;
2767             goto int_case;
2768         case TARGET_SO_RCVLOWAT:
2769             optname = SO_RCVLOWAT;
2770             goto int_case;
2771         case TARGET_SO_ACCEPTCONN:
2772             optname = SO_ACCEPTCONN;
2773             goto int_case;
2774         case TARGET_SO_PROTOCOL:
2775             optname = SO_PROTOCOL;
2776             goto int_case;
2777         case TARGET_SO_DOMAIN:
2778             optname = SO_DOMAIN;
2779             goto int_case;
2780         default:
2781             goto int_case;
2782         }
2783         break;
2784     case SOL_TCP:
2785     case SOL_UDP:
2786         /* TCP and UDP options all take an 'int' value.  */
2787     int_case:
2788         if (get_user_u32(len, optlen))
2789             return -TARGET_EFAULT;
2790         if (len < 0)
2791             return -TARGET_EINVAL;
2792         lv = sizeof(lv);
2793         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2794         if (ret < 0)
2795             return ret;
2796         if (optname == SO_TYPE) {
2797             val = host_to_target_sock_type(val);
2798         }
2799         if (len > lv)
2800             len = lv;
2801         if (len == 4) {
2802             if (put_user_u32(val, optval_addr))
2803                 return -TARGET_EFAULT;
2804         } else {
2805             if (put_user_u8(val, optval_addr))
2806                 return -TARGET_EFAULT;
2807         }
2808         if (put_user_u32(len, optlen))
2809             return -TARGET_EFAULT;
2810         break;
2811     case SOL_IP:
2812         switch(optname) {
2813         case IP_TOS:
2814         case IP_TTL:
2815         case IP_HDRINCL:
2816         case IP_ROUTER_ALERT:
2817         case IP_RECVOPTS:
2818         case IP_RETOPTS:
2819         case IP_PKTINFO:
2820         case IP_MTU_DISCOVER:
2821         case IP_RECVERR:
2822         case IP_RECVTOS:
2823 #ifdef IP_FREEBIND
2824         case IP_FREEBIND:
2825 #endif
2826         case IP_MULTICAST_TTL:
2827         case IP_MULTICAST_LOOP:
2828             if (get_user_u32(len, optlen))
2829                 return -TARGET_EFAULT;
2830             if (len < 0)
2831                 return -TARGET_EINVAL;
2832             lv = sizeof(lv);
2833             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2834             if (ret < 0)
2835                 return ret;
2836             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2837                 len = 1;
2838                 if (put_user_u32(len, optlen)
2839                     || put_user_u8(val, optval_addr))
2840                     return -TARGET_EFAULT;
2841             } else {
2842                 if (len > sizeof(int))
2843                     len = sizeof(int);
2844                 if (put_user_u32(len, optlen)
2845                     || put_user_u32(val, optval_addr))
2846                     return -TARGET_EFAULT;
2847             }
2848             break;
2849         default:
2850             ret = -TARGET_ENOPROTOOPT;
2851             break;
2852         }
2853         break;
2854     case SOL_IPV6:
2855         switch (optname) {
2856         case IPV6_MTU_DISCOVER:
2857         case IPV6_MTU:
2858         case IPV6_V6ONLY:
2859         case IPV6_RECVPKTINFO:
2860         case IPV6_UNICAST_HOPS:
2861         case IPV6_MULTICAST_HOPS:
2862         case IPV6_MULTICAST_LOOP:
2863         case IPV6_RECVERR:
2864         case IPV6_RECVHOPLIMIT:
2865         case IPV6_2292HOPLIMIT:
2866         case IPV6_CHECKSUM:
2867         case IPV6_ADDRFORM:
2868         case IPV6_2292PKTINFO:
2869         case IPV6_RECVTCLASS:
2870         case IPV6_RECVRTHDR:
2871         case IPV6_2292RTHDR:
2872         case IPV6_RECVHOPOPTS:
2873         case IPV6_2292HOPOPTS:
2874         case IPV6_RECVDSTOPTS:
2875         case IPV6_2292DSTOPTS:
2876         case IPV6_TCLASS:
2877         case IPV6_ADDR_PREFERENCES:
2878 #ifdef IPV6_RECVPATHMTU
2879         case IPV6_RECVPATHMTU:
2880 #endif
2881 #ifdef IPV6_TRANSPARENT
2882         case IPV6_TRANSPARENT:
2883 #endif
2884 #ifdef IPV6_FREEBIND
2885         case IPV6_FREEBIND:
2886 #endif
2887 #ifdef IPV6_RECVORIGDSTADDR
2888         case IPV6_RECVORIGDSTADDR:
2889 #endif
2890             if (get_user_u32(len, optlen))
2891                 return -TARGET_EFAULT;
2892             if (len < 0)
2893                 return -TARGET_EINVAL;
2894             lv = sizeof(lv);
2895             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2896             if (ret < 0)
2897                 return ret;
2898             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2899                 len = 1;
2900                 if (put_user_u32(len, optlen)
2901                     || put_user_u8(val, optval_addr))
2902                     return -TARGET_EFAULT;
2903             } else {
2904                 if (len > sizeof(int))
2905                     len = sizeof(int);
2906                 if (put_user_u32(len, optlen)
2907                     || put_user_u32(val, optval_addr))
2908                     return -TARGET_EFAULT;
2909             }
2910             break;
2911         default:
2912             ret = -TARGET_ENOPROTOOPT;
2913             break;
2914         }
2915         break;
2916 #ifdef SOL_NETLINK
2917     case SOL_NETLINK:
2918         switch (optname) {
2919         case NETLINK_PKTINFO:
2920         case NETLINK_BROADCAST_ERROR:
2921         case NETLINK_NO_ENOBUFS:
2922 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2923         case NETLINK_LISTEN_ALL_NSID:
2924         case NETLINK_CAP_ACK:
2925 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2926 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2927         case NETLINK_EXT_ACK:
2928 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2929 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2930         case NETLINK_GET_STRICT_CHK:
2931 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2932             if (get_user_u32(len, optlen)) {
2933                 return -TARGET_EFAULT;
2934             }
2935             if (len != sizeof(val)) {
2936                 return -TARGET_EINVAL;
2937             }
2938             lv = len;
2939             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2940             if (ret < 0) {
2941                 return ret;
2942             }
2943             if (put_user_u32(lv, optlen)
2944                 || put_user_u32(val, optval_addr)) {
2945                 return -TARGET_EFAULT;
2946             }
2947             break;
2948 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2949         case NETLINK_LIST_MEMBERSHIPS:
2950         {
2951             uint32_t *results;
2952             int i;
2953             if (get_user_u32(len, optlen)) {
2954                 return -TARGET_EFAULT;
2955             }
2956             if (len < 0) {
2957                 return -TARGET_EINVAL;
2958             }
2959             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2960             if (!results && len > 0) {
2961                 return -TARGET_EFAULT;
2962             }
2963             lv = len;
2964             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2965             if (ret < 0) {
2966                 unlock_user(results, optval_addr, 0);
2967                 return ret;
2968             }
2969             /* swap host endianess to target endianess. */
2970             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2971                 results[i] = tswap32(results[i]);
2972             }
2973             if (put_user_u32(lv, optlen)) {
2974                 return -TARGET_EFAULT;
2975             }
2976             unlock_user(results, optval_addr, 0);
2977             break;
2978         }
2979 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2980         default:
2981             goto unimplemented;
2982         }
2983         break;
2984 #endif /* SOL_NETLINK */
2985     default:
2986     unimplemented:
2987         qemu_log_mask(LOG_UNIMP,
2988                       "getsockopt level=%d optname=%d not yet supported\n",
2989                       level, optname);
2990         ret = -TARGET_EOPNOTSUPP;
2991         break;
2992     }
2993     return ret;
2994 }
2995 
2996 /* Convert target low/high pair representing file offset into the host
2997  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2998  * as the kernel doesn't handle them either.
2999  */
3000 static void target_to_host_low_high(abi_ulong tlow,
3001                                     abi_ulong thigh,
3002                                     unsigned long *hlow,
3003                                     unsigned long *hhigh)
3004 {
3005     uint64_t off = tlow |
3006         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3007         TARGET_LONG_BITS / 2;
3008 
3009     *hlow = off;
3010     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3011 }
3012 
3013 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3014                                 abi_ulong count, int copy)
3015 {
3016     struct target_iovec *target_vec;
3017     struct iovec *vec;
3018     abi_ulong total_len, max_len;
3019     int i;
3020     int err = 0;
3021     bool bad_address = false;
3022 
3023     if (count == 0) {
3024         errno = 0;
3025         return NULL;
3026     }
3027     if (count > IOV_MAX) {
3028         errno = EINVAL;
3029         return NULL;
3030     }
3031 
3032     vec = g_try_new0(struct iovec, count);
3033     if (vec == NULL) {
3034         errno = ENOMEM;
3035         return NULL;
3036     }
3037 
3038     target_vec = lock_user(VERIFY_READ, target_addr,
3039                            count * sizeof(struct target_iovec), 1);
3040     if (target_vec == NULL) {
3041         err = EFAULT;
3042         goto fail2;
3043     }
3044 
3045     /* ??? If host page size > target page size, this will result in a
3046        value larger than what we can actually support.  */
3047     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3048     total_len = 0;
3049 
3050     for (i = 0; i < count; i++) {
3051         abi_ulong base = tswapal(target_vec[i].iov_base);
3052         abi_long len = tswapal(target_vec[i].iov_len);
3053 
3054         if (len < 0) {
3055             err = EINVAL;
3056             goto fail;
3057         } else if (len == 0) {
3058             /* Zero length pointer is ignored.  */
3059             vec[i].iov_base = 0;
3060         } else {
3061             vec[i].iov_base = lock_user(type, base, len, copy);
3062             /* If the first buffer pointer is bad, this is a fault.  But
3063              * subsequent bad buffers will result in a partial write; this
3064              * is realized by filling the vector with null pointers and
3065              * zero lengths. */
3066             if (!vec[i].iov_base) {
3067                 if (i == 0) {
3068                     err = EFAULT;
3069                     goto fail;
3070                 } else {
3071                     bad_address = true;
3072                 }
3073             }
3074             if (bad_address) {
3075                 len = 0;
3076             }
3077             if (len > max_len - total_len) {
3078                 len = max_len - total_len;
3079             }
3080         }
3081         vec[i].iov_len = len;
3082         total_len += len;
3083     }
3084 
3085     unlock_user(target_vec, target_addr, 0);
3086     return vec;
3087 
3088  fail:
3089     while (--i >= 0) {
3090         if (tswapal(target_vec[i].iov_len) > 0) {
3091             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3092         }
3093     }
3094     unlock_user(target_vec, target_addr, 0);
3095  fail2:
3096     g_free(vec);
3097     errno = err;
3098     return NULL;
3099 }
3100 
3101 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3102                          abi_ulong count, int copy)
3103 {
3104     struct target_iovec *target_vec;
3105     int i;
3106 
3107     target_vec = lock_user(VERIFY_READ, target_addr,
3108                            count * sizeof(struct target_iovec), 1);
3109     if (target_vec) {
3110         for (i = 0; i < count; i++) {
3111             abi_ulong base = tswapal(target_vec[i].iov_base);
3112             abi_long len = tswapal(target_vec[i].iov_len);
3113             if (len < 0) {
3114                 break;
3115             }
3116             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3117         }
3118         unlock_user(target_vec, target_addr, 0);
3119     }
3120 
3121     g_free(vec);
3122 }
3123 
3124 static inline int target_to_host_sock_type(int *type)
3125 {
3126     int host_type = 0;
3127     int target_type = *type;
3128 
3129     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3130     case TARGET_SOCK_DGRAM:
3131         host_type = SOCK_DGRAM;
3132         break;
3133     case TARGET_SOCK_STREAM:
3134         host_type = SOCK_STREAM;
3135         break;
3136     default:
3137         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3138         break;
3139     }
3140     if (target_type & TARGET_SOCK_CLOEXEC) {
3141 #if defined(SOCK_CLOEXEC)
3142         host_type |= SOCK_CLOEXEC;
3143 #else
3144         return -TARGET_EINVAL;
3145 #endif
3146     }
3147     if (target_type & TARGET_SOCK_NONBLOCK) {
3148 #if defined(SOCK_NONBLOCK)
3149         host_type |= SOCK_NONBLOCK;
3150 #elif !defined(O_NONBLOCK)
3151         return -TARGET_EINVAL;
3152 #endif
3153     }
3154     *type = host_type;
3155     return 0;
3156 }
3157 
3158 /* Try to emulate socket type flags after socket creation.  */
3159 static int sock_flags_fixup(int fd, int target_type)
3160 {
3161 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3162     if (target_type & TARGET_SOCK_NONBLOCK) {
3163         int flags = fcntl(fd, F_GETFL);
3164         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3165             close(fd);
3166             return -TARGET_EINVAL;
3167         }
3168     }
3169 #endif
3170     return fd;
3171 }
3172 
3173 /* do_socket() Must return target values and target errnos. */
3174 static abi_long do_socket(int domain, int type, int protocol)
3175 {
3176     int target_type = type;
3177     int ret;
3178 
3179     ret = target_to_host_sock_type(&type);
3180     if (ret) {
3181         return ret;
3182     }
3183 
3184     if (domain == PF_NETLINK && !(
3185 #ifdef CONFIG_RTNETLINK
3186          protocol == NETLINK_ROUTE ||
3187 #endif
3188          protocol == NETLINK_KOBJECT_UEVENT ||
3189          protocol == NETLINK_AUDIT)) {
3190         return -TARGET_EPROTONOSUPPORT;
3191     }
3192 
3193     if (domain == AF_PACKET ||
3194         (domain == AF_INET && type == SOCK_PACKET)) {
3195         protocol = tswap16(protocol);
3196     }
3197 
3198     ret = get_errno(socket(domain, type, protocol));
3199     if (ret >= 0) {
3200         ret = sock_flags_fixup(ret, target_type);
3201         if (type == SOCK_PACKET) {
3202             /* Manage an obsolete case :
3203              * if socket type is SOCK_PACKET, bind by name
3204              */
3205             fd_trans_register(ret, &target_packet_trans);
3206         } else if (domain == PF_NETLINK) {
3207             switch (protocol) {
3208 #ifdef CONFIG_RTNETLINK
3209             case NETLINK_ROUTE:
3210                 fd_trans_register(ret, &target_netlink_route_trans);
3211                 break;
3212 #endif
3213             case NETLINK_KOBJECT_UEVENT:
3214                 /* nothing to do: messages are strings */
3215                 break;
3216             case NETLINK_AUDIT:
3217                 fd_trans_register(ret, &target_netlink_audit_trans);
3218                 break;
3219             default:
3220                 g_assert_not_reached();
3221             }
3222         }
3223     }
3224     return ret;
3225 }
3226 
3227 /* do_bind() Must return target values and target errnos. */
3228 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3229                         socklen_t addrlen)
3230 {
3231     void *addr;
3232     abi_long ret;
3233 
3234     if ((int)addrlen < 0) {
3235         return -TARGET_EINVAL;
3236     }
3237 
3238     addr = alloca(addrlen+1);
3239 
3240     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3241     if (ret)
3242         return ret;
3243 
3244     return get_errno(bind(sockfd, addr, addrlen));
3245 }
3246 
3247 /* do_connect() Must return target values and target errnos. */
3248 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3249                            socklen_t addrlen)
3250 {
3251     void *addr;
3252     abi_long ret;
3253 
3254     if ((int)addrlen < 0) {
3255         return -TARGET_EINVAL;
3256     }
3257 
3258     addr = alloca(addrlen+1);
3259 
3260     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3261     if (ret)
3262         return ret;
3263 
3264     return get_errno(safe_connect(sockfd, addr, addrlen));
3265 }
3266 
3267 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3268 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3269                                       int flags, int send)
3270 {
3271     abi_long ret, len;
3272     struct msghdr msg;
3273     abi_ulong count;
3274     struct iovec *vec;
3275     abi_ulong target_vec;
3276 
3277     if (msgp->msg_name) {
3278         msg.msg_namelen = tswap32(msgp->msg_namelen);
3279         msg.msg_name = alloca(msg.msg_namelen+1);
3280         ret = target_to_host_sockaddr(fd, msg.msg_name,
3281                                       tswapal(msgp->msg_name),
3282                                       msg.msg_namelen);
3283         if (ret == -TARGET_EFAULT) {
3284             /* For connected sockets msg_name and msg_namelen must
3285              * be ignored, so returning EFAULT immediately is wrong.
3286              * Instead, pass a bad msg_name to the host kernel, and
3287              * let it decide whether to return EFAULT or not.
3288              */
3289             msg.msg_name = (void *)-1;
3290         } else if (ret) {
3291             goto out2;
3292         }
3293     } else {
3294         msg.msg_name = NULL;
3295         msg.msg_namelen = 0;
3296     }
3297     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3298     msg.msg_control = alloca(msg.msg_controllen);
3299     memset(msg.msg_control, 0, msg.msg_controllen);
3300 
3301     msg.msg_flags = tswap32(msgp->msg_flags);
3302 
3303     count = tswapal(msgp->msg_iovlen);
3304     target_vec = tswapal(msgp->msg_iov);
3305 
3306     if (count > IOV_MAX) {
3307         /* sendrcvmsg returns a different errno for this condition than
3308          * readv/writev, so we must catch it here before lock_iovec() does.
3309          */
3310         ret = -TARGET_EMSGSIZE;
3311         goto out2;
3312     }
3313 
3314     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3315                      target_vec, count, send);
3316     if (vec == NULL) {
3317         ret = -host_to_target_errno(errno);
3318         goto out2;
3319     }
3320     msg.msg_iovlen = count;
3321     msg.msg_iov = vec;
3322 
3323     if (send) {
3324         if (fd_trans_target_to_host_data(fd)) {
3325             void *host_msg;
3326 
3327             host_msg = g_malloc(msg.msg_iov->iov_len);
3328             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3329             ret = fd_trans_target_to_host_data(fd)(host_msg,
3330                                                    msg.msg_iov->iov_len);
3331             if (ret >= 0) {
3332                 msg.msg_iov->iov_base = host_msg;
3333                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3334             }
3335             g_free(host_msg);
3336         } else {
3337             ret = target_to_host_cmsg(&msg, msgp);
3338             if (ret == 0) {
3339                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3340             }
3341         }
3342     } else {
3343         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3344         if (!is_error(ret)) {
3345             len = ret;
3346             if (fd_trans_host_to_target_data(fd)) {
3347                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3348                                                MIN(msg.msg_iov->iov_len, len));
3349             } else {
3350                 ret = host_to_target_cmsg(msgp, &msg);
3351             }
3352             if (!is_error(ret)) {
3353                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3354                 msgp->msg_flags = tswap32(msg.msg_flags);
3355                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3356                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3357                                     msg.msg_name, msg.msg_namelen);
3358                     if (ret) {
3359                         goto out;
3360                     }
3361                 }
3362 
3363                 ret = len;
3364             }
3365         }
3366     }
3367 
3368 out:
3369     unlock_iovec(vec, target_vec, count, !send);
3370 out2:
3371     return ret;
3372 }
3373 
3374 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3375                                int flags, int send)
3376 {
3377     abi_long ret;
3378     struct target_msghdr *msgp;
3379 
3380     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3381                           msgp,
3382                           target_msg,
3383                           send ? 1 : 0)) {
3384         return -TARGET_EFAULT;
3385     }
3386     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3387     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3388     return ret;
3389 }
3390 
3391 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3392  * so it might not have this *mmsg-specific flag either.
3393  */
3394 #ifndef MSG_WAITFORONE
3395 #define MSG_WAITFORONE 0x10000
3396 #endif
3397 
3398 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3399                                 unsigned int vlen, unsigned int flags,
3400                                 int send)
3401 {
3402     struct target_mmsghdr *mmsgp;
3403     abi_long ret = 0;
3404     int i;
3405 
3406     if (vlen > UIO_MAXIOV) {
3407         vlen = UIO_MAXIOV;
3408     }
3409 
3410     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3411     if (!mmsgp) {
3412         return -TARGET_EFAULT;
3413     }
3414 
3415     for (i = 0; i < vlen; i++) {
3416         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3417         if (is_error(ret)) {
3418             break;
3419         }
3420         mmsgp[i].msg_len = tswap32(ret);
3421         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3422         if (flags & MSG_WAITFORONE) {
3423             flags |= MSG_DONTWAIT;
3424         }
3425     }
3426 
3427     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3428 
3429     /* Return number of datagrams sent if we sent any at all;
3430      * otherwise return the error.
3431      */
3432     if (i) {
3433         return i;
3434     }
3435     return ret;
3436 }
3437 
3438 /* do_accept4() Must return target values and target errnos. */
3439 static abi_long do_accept4(int fd, abi_ulong target_addr,
3440                            abi_ulong target_addrlen_addr, int flags)
3441 {
3442     socklen_t addrlen, ret_addrlen;
3443     void *addr;
3444     abi_long ret;
3445     int host_flags;
3446 
3447     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3448 
3449     if (target_addr == 0) {
3450         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3451     }
3452 
3453     /* linux returns EFAULT if addrlen pointer is invalid */
3454     if (get_user_u32(addrlen, target_addrlen_addr))
3455         return -TARGET_EFAULT;
3456 
3457     if ((int)addrlen < 0) {
3458         return -TARGET_EINVAL;
3459     }
3460 
3461     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3462         return -TARGET_EFAULT;
3463     }
3464 
3465     addr = alloca(addrlen);
3466 
3467     ret_addrlen = addrlen;
3468     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3469     if (!is_error(ret)) {
3470         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3471         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3472             ret = -TARGET_EFAULT;
3473         }
3474     }
3475     return ret;
3476 }
3477 
3478 /* do_getpeername() Must return target values and target errnos. */
3479 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3480                                abi_ulong target_addrlen_addr)
3481 {
3482     socklen_t addrlen, ret_addrlen;
3483     void *addr;
3484     abi_long ret;
3485 
3486     if (get_user_u32(addrlen, target_addrlen_addr))
3487         return -TARGET_EFAULT;
3488 
3489     if ((int)addrlen < 0) {
3490         return -TARGET_EINVAL;
3491     }
3492 
3493     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3494         return -TARGET_EFAULT;
3495     }
3496 
3497     addr = alloca(addrlen);
3498 
3499     ret_addrlen = addrlen;
3500     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3501     if (!is_error(ret)) {
3502         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3503         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3504             ret = -TARGET_EFAULT;
3505         }
3506     }
3507     return ret;
3508 }
3509 
3510 /* do_getsockname() Must return target values and target errnos. */
3511 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3512                                abi_ulong target_addrlen_addr)
3513 {
3514     socklen_t addrlen, ret_addrlen;
3515     void *addr;
3516     abi_long ret;
3517 
3518     if (get_user_u32(addrlen, target_addrlen_addr))
3519         return -TARGET_EFAULT;
3520 
3521     if ((int)addrlen < 0) {
3522         return -TARGET_EINVAL;
3523     }
3524 
3525     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3526         return -TARGET_EFAULT;
3527     }
3528 
3529     addr = alloca(addrlen);
3530 
3531     ret_addrlen = addrlen;
3532     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3533     if (!is_error(ret)) {
3534         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3535         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3536             ret = -TARGET_EFAULT;
3537         }
3538     }
3539     return ret;
3540 }
3541 
3542 /* do_socketpair() Must return target values and target errnos. */
3543 static abi_long do_socketpair(int domain, int type, int protocol,
3544                               abi_ulong target_tab_addr)
3545 {
3546     int tab[2];
3547     abi_long ret;
3548 
3549     target_to_host_sock_type(&type);
3550 
3551     ret = get_errno(socketpair(domain, type, protocol, tab));
3552     if (!is_error(ret)) {
3553         if (put_user_s32(tab[0], target_tab_addr)
3554             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3555             ret = -TARGET_EFAULT;
3556     }
3557     return ret;
3558 }
3559 
3560 /* do_sendto() Must return target values and target errnos. */
3561 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3562                           abi_ulong target_addr, socklen_t addrlen)
3563 {
3564     void *addr;
3565     void *host_msg;
3566     void *copy_msg = NULL;
3567     abi_long ret;
3568 
3569     if ((int)addrlen < 0) {
3570         return -TARGET_EINVAL;
3571     }
3572 
3573     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3574     if (!host_msg)
3575         return -TARGET_EFAULT;
3576     if (fd_trans_target_to_host_data(fd)) {
3577         copy_msg = host_msg;
3578         host_msg = g_malloc(len);
3579         memcpy(host_msg, copy_msg, len);
3580         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3581         if (ret < 0) {
3582             goto fail;
3583         }
3584     }
3585     if (target_addr) {
3586         addr = alloca(addrlen+1);
3587         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3588         if (ret) {
3589             goto fail;
3590         }
3591         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3592     } else {
3593         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3594     }
3595 fail:
3596     if (copy_msg) {
3597         g_free(host_msg);
3598         host_msg = copy_msg;
3599     }
3600     unlock_user(host_msg, msg, 0);
3601     return ret;
3602 }
3603 
3604 /* do_recvfrom() Must return target values and target errnos. */
3605 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3606                             abi_ulong target_addr,
3607                             abi_ulong target_addrlen)
3608 {
3609     socklen_t addrlen, ret_addrlen;
3610     void *addr;
3611     void *host_msg;
3612     abi_long ret;
3613 
3614     if (!msg) {
3615         host_msg = NULL;
3616     } else {
3617         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3618         if (!host_msg) {
3619             return -TARGET_EFAULT;
3620         }
3621     }
3622     if (target_addr) {
3623         if (get_user_u32(addrlen, target_addrlen)) {
3624             ret = -TARGET_EFAULT;
3625             goto fail;
3626         }
3627         if ((int)addrlen < 0) {
3628             ret = -TARGET_EINVAL;
3629             goto fail;
3630         }
3631         addr = alloca(addrlen);
3632         ret_addrlen = addrlen;
3633         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3634                                       addr, &ret_addrlen));
3635     } else {
3636         addr = NULL; /* To keep compiler quiet.  */
3637         addrlen = 0; /* To keep compiler quiet.  */
3638         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3639     }
3640     if (!is_error(ret)) {
3641         if (fd_trans_host_to_target_data(fd)) {
3642             abi_long trans;
3643             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3644             if (is_error(trans)) {
3645                 ret = trans;
3646                 goto fail;
3647             }
3648         }
3649         if (target_addr) {
3650             host_to_target_sockaddr(target_addr, addr,
3651                                     MIN(addrlen, ret_addrlen));
3652             if (put_user_u32(ret_addrlen, target_addrlen)) {
3653                 ret = -TARGET_EFAULT;
3654                 goto fail;
3655             }
3656         }
3657         unlock_user(host_msg, msg, len);
3658     } else {
3659 fail:
3660         unlock_user(host_msg, msg, 0);
3661     }
3662     return ret;
3663 }
3664 
3665 #ifdef TARGET_NR_socketcall
3666 /* do_socketcall() must return target values and target errnos. */
3667 static abi_long do_socketcall(int num, abi_ulong vptr)
3668 {
3669     static const unsigned nargs[] = { /* number of arguments per operation */
3670         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3671         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3672         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3673         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3674         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3675         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3676         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3677         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3678         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3679         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3680         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3681         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3682         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3683         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3684         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3685         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3686         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3687         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3688         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3689         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3690     };
3691     abi_long a[6]; /* max 6 args */
3692     unsigned i;
3693 
3694     /* check the range of the first argument num */
3695     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3696     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3697         return -TARGET_EINVAL;
3698     }
3699     /* ensure we have space for args */
3700     if (nargs[num] > ARRAY_SIZE(a)) {
3701         return -TARGET_EINVAL;
3702     }
3703     /* collect the arguments in a[] according to nargs[] */
3704     for (i = 0; i < nargs[num]; ++i) {
3705         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3706             return -TARGET_EFAULT;
3707         }
3708     }
3709     /* now when we have the args, invoke the appropriate underlying function */
3710     switch (num) {
3711     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3712         return do_socket(a[0], a[1], a[2]);
3713     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3714         return do_bind(a[0], a[1], a[2]);
3715     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3716         return do_connect(a[0], a[1], a[2]);
3717     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3718         return get_errno(listen(a[0], a[1]));
3719     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3720         return do_accept4(a[0], a[1], a[2], 0);
3721     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3722         return do_getsockname(a[0], a[1], a[2]);
3723     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3724         return do_getpeername(a[0], a[1], a[2]);
3725     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3726         return do_socketpair(a[0], a[1], a[2], a[3]);
3727     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3728         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3729     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3730         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3731     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3732         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3733     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3734         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3735     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3736         return get_errno(shutdown(a[0], a[1]));
3737     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3738         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3739     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3740         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3741     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3742         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3743     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3744         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3745     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3746         return do_accept4(a[0], a[1], a[2], a[3]);
3747     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3748         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3749     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3750         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3751     default:
3752         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3753         return -TARGET_EINVAL;
3754     }
3755 }
3756 #endif
3757 
3758 #define N_SHM_REGIONS	32
3759 
3760 static struct shm_region {
3761     abi_ulong start;
3762     abi_ulong size;
3763     bool in_use;
3764 } shm_regions[N_SHM_REGIONS];
3765 
3766 #ifndef TARGET_SEMID64_DS
3767 /* asm-generic version of this struct */
3768 struct target_semid64_ds
3769 {
3770   struct target_ipc_perm sem_perm;
3771   abi_ulong sem_otime;
3772 #if TARGET_ABI_BITS == 32
3773   abi_ulong __unused1;
3774 #endif
3775   abi_ulong sem_ctime;
3776 #if TARGET_ABI_BITS == 32
3777   abi_ulong __unused2;
3778 #endif
3779   abi_ulong sem_nsems;
3780   abi_ulong __unused3;
3781   abi_ulong __unused4;
3782 };
3783 #endif
3784 
3785 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3786                                                abi_ulong target_addr)
3787 {
3788     struct target_ipc_perm *target_ip;
3789     struct target_semid64_ds *target_sd;
3790 
3791     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3792         return -TARGET_EFAULT;
3793     target_ip = &(target_sd->sem_perm);
3794     host_ip->__key = tswap32(target_ip->__key);
3795     host_ip->uid = tswap32(target_ip->uid);
3796     host_ip->gid = tswap32(target_ip->gid);
3797     host_ip->cuid = tswap32(target_ip->cuid);
3798     host_ip->cgid = tswap32(target_ip->cgid);
3799 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3800     host_ip->mode = tswap32(target_ip->mode);
3801 #else
3802     host_ip->mode = tswap16(target_ip->mode);
3803 #endif
3804 #if defined(TARGET_PPC)
3805     host_ip->__seq = tswap32(target_ip->__seq);
3806 #else
3807     host_ip->__seq = tswap16(target_ip->__seq);
3808 #endif
3809     unlock_user_struct(target_sd, target_addr, 0);
3810     return 0;
3811 }
3812 
3813 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3814                                                struct ipc_perm *host_ip)
3815 {
3816     struct target_ipc_perm *target_ip;
3817     struct target_semid64_ds *target_sd;
3818 
3819     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3820         return -TARGET_EFAULT;
3821     target_ip = &(target_sd->sem_perm);
3822     target_ip->__key = tswap32(host_ip->__key);
3823     target_ip->uid = tswap32(host_ip->uid);
3824     target_ip->gid = tswap32(host_ip->gid);
3825     target_ip->cuid = tswap32(host_ip->cuid);
3826     target_ip->cgid = tswap32(host_ip->cgid);
3827 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3828     target_ip->mode = tswap32(host_ip->mode);
3829 #else
3830     target_ip->mode = tswap16(host_ip->mode);
3831 #endif
3832 #if defined(TARGET_PPC)
3833     target_ip->__seq = tswap32(host_ip->__seq);
3834 #else
3835     target_ip->__seq = tswap16(host_ip->__seq);
3836 #endif
3837     unlock_user_struct(target_sd, target_addr, 1);
3838     return 0;
3839 }
3840 
3841 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3842                                                abi_ulong target_addr)
3843 {
3844     struct target_semid64_ds *target_sd;
3845 
3846     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3847         return -TARGET_EFAULT;
3848     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3849         return -TARGET_EFAULT;
3850     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3851     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3852     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3853     unlock_user_struct(target_sd, target_addr, 0);
3854     return 0;
3855 }
3856 
3857 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3858                                                struct semid_ds *host_sd)
3859 {
3860     struct target_semid64_ds *target_sd;
3861 
3862     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3863         return -TARGET_EFAULT;
3864     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3865         return -TARGET_EFAULT;
3866     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3867     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3868     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3869     unlock_user_struct(target_sd, target_addr, 1);
3870     return 0;
3871 }
3872 
3873 struct target_seminfo {
3874     int semmap;
3875     int semmni;
3876     int semmns;
3877     int semmnu;
3878     int semmsl;
3879     int semopm;
3880     int semume;
3881     int semusz;
3882     int semvmx;
3883     int semaem;
3884 };
3885 
3886 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3887                                               struct seminfo *host_seminfo)
3888 {
3889     struct target_seminfo *target_seminfo;
3890     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3891         return -TARGET_EFAULT;
3892     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3893     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3894     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3895     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3896     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3897     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3898     __put_user(host_seminfo->semume, &target_seminfo->semume);
3899     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3900     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3901     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3902     unlock_user_struct(target_seminfo, target_addr, 1);
3903     return 0;
3904 }
3905 
3906 union semun {
3907 	int val;
3908 	struct semid_ds *buf;
3909 	unsigned short *array;
3910 	struct seminfo *__buf;
3911 };
3912 
3913 union target_semun {
3914 	int val;
3915 	abi_ulong buf;
3916 	abi_ulong array;
3917 	abi_ulong __buf;
3918 };
3919 
3920 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3921                                                abi_ulong target_addr)
3922 {
3923     int nsems;
3924     unsigned short *array;
3925     union semun semun;
3926     struct semid_ds semid_ds;
3927     int i, ret;
3928 
3929     semun.buf = &semid_ds;
3930 
3931     ret = semctl(semid, 0, IPC_STAT, semun);
3932     if (ret == -1)
3933         return get_errno(ret);
3934 
3935     nsems = semid_ds.sem_nsems;
3936 
3937     *host_array = g_try_new(unsigned short, nsems);
3938     if (!*host_array) {
3939         return -TARGET_ENOMEM;
3940     }
3941     array = lock_user(VERIFY_READ, target_addr,
3942                       nsems*sizeof(unsigned short), 1);
3943     if (!array) {
3944         g_free(*host_array);
3945         return -TARGET_EFAULT;
3946     }
3947 
3948     for(i=0; i<nsems; i++) {
3949         __get_user((*host_array)[i], &array[i]);
3950     }
3951     unlock_user(array, target_addr, 0);
3952 
3953     return 0;
3954 }
3955 
3956 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3957                                                unsigned short **host_array)
3958 {
3959     int nsems;
3960     unsigned short *array;
3961     union semun semun;
3962     struct semid_ds semid_ds;
3963     int i, ret;
3964 
3965     semun.buf = &semid_ds;
3966 
3967     ret = semctl(semid, 0, IPC_STAT, semun);
3968     if (ret == -1)
3969         return get_errno(ret);
3970 
3971     nsems = semid_ds.sem_nsems;
3972 
3973     array = lock_user(VERIFY_WRITE, target_addr,
3974                       nsems*sizeof(unsigned short), 0);
3975     if (!array)
3976         return -TARGET_EFAULT;
3977 
3978     for(i=0; i<nsems; i++) {
3979         __put_user((*host_array)[i], &array[i]);
3980     }
3981     g_free(*host_array);
3982     unlock_user(array, target_addr, 1);
3983 
3984     return 0;
3985 }
3986 
3987 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3988                                  abi_ulong target_arg)
3989 {
3990     union target_semun target_su = { .buf = target_arg };
3991     union semun arg;
3992     struct semid_ds dsarg;
3993     unsigned short *array = NULL;
3994     struct seminfo seminfo;
3995     abi_long ret = -TARGET_EINVAL;
3996     abi_long err;
3997     cmd &= 0xff;
3998 
3999     switch( cmd ) {
4000 	case GETVAL:
4001 	case SETVAL:
4002             /* In 64 bit cross-endian situations, we will erroneously pick up
4003              * the wrong half of the union for the "val" element.  To rectify
4004              * this, the entire 8-byte structure is byteswapped, followed by
4005 	     * a swap of the 4 byte val field. In other cases, the data is
4006 	     * already in proper host byte order. */
4007 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4008 		target_su.buf = tswapal(target_su.buf);
4009 		arg.val = tswap32(target_su.val);
4010 	    } else {
4011 		arg.val = target_su.val;
4012 	    }
4013             ret = get_errno(semctl(semid, semnum, cmd, arg));
4014             break;
4015 	case GETALL:
4016 	case SETALL:
4017             err = target_to_host_semarray(semid, &array, target_su.array);
4018             if (err)
4019                 return err;
4020             arg.array = array;
4021             ret = get_errno(semctl(semid, semnum, cmd, arg));
4022             err = host_to_target_semarray(semid, target_su.array, &array);
4023             if (err)
4024                 return err;
4025             break;
4026 	case IPC_STAT:
4027 	case IPC_SET:
4028 	case SEM_STAT:
4029             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4030             if (err)
4031                 return err;
4032             arg.buf = &dsarg;
4033             ret = get_errno(semctl(semid, semnum, cmd, arg));
4034             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4035             if (err)
4036                 return err;
4037             break;
4038 	case IPC_INFO:
4039 	case SEM_INFO:
4040             arg.__buf = &seminfo;
4041             ret = get_errno(semctl(semid, semnum, cmd, arg));
4042             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4043             if (err)
4044                 return err;
4045             break;
4046 	case IPC_RMID:
4047 	case GETPID:
4048 	case GETNCNT:
4049 	case GETZCNT:
4050             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4051             break;
4052     }
4053 
4054     return ret;
4055 }
4056 
4057 struct target_sembuf {
4058     unsigned short sem_num;
4059     short sem_op;
4060     short sem_flg;
4061 };
4062 
4063 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4064                                              abi_ulong target_addr,
4065                                              unsigned nsops)
4066 {
4067     struct target_sembuf *target_sembuf;
4068     int i;
4069 
4070     target_sembuf = lock_user(VERIFY_READ, target_addr,
4071                               nsops*sizeof(struct target_sembuf), 1);
4072     if (!target_sembuf)
4073         return -TARGET_EFAULT;
4074 
4075     for(i=0; i<nsops; i++) {
4076         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4077         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4078         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4079     }
4080 
4081     unlock_user(target_sembuf, target_addr, 0);
4082 
4083     return 0;
4084 }
4085 
4086 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4087     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4088 
4089 /*
4090  * This macro is required to handle the s390 variants, which passes the
4091  * arguments in a different order than default.
4092  */
4093 #ifdef __s390x__
4094 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4095   (__nsops), (__timeout), (__sops)
4096 #else
4097 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4098   (__nsops), 0, (__sops), (__timeout)
4099 #endif
4100 
4101 static inline abi_long do_semtimedop(int semid,
4102                                      abi_long ptr,
4103                                      unsigned nsops,
4104                                      abi_long timeout, bool time64)
4105 {
4106     struct sembuf *sops;
4107     struct timespec ts, *pts = NULL;
4108     abi_long ret;
4109 
4110     if (timeout) {
4111         pts = &ts;
4112         if (time64) {
4113             if (target_to_host_timespec64(pts, timeout)) {
4114                 return -TARGET_EFAULT;
4115             }
4116         } else {
4117             if (target_to_host_timespec(pts, timeout)) {
4118                 return -TARGET_EFAULT;
4119             }
4120         }
4121     }
4122 
4123     if (nsops > TARGET_SEMOPM) {
4124         return -TARGET_E2BIG;
4125     }
4126 
4127     sops = g_new(struct sembuf, nsops);
4128 
4129     if (target_to_host_sembuf(sops, ptr, nsops)) {
4130         g_free(sops);
4131         return -TARGET_EFAULT;
4132     }
4133 
4134     ret = -TARGET_ENOSYS;
4135 #ifdef __NR_semtimedop
4136     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4137 #endif
4138 #ifdef __NR_ipc
4139     if (ret == -TARGET_ENOSYS) {
4140         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4141                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4142     }
4143 #endif
4144     g_free(sops);
4145     return ret;
4146 }
4147 #endif
4148 
4149 struct target_msqid_ds
4150 {
4151     struct target_ipc_perm msg_perm;
4152     abi_ulong msg_stime;
4153 #if TARGET_ABI_BITS == 32
4154     abi_ulong __unused1;
4155 #endif
4156     abi_ulong msg_rtime;
4157 #if TARGET_ABI_BITS == 32
4158     abi_ulong __unused2;
4159 #endif
4160     abi_ulong msg_ctime;
4161 #if TARGET_ABI_BITS == 32
4162     abi_ulong __unused3;
4163 #endif
4164     abi_ulong __msg_cbytes;
4165     abi_ulong msg_qnum;
4166     abi_ulong msg_qbytes;
4167     abi_ulong msg_lspid;
4168     abi_ulong msg_lrpid;
4169     abi_ulong __unused4;
4170     abi_ulong __unused5;
4171 };
4172 
4173 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4174                                                abi_ulong target_addr)
4175 {
4176     struct target_msqid_ds *target_md;
4177 
4178     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4179         return -TARGET_EFAULT;
4180     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4181         return -TARGET_EFAULT;
4182     host_md->msg_stime = tswapal(target_md->msg_stime);
4183     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4184     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4185     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4186     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4187     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4188     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4189     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4190     unlock_user_struct(target_md, target_addr, 0);
4191     return 0;
4192 }
4193 
4194 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4195                                                struct msqid_ds *host_md)
4196 {
4197     struct target_msqid_ds *target_md;
4198 
4199     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4200         return -TARGET_EFAULT;
4201     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4202         return -TARGET_EFAULT;
4203     target_md->msg_stime = tswapal(host_md->msg_stime);
4204     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4205     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4206     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4207     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4208     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4209     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4210     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4211     unlock_user_struct(target_md, target_addr, 1);
4212     return 0;
4213 }
4214 
4215 struct target_msginfo {
4216     int msgpool;
4217     int msgmap;
4218     int msgmax;
4219     int msgmnb;
4220     int msgmni;
4221     int msgssz;
4222     int msgtql;
4223     unsigned short int msgseg;
4224 };
4225 
4226 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4227                                               struct msginfo *host_msginfo)
4228 {
4229     struct target_msginfo *target_msginfo;
4230     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4231         return -TARGET_EFAULT;
4232     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4233     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4234     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4235     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4236     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4237     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4238     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4239     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4240     unlock_user_struct(target_msginfo, target_addr, 1);
4241     return 0;
4242 }
4243 
4244 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4245 {
4246     struct msqid_ds dsarg;
4247     struct msginfo msginfo;
4248     abi_long ret = -TARGET_EINVAL;
4249 
4250     cmd &= 0xff;
4251 
4252     switch (cmd) {
4253     case IPC_STAT:
4254     case IPC_SET:
4255     case MSG_STAT:
4256         if (target_to_host_msqid_ds(&dsarg,ptr))
4257             return -TARGET_EFAULT;
4258         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4259         if (host_to_target_msqid_ds(ptr,&dsarg))
4260             return -TARGET_EFAULT;
4261         break;
4262     case IPC_RMID:
4263         ret = get_errno(msgctl(msgid, cmd, NULL));
4264         break;
4265     case IPC_INFO:
4266     case MSG_INFO:
4267         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4268         if (host_to_target_msginfo(ptr, &msginfo))
4269             return -TARGET_EFAULT;
4270         break;
4271     }
4272 
4273     return ret;
4274 }
4275 
4276 struct target_msgbuf {
4277     abi_long mtype;
4278     char	mtext[1];
4279 };
4280 
4281 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4282                                  ssize_t msgsz, int msgflg)
4283 {
4284     struct target_msgbuf *target_mb;
4285     struct msgbuf *host_mb;
4286     abi_long ret = 0;
4287 
4288     if (msgsz < 0) {
4289         return -TARGET_EINVAL;
4290     }
4291 
4292     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4293         return -TARGET_EFAULT;
4294     host_mb = g_try_malloc(msgsz + sizeof(long));
4295     if (!host_mb) {
4296         unlock_user_struct(target_mb, msgp, 0);
4297         return -TARGET_ENOMEM;
4298     }
4299     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4300     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4301     ret = -TARGET_ENOSYS;
4302 #ifdef __NR_msgsnd
4303     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4304 #endif
4305 #ifdef __NR_ipc
4306     if (ret == -TARGET_ENOSYS) {
4307 #ifdef __s390x__
4308         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4309                                  host_mb));
4310 #else
4311         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4312                                  host_mb, 0));
4313 #endif
4314     }
4315 #endif
4316     g_free(host_mb);
4317     unlock_user_struct(target_mb, msgp, 0);
4318 
4319     return ret;
4320 }
4321 
4322 #ifdef __NR_ipc
4323 #if defined(__sparc__)
4324 /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4325 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4326 #elif defined(__s390x__)
4327 /* The s390 sys_ipc variant has only five parameters.  */
4328 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4329     ((long int[]){(long int)__msgp, __msgtyp})
4330 #else
4331 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4332     ((long int[]){(long int)__msgp, __msgtyp}), 0
4333 #endif
4334 #endif
4335 
4336 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4337                                  ssize_t msgsz, abi_long msgtyp,
4338                                  int msgflg)
4339 {
4340     struct target_msgbuf *target_mb;
4341     char *target_mtext;
4342     struct msgbuf *host_mb;
4343     abi_long ret = 0;
4344 
4345     if (msgsz < 0) {
4346         return -TARGET_EINVAL;
4347     }
4348 
4349     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4350         return -TARGET_EFAULT;
4351 
4352     host_mb = g_try_malloc(msgsz + sizeof(long));
4353     if (!host_mb) {
4354         ret = -TARGET_ENOMEM;
4355         goto end;
4356     }
4357     ret = -TARGET_ENOSYS;
4358 #ifdef __NR_msgrcv
4359     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4360 #endif
4361 #ifdef __NR_ipc
4362     if (ret == -TARGET_ENOSYS) {
4363         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4364                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4365     }
4366 #endif
4367 
4368     if (ret > 0) {
4369         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4370         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4371         if (!target_mtext) {
4372             ret = -TARGET_EFAULT;
4373             goto end;
4374         }
4375         memcpy(target_mb->mtext, host_mb->mtext, ret);
4376         unlock_user(target_mtext, target_mtext_addr, ret);
4377     }
4378 
4379     target_mb->mtype = tswapal(host_mb->mtype);
4380 
4381 end:
4382     if (target_mb)
4383         unlock_user_struct(target_mb, msgp, 1);
4384     g_free(host_mb);
4385     return ret;
4386 }
4387 
4388 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4389                                                abi_ulong target_addr)
4390 {
4391     struct target_shmid_ds *target_sd;
4392 
4393     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4394         return -TARGET_EFAULT;
4395     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4396         return -TARGET_EFAULT;
4397     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4398     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4399     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4400     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4401     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4402     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4403     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4404     unlock_user_struct(target_sd, target_addr, 0);
4405     return 0;
4406 }
4407 
4408 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4409                                                struct shmid_ds *host_sd)
4410 {
4411     struct target_shmid_ds *target_sd;
4412 
4413     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4414         return -TARGET_EFAULT;
4415     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4416         return -TARGET_EFAULT;
4417     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4418     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4419     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4420     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4421     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4422     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4423     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4424     unlock_user_struct(target_sd, target_addr, 1);
4425     return 0;
4426 }
4427 
4428 struct  target_shminfo {
4429     abi_ulong shmmax;
4430     abi_ulong shmmin;
4431     abi_ulong shmmni;
4432     abi_ulong shmseg;
4433     abi_ulong shmall;
4434 };
4435 
4436 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4437                                               struct shminfo *host_shminfo)
4438 {
4439     struct target_shminfo *target_shminfo;
4440     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4441         return -TARGET_EFAULT;
4442     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4443     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4444     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4445     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4446     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4447     unlock_user_struct(target_shminfo, target_addr, 1);
4448     return 0;
4449 }
4450 
4451 struct target_shm_info {
4452     int used_ids;
4453     abi_ulong shm_tot;
4454     abi_ulong shm_rss;
4455     abi_ulong shm_swp;
4456     abi_ulong swap_attempts;
4457     abi_ulong swap_successes;
4458 };
4459 
4460 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4461                                                struct shm_info *host_shm_info)
4462 {
4463     struct target_shm_info *target_shm_info;
4464     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4465         return -TARGET_EFAULT;
4466     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4467     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4468     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4469     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4470     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4471     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4472     unlock_user_struct(target_shm_info, target_addr, 1);
4473     return 0;
4474 }
4475 
4476 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4477 {
4478     struct shmid_ds dsarg;
4479     struct shminfo shminfo;
4480     struct shm_info shm_info;
4481     abi_long ret = -TARGET_EINVAL;
4482 
4483     cmd &= 0xff;
4484 
4485     switch(cmd) {
4486     case IPC_STAT:
4487     case IPC_SET:
4488     case SHM_STAT:
4489         if (target_to_host_shmid_ds(&dsarg, buf))
4490             return -TARGET_EFAULT;
4491         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4492         if (host_to_target_shmid_ds(buf, &dsarg))
4493             return -TARGET_EFAULT;
4494         break;
4495     case IPC_INFO:
4496         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4497         if (host_to_target_shminfo(buf, &shminfo))
4498             return -TARGET_EFAULT;
4499         break;
4500     case SHM_INFO:
4501         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4502         if (host_to_target_shm_info(buf, &shm_info))
4503             return -TARGET_EFAULT;
4504         break;
4505     case IPC_RMID:
4506     case SHM_LOCK:
4507     case SHM_UNLOCK:
4508         ret = get_errno(shmctl(shmid, cmd, NULL));
4509         break;
4510     }
4511 
4512     return ret;
4513 }
4514 
4515 #ifndef TARGET_FORCE_SHMLBA
4516 /* For most architectures, SHMLBA is the same as the page size;
4517  * some architectures have larger values, in which case they should
4518  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4519  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4520  * and defining its own value for SHMLBA.
4521  *
4522  * The kernel also permits SHMLBA to be set by the architecture to a
4523  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4524  * this means that addresses are rounded to the large size if
4525  * SHM_RND is set but addresses not aligned to that size are not rejected
4526  * as long as they are at least page-aligned. Since the only architecture
4527  * which uses this is ia64 this code doesn't provide for that oddity.
4528  */
4529 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4530 {
4531     return TARGET_PAGE_SIZE;
4532 }
4533 #endif
4534 
4535 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4536                                  int shmid, abi_ulong shmaddr, int shmflg)
4537 {
4538     CPUState *cpu = env_cpu(cpu_env);
4539     abi_long raddr;
4540     void *host_raddr;
4541     struct shmid_ds shm_info;
4542     int i,ret;
4543     abi_ulong shmlba;
4544 
4545     /* shmat pointers are always untagged */
4546 
4547     /* find out the length of the shared memory segment */
4548     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4549     if (is_error(ret)) {
4550         /* can't get length, bail out */
4551         return ret;
4552     }
4553 
4554     shmlba = target_shmlba(cpu_env);
4555 
4556     if (shmaddr & (shmlba - 1)) {
4557         if (shmflg & SHM_RND) {
4558             shmaddr &= ~(shmlba - 1);
4559         } else {
4560             return -TARGET_EINVAL;
4561         }
4562     }
4563     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4564         return -TARGET_EINVAL;
4565     }
4566 
4567     mmap_lock();
4568 
4569     /*
4570      * We're mapping shared memory, so ensure we generate code for parallel
4571      * execution and flush old translations.  This will work up to the level
4572      * supported by the host -- anything that requires EXCP_ATOMIC will not
4573      * be atomic with respect to an external process.
4574      */
4575     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4576         cpu->tcg_cflags |= CF_PARALLEL;
4577         tb_flush(cpu);
4578     }
4579 
4580     if (shmaddr)
4581         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4582     else {
4583         abi_ulong mmap_start;
4584 
4585         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4586         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4587 
4588         if (mmap_start == -1) {
4589             errno = ENOMEM;
4590             host_raddr = (void *)-1;
4591         } else
4592             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4593                                shmflg | SHM_REMAP);
4594     }
4595 
4596     if (host_raddr == (void *)-1) {
4597         mmap_unlock();
4598         return get_errno((long)host_raddr);
4599     }
4600     raddr=h2g((unsigned long)host_raddr);
4601 
4602     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4603                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4604                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4605 
4606     for (i = 0; i < N_SHM_REGIONS; i++) {
4607         if (!shm_regions[i].in_use) {
4608             shm_regions[i].in_use = true;
4609             shm_regions[i].start = raddr;
4610             shm_regions[i].size = shm_info.shm_segsz;
4611             break;
4612         }
4613     }
4614 
4615     mmap_unlock();
4616     return raddr;
4617 
4618 }
4619 
4620 static inline abi_long do_shmdt(abi_ulong shmaddr)
4621 {
4622     int i;
4623     abi_long rv;
4624 
4625     /* shmdt pointers are always untagged */
4626 
4627     mmap_lock();
4628 
4629     for (i = 0; i < N_SHM_REGIONS; ++i) {
4630         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4631             shm_regions[i].in_use = false;
4632             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4633             break;
4634         }
4635     }
4636     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4637 
4638     mmap_unlock();
4639 
4640     return rv;
4641 }
4642 
4643 #ifdef TARGET_NR_ipc
4644 /* ??? This only works with linear mappings.  */
4645 /* do_ipc() must return target values and target errnos. */
4646 static abi_long do_ipc(CPUArchState *cpu_env,
4647                        unsigned int call, abi_long first,
4648                        abi_long second, abi_long third,
4649                        abi_long ptr, abi_long fifth)
4650 {
4651     int version;
4652     abi_long ret = 0;
4653 
4654     version = call >> 16;
4655     call &= 0xffff;
4656 
4657     switch (call) {
4658     case IPCOP_semop:
4659         ret = do_semtimedop(first, ptr, second, 0, false);
4660         break;
4661     case IPCOP_semtimedop:
4662     /*
4663      * The s390 sys_ipc variant has only five parameters instead of six
4664      * (as for default variant) and the only difference is the handling of
4665      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4666      * to a struct timespec where the generic variant uses fifth parameter.
4667      */
4668 #if defined(TARGET_S390X)
4669         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4670 #else
4671         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4672 #endif
4673         break;
4674 
4675     case IPCOP_semget:
4676         ret = get_errno(semget(first, second, third));
4677         break;
4678 
4679     case IPCOP_semctl: {
4680         /* The semun argument to semctl is passed by value, so dereference the
4681          * ptr argument. */
4682         abi_ulong atptr;
4683         get_user_ual(atptr, ptr);
4684         ret = do_semctl(first, second, third, atptr);
4685         break;
4686     }
4687 
4688     case IPCOP_msgget:
4689         ret = get_errno(msgget(first, second));
4690         break;
4691 
4692     case IPCOP_msgsnd:
4693         ret = do_msgsnd(first, ptr, second, third);
4694         break;
4695 
4696     case IPCOP_msgctl:
4697         ret = do_msgctl(first, second, ptr);
4698         break;
4699 
4700     case IPCOP_msgrcv:
4701         switch (version) {
4702         case 0:
4703             {
4704                 struct target_ipc_kludge {
4705                     abi_long msgp;
4706                     abi_long msgtyp;
4707                 } *tmp;
4708 
4709                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4710                     ret = -TARGET_EFAULT;
4711                     break;
4712                 }
4713 
4714                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4715 
4716                 unlock_user_struct(tmp, ptr, 0);
4717                 break;
4718             }
4719         default:
4720             ret = do_msgrcv(first, ptr, second, fifth, third);
4721         }
4722         break;
4723 
4724     case IPCOP_shmat:
4725         switch (version) {
4726         default:
4727         {
4728             abi_ulong raddr;
4729             raddr = do_shmat(cpu_env, first, ptr, second);
4730             if (is_error(raddr))
4731                 return get_errno(raddr);
4732             if (put_user_ual(raddr, third))
4733                 return -TARGET_EFAULT;
4734             break;
4735         }
4736         case 1:
4737             ret = -TARGET_EINVAL;
4738             break;
4739         }
4740 	break;
4741     case IPCOP_shmdt:
4742         ret = do_shmdt(ptr);
4743 	break;
4744 
4745     case IPCOP_shmget:
4746 	/* IPC_* flag values are the same on all linux platforms */
4747 	ret = get_errno(shmget(first, second, third));
4748 	break;
4749 
4750 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4751     case IPCOP_shmctl:
4752         ret = do_shmctl(first, second, ptr);
4753         break;
4754     default:
4755         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4756                       call, version);
4757 	ret = -TARGET_ENOSYS;
4758 	break;
4759     }
4760     return ret;
4761 }
4762 #endif
4763 
4764 /* kernel structure types definitions */
4765 
4766 #define STRUCT(name, ...) STRUCT_ ## name,
4767 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4768 enum {
4769 #include "syscall_types.h"
4770 STRUCT_MAX
4771 };
4772 #undef STRUCT
4773 #undef STRUCT_SPECIAL
4774 
4775 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4776 #define STRUCT_SPECIAL(name)
4777 #include "syscall_types.h"
4778 #undef STRUCT
4779 #undef STRUCT_SPECIAL
4780 
4781 #define MAX_STRUCT_SIZE 4096
4782 
4783 #ifdef CONFIG_FIEMAP
4784 /* So fiemap access checks don't overflow on 32 bit systems.
4785  * This is very slightly smaller than the limit imposed by
4786  * the underlying kernel.
4787  */
4788 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4789                             / sizeof(struct fiemap_extent))
4790 
4791 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4792                                        int fd, int cmd, abi_long arg)
4793 {
4794     /* The parameter for this ioctl is a struct fiemap followed
4795      * by an array of struct fiemap_extent whose size is set
4796      * in fiemap->fm_extent_count. The array is filled in by the
4797      * ioctl.
4798      */
4799     int target_size_in, target_size_out;
4800     struct fiemap *fm;
4801     const argtype *arg_type = ie->arg_type;
4802     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4803     void *argptr, *p;
4804     abi_long ret;
4805     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4806     uint32_t outbufsz;
4807     int free_fm = 0;
4808 
4809     assert(arg_type[0] == TYPE_PTR);
4810     assert(ie->access == IOC_RW);
4811     arg_type++;
4812     target_size_in = thunk_type_size(arg_type, 0);
4813     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4814     if (!argptr) {
4815         return -TARGET_EFAULT;
4816     }
4817     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4818     unlock_user(argptr, arg, 0);
4819     fm = (struct fiemap *)buf_temp;
4820     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4821         return -TARGET_EINVAL;
4822     }
4823 
4824     outbufsz = sizeof (*fm) +
4825         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4826 
4827     if (outbufsz > MAX_STRUCT_SIZE) {
4828         /* We can't fit all the extents into the fixed size buffer.
4829          * Allocate one that is large enough and use it instead.
4830          */
4831         fm = g_try_malloc(outbufsz);
4832         if (!fm) {
4833             return -TARGET_ENOMEM;
4834         }
4835         memcpy(fm, buf_temp, sizeof(struct fiemap));
4836         free_fm = 1;
4837     }
4838     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4839     if (!is_error(ret)) {
4840         target_size_out = target_size_in;
4841         /* An extent_count of 0 means we were only counting the extents
4842          * so there are no structs to copy
4843          */
4844         if (fm->fm_extent_count != 0) {
4845             target_size_out += fm->fm_mapped_extents * extent_size;
4846         }
4847         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4848         if (!argptr) {
4849             ret = -TARGET_EFAULT;
4850         } else {
4851             /* Convert the struct fiemap */
4852             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4853             if (fm->fm_extent_count != 0) {
4854                 p = argptr + target_size_in;
4855                 /* ...and then all the struct fiemap_extents */
4856                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4857                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4858                                   THUNK_TARGET);
4859                     p += extent_size;
4860                 }
4861             }
4862             unlock_user(argptr, arg, target_size_out);
4863         }
4864     }
4865     if (free_fm) {
4866         g_free(fm);
4867     }
4868     return ret;
4869 }
4870 #endif
4871 
4872 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4873                                 int fd, int cmd, abi_long arg)
4874 {
4875     const argtype *arg_type = ie->arg_type;
4876     int target_size;
4877     void *argptr;
4878     int ret;
4879     struct ifconf *host_ifconf;
4880     uint32_t outbufsz;
4881     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4882     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4883     int target_ifreq_size;
4884     int nb_ifreq;
4885     int free_buf = 0;
4886     int i;
4887     int target_ifc_len;
4888     abi_long target_ifc_buf;
4889     int host_ifc_len;
4890     char *host_ifc_buf;
4891 
4892     assert(arg_type[0] == TYPE_PTR);
4893     assert(ie->access == IOC_RW);
4894 
4895     arg_type++;
4896     target_size = thunk_type_size(arg_type, 0);
4897 
4898     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4899     if (!argptr)
4900         return -TARGET_EFAULT;
4901     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4902     unlock_user(argptr, arg, 0);
4903 
4904     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4905     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4906     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4907 
4908     if (target_ifc_buf != 0) {
4909         target_ifc_len = host_ifconf->ifc_len;
4910         nb_ifreq = target_ifc_len / target_ifreq_size;
4911         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4912 
4913         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4914         if (outbufsz > MAX_STRUCT_SIZE) {
4915             /*
4916              * We can't fit all the extents into the fixed size buffer.
4917              * Allocate one that is large enough and use it instead.
4918              */
4919             host_ifconf = g_try_malloc(outbufsz);
4920             if (!host_ifconf) {
4921                 return -TARGET_ENOMEM;
4922             }
4923             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4924             free_buf = 1;
4925         }
4926         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4927 
4928         host_ifconf->ifc_len = host_ifc_len;
4929     } else {
4930       host_ifc_buf = NULL;
4931     }
4932     host_ifconf->ifc_buf = host_ifc_buf;
4933 
4934     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4935     if (!is_error(ret)) {
4936 	/* convert host ifc_len to target ifc_len */
4937 
4938         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4939         target_ifc_len = nb_ifreq * target_ifreq_size;
4940         host_ifconf->ifc_len = target_ifc_len;
4941 
4942 	/* restore target ifc_buf */
4943 
4944         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4945 
4946 	/* copy struct ifconf to target user */
4947 
4948         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4949         if (!argptr)
4950             return -TARGET_EFAULT;
4951         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4952         unlock_user(argptr, arg, target_size);
4953 
4954         if (target_ifc_buf != 0) {
4955             /* copy ifreq[] to target user */
4956             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4957             for (i = 0; i < nb_ifreq ; i++) {
4958                 thunk_convert(argptr + i * target_ifreq_size,
4959                               host_ifc_buf + i * sizeof(struct ifreq),
4960                               ifreq_arg_type, THUNK_TARGET);
4961             }
4962             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4963         }
4964     }
4965 
4966     if (free_buf) {
4967         g_free(host_ifconf);
4968     }
4969 
4970     return ret;
4971 }
4972 
4973 #if defined(CONFIG_USBFS)
4974 #if HOST_LONG_BITS > 64
4975 #error USBDEVFS thunks do not support >64 bit hosts yet.
4976 #endif
4977 struct live_urb {
4978     uint64_t target_urb_adr;
4979     uint64_t target_buf_adr;
4980     char *target_buf_ptr;
4981     struct usbdevfs_urb host_urb;
4982 };
4983 
4984 static GHashTable *usbdevfs_urb_hashtable(void)
4985 {
4986     static GHashTable *urb_hashtable;
4987 
4988     if (!urb_hashtable) {
4989         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4990     }
4991     return urb_hashtable;
4992 }
4993 
4994 static void urb_hashtable_insert(struct live_urb *urb)
4995 {
4996     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4997     g_hash_table_insert(urb_hashtable, urb, urb);
4998 }
4999 
5000 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5001 {
5002     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5003     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5004 }
5005 
5006 static void urb_hashtable_remove(struct live_urb *urb)
5007 {
5008     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5009     g_hash_table_remove(urb_hashtable, urb);
5010 }
5011 
5012 static abi_long
5013 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5014                           int fd, int cmd, abi_long arg)
5015 {
5016     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5017     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5018     struct live_urb *lurb;
5019     void *argptr;
5020     uint64_t hurb;
5021     int target_size;
5022     uintptr_t target_urb_adr;
5023     abi_long ret;
5024 
5025     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5026 
5027     memset(buf_temp, 0, sizeof(uint64_t));
5028     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5029     if (is_error(ret)) {
5030         return ret;
5031     }
5032 
5033     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5034     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5035     if (!lurb->target_urb_adr) {
5036         return -TARGET_EFAULT;
5037     }
5038     urb_hashtable_remove(lurb);
5039     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5040         lurb->host_urb.buffer_length);
5041     lurb->target_buf_ptr = NULL;
5042 
5043     /* restore the guest buffer pointer */
5044     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5045 
5046     /* update the guest urb struct */
5047     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5048     if (!argptr) {
5049         g_free(lurb);
5050         return -TARGET_EFAULT;
5051     }
5052     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5053     unlock_user(argptr, lurb->target_urb_adr, target_size);
5054 
5055     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5056     /* write back the urb handle */
5057     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5058     if (!argptr) {
5059         g_free(lurb);
5060         return -TARGET_EFAULT;
5061     }
5062 
5063     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5064     target_urb_adr = lurb->target_urb_adr;
5065     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5066     unlock_user(argptr, arg, target_size);
5067 
5068     g_free(lurb);
5069     return ret;
5070 }
5071 
5072 static abi_long
5073 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5074                              uint8_t *buf_temp __attribute__((unused)),
5075                              int fd, int cmd, abi_long arg)
5076 {
5077     struct live_urb *lurb;
5078 
5079     /* map target address back to host URB with metadata. */
5080     lurb = urb_hashtable_lookup(arg);
5081     if (!lurb) {
5082         return -TARGET_EFAULT;
5083     }
5084     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5085 }
5086 
5087 static abi_long
5088 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5089                             int fd, int cmd, abi_long arg)
5090 {
5091     const argtype *arg_type = ie->arg_type;
5092     int target_size;
5093     abi_long ret;
5094     void *argptr;
5095     int rw_dir;
5096     struct live_urb *lurb;
5097 
5098     /*
5099      * each submitted URB needs to map to a unique ID for the
5100      * kernel, and that unique ID needs to be a pointer to
5101      * host memory.  hence, we need to malloc for each URB.
5102      * isochronous transfers have a variable length struct.
5103      */
5104     arg_type++;
5105     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5106 
5107     /* construct host copy of urb and metadata */
5108     lurb = g_try_new0(struct live_urb, 1);
5109     if (!lurb) {
5110         return -TARGET_ENOMEM;
5111     }
5112 
5113     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5114     if (!argptr) {
5115         g_free(lurb);
5116         return -TARGET_EFAULT;
5117     }
5118     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5119     unlock_user(argptr, arg, 0);
5120 
5121     lurb->target_urb_adr = arg;
5122     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5123 
5124     /* buffer space used depends on endpoint type so lock the entire buffer */
5125     /* control type urbs should check the buffer contents for true direction */
5126     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5127     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5128         lurb->host_urb.buffer_length, 1);
5129     if (lurb->target_buf_ptr == NULL) {
5130         g_free(lurb);
5131         return -TARGET_EFAULT;
5132     }
5133 
5134     /* update buffer pointer in host copy */
5135     lurb->host_urb.buffer = lurb->target_buf_ptr;
5136 
5137     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5138     if (is_error(ret)) {
5139         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5140         g_free(lurb);
5141     } else {
5142         urb_hashtable_insert(lurb);
5143     }
5144 
5145     return ret;
5146 }
5147 #endif /* CONFIG_USBFS */
5148 
5149 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5150                             int cmd, abi_long arg)
5151 {
5152     void *argptr;
5153     struct dm_ioctl *host_dm;
5154     abi_long guest_data;
5155     uint32_t guest_data_size;
5156     int target_size;
5157     const argtype *arg_type = ie->arg_type;
5158     abi_long ret;
5159     void *big_buf = NULL;
5160     char *host_data;
5161 
5162     arg_type++;
5163     target_size = thunk_type_size(arg_type, 0);
5164     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5165     if (!argptr) {
5166         ret = -TARGET_EFAULT;
5167         goto out;
5168     }
5169     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5170     unlock_user(argptr, arg, 0);
5171 
5172     /* buf_temp is too small, so fetch things into a bigger buffer */
5173     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5174     memcpy(big_buf, buf_temp, target_size);
5175     buf_temp = big_buf;
5176     host_dm = big_buf;
5177 
5178     guest_data = arg + host_dm->data_start;
5179     if ((guest_data - arg) < 0) {
5180         ret = -TARGET_EINVAL;
5181         goto out;
5182     }
5183     guest_data_size = host_dm->data_size - host_dm->data_start;
5184     host_data = (char*)host_dm + host_dm->data_start;
5185 
5186     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5187     if (!argptr) {
5188         ret = -TARGET_EFAULT;
5189         goto out;
5190     }
5191 
5192     switch (ie->host_cmd) {
5193     case DM_REMOVE_ALL:
5194     case DM_LIST_DEVICES:
5195     case DM_DEV_CREATE:
5196     case DM_DEV_REMOVE:
5197     case DM_DEV_SUSPEND:
5198     case DM_DEV_STATUS:
5199     case DM_DEV_WAIT:
5200     case DM_TABLE_STATUS:
5201     case DM_TABLE_CLEAR:
5202     case DM_TABLE_DEPS:
5203     case DM_LIST_VERSIONS:
5204         /* no input data */
5205         break;
5206     case DM_DEV_RENAME:
5207     case DM_DEV_SET_GEOMETRY:
5208         /* data contains only strings */
5209         memcpy(host_data, argptr, guest_data_size);
5210         break;
5211     case DM_TARGET_MSG:
5212         memcpy(host_data, argptr, guest_data_size);
5213         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5214         break;
5215     case DM_TABLE_LOAD:
5216     {
5217         void *gspec = argptr;
5218         void *cur_data = host_data;
5219         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5220         int spec_size = thunk_type_size(arg_type, 0);
5221         int i;
5222 
5223         for (i = 0; i < host_dm->target_count; i++) {
5224             struct dm_target_spec *spec = cur_data;
5225             uint32_t next;
5226             int slen;
5227 
5228             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5229             slen = strlen((char*)gspec + spec_size) + 1;
5230             next = spec->next;
5231             spec->next = sizeof(*spec) + slen;
5232             strcpy((char*)&spec[1], gspec + spec_size);
5233             gspec += next;
5234             cur_data += spec->next;
5235         }
5236         break;
5237     }
5238     default:
5239         ret = -TARGET_EINVAL;
5240         unlock_user(argptr, guest_data, 0);
5241         goto out;
5242     }
5243     unlock_user(argptr, guest_data, 0);
5244 
5245     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5246     if (!is_error(ret)) {
5247         guest_data = arg + host_dm->data_start;
5248         guest_data_size = host_dm->data_size - host_dm->data_start;
5249         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5250         switch (ie->host_cmd) {
5251         case DM_REMOVE_ALL:
5252         case DM_DEV_CREATE:
5253         case DM_DEV_REMOVE:
5254         case DM_DEV_RENAME:
5255         case DM_DEV_SUSPEND:
5256         case DM_DEV_STATUS:
5257         case DM_TABLE_LOAD:
5258         case DM_TABLE_CLEAR:
5259         case DM_TARGET_MSG:
5260         case DM_DEV_SET_GEOMETRY:
5261             /* no return data */
5262             break;
5263         case DM_LIST_DEVICES:
5264         {
5265             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5266             uint32_t remaining_data = guest_data_size;
5267             void *cur_data = argptr;
5268             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5269             int nl_size = 12; /* can't use thunk_size due to alignment */
5270 
5271             while (1) {
5272                 uint32_t next = nl->next;
5273                 if (next) {
5274                     nl->next = nl_size + (strlen(nl->name) + 1);
5275                 }
5276                 if (remaining_data < nl->next) {
5277                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5278                     break;
5279                 }
5280                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5281                 strcpy(cur_data + nl_size, nl->name);
5282                 cur_data += nl->next;
5283                 remaining_data -= nl->next;
5284                 if (!next) {
5285                     break;
5286                 }
5287                 nl = (void*)nl + next;
5288             }
5289             break;
5290         }
5291         case DM_DEV_WAIT:
5292         case DM_TABLE_STATUS:
5293         {
5294             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5295             void *cur_data = argptr;
5296             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5297             int spec_size = thunk_type_size(arg_type, 0);
5298             int i;
5299 
5300             for (i = 0; i < host_dm->target_count; i++) {
5301                 uint32_t next = spec->next;
5302                 int slen = strlen((char*)&spec[1]) + 1;
5303                 spec->next = (cur_data - argptr) + spec_size + slen;
5304                 if (guest_data_size < spec->next) {
5305                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5306                     break;
5307                 }
5308                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5309                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5310                 cur_data = argptr + spec->next;
5311                 spec = (void*)host_dm + host_dm->data_start + next;
5312             }
5313             break;
5314         }
5315         case DM_TABLE_DEPS:
5316         {
5317             void *hdata = (void*)host_dm + host_dm->data_start;
5318             int count = *(uint32_t*)hdata;
5319             uint64_t *hdev = hdata + 8;
5320             uint64_t *gdev = argptr + 8;
5321             int i;
5322 
5323             *(uint32_t*)argptr = tswap32(count);
5324             for (i = 0; i < count; i++) {
5325                 *gdev = tswap64(*hdev);
5326                 gdev++;
5327                 hdev++;
5328             }
5329             break;
5330         }
5331         case DM_LIST_VERSIONS:
5332         {
5333             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5334             uint32_t remaining_data = guest_data_size;
5335             void *cur_data = argptr;
5336             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5337             int vers_size = thunk_type_size(arg_type, 0);
5338 
5339             while (1) {
5340                 uint32_t next = vers->next;
5341                 if (next) {
5342                     vers->next = vers_size + (strlen(vers->name) + 1);
5343                 }
5344                 if (remaining_data < vers->next) {
5345                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5346                     break;
5347                 }
5348                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5349                 strcpy(cur_data + vers_size, vers->name);
5350                 cur_data += vers->next;
5351                 remaining_data -= vers->next;
5352                 if (!next) {
5353                     break;
5354                 }
5355                 vers = (void*)vers + next;
5356             }
5357             break;
5358         }
5359         default:
5360             unlock_user(argptr, guest_data, 0);
5361             ret = -TARGET_EINVAL;
5362             goto out;
5363         }
5364         unlock_user(argptr, guest_data, guest_data_size);
5365 
5366         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5367         if (!argptr) {
5368             ret = -TARGET_EFAULT;
5369             goto out;
5370         }
5371         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5372         unlock_user(argptr, arg, target_size);
5373     }
5374 out:
5375     g_free(big_buf);
5376     return ret;
5377 }
5378 
5379 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5380                                int cmd, abi_long arg)
5381 {
5382     void *argptr;
5383     int target_size;
5384     const argtype *arg_type = ie->arg_type;
5385     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5386     abi_long ret;
5387 
5388     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5389     struct blkpg_partition host_part;
5390 
5391     /* Read and convert blkpg */
5392     arg_type++;
5393     target_size = thunk_type_size(arg_type, 0);
5394     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5395     if (!argptr) {
5396         ret = -TARGET_EFAULT;
5397         goto out;
5398     }
5399     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5400     unlock_user(argptr, arg, 0);
5401 
5402     switch (host_blkpg->op) {
5403     case BLKPG_ADD_PARTITION:
5404     case BLKPG_DEL_PARTITION:
5405         /* payload is struct blkpg_partition */
5406         break;
5407     default:
5408         /* Unknown opcode */
5409         ret = -TARGET_EINVAL;
5410         goto out;
5411     }
5412 
5413     /* Read and convert blkpg->data */
5414     arg = (abi_long)(uintptr_t)host_blkpg->data;
5415     target_size = thunk_type_size(part_arg_type, 0);
5416     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5417     if (!argptr) {
5418         ret = -TARGET_EFAULT;
5419         goto out;
5420     }
5421     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5422     unlock_user(argptr, arg, 0);
5423 
5424     /* Swizzle the data pointer to our local copy and call! */
5425     host_blkpg->data = &host_part;
5426     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5427 
5428 out:
5429     return ret;
5430 }
5431 
5432 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5433                                 int fd, int cmd, abi_long arg)
5434 {
5435     const argtype *arg_type = ie->arg_type;
5436     const StructEntry *se;
5437     const argtype *field_types;
5438     const int *dst_offsets, *src_offsets;
5439     int target_size;
5440     void *argptr;
5441     abi_ulong *target_rt_dev_ptr = NULL;
5442     unsigned long *host_rt_dev_ptr = NULL;
5443     abi_long ret;
5444     int i;
5445 
5446     assert(ie->access == IOC_W);
5447     assert(*arg_type == TYPE_PTR);
5448     arg_type++;
5449     assert(*arg_type == TYPE_STRUCT);
5450     target_size = thunk_type_size(arg_type, 0);
5451     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5452     if (!argptr) {
5453         return -TARGET_EFAULT;
5454     }
5455     arg_type++;
5456     assert(*arg_type == (int)STRUCT_rtentry);
5457     se = struct_entries + *arg_type++;
5458     assert(se->convert[0] == NULL);
5459     /* convert struct here to be able to catch rt_dev string */
5460     field_types = se->field_types;
5461     dst_offsets = se->field_offsets[THUNK_HOST];
5462     src_offsets = se->field_offsets[THUNK_TARGET];
5463     for (i = 0; i < se->nb_fields; i++) {
5464         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5465             assert(*field_types == TYPE_PTRVOID);
5466             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5467             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5468             if (*target_rt_dev_ptr != 0) {
5469                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5470                                                   tswapal(*target_rt_dev_ptr));
5471                 if (!*host_rt_dev_ptr) {
5472                     unlock_user(argptr, arg, 0);
5473                     return -TARGET_EFAULT;
5474                 }
5475             } else {
5476                 *host_rt_dev_ptr = 0;
5477             }
5478             field_types++;
5479             continue;
5480         }
5481         field_types = thunk_convert(buf_temp + dst_offsets[i],
5482                                     argptr + src_offsets[i],
5483                                     field_types, THUNK_HOST);
5484     }
5485     unlock_user(argptr, arg, 0);
5486 
5487     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5488 
5489     assert(host_rt_dev_ptr != NULL);
5490     assert(target_rt_dev_ptr != NULL);
5491     if (*host_rt_dev_ptr != 0) {
5492         unlock_user((void *)*host_rt_dev_ptr,
5493                     *target_rt_dev_ptr, 0);
5494     }
5495     return ret;
5496 }
5497 
5498 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5499                                      int fd, int cmd, abi_long arg)
5500 {
5501     int sig = target_to_host_signal(arg);
5502     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5503 }
5504 
5505 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5506                                     int fd, int cmd, abi_long arg)
5507 {
5508     struct timeval tv;
5509     abi_long ret;
5510 
5511     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5512     if (is_error(ret)) {
5513         return ret;
5514     }
5515 
5516     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5517         if (copy_to_user_timeval(arg, &tv)) {
5518             return -TARGET_EFAULT;
5519         }
5520     } else {
5521         if (copy_to_user_timeval64(arg, &tv)) {
5522             return -TARGET_EFAULT;
5523         }
5524     }
5525 
5526     return ret;
5527 }
5528 
5529 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5530                                       int fd, int cmd, abi_long arg)
5531 {
5532     struct timespec ts;
5533     abi_long ret;
5534 
5535     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5536     if (is_error(ret)) {
5537         return ret;
5538     }
5539 
5540     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5541         if (host_to_target_timespec(arg, &ts)) {
5542             return -TARGET_EFAULT;
5543         }
5544     } else{
5545         if (host_to_target_timespec64(arg, &ts)) {
5546             return -TARGET_EFAULT;
5547         }
5548     }
5549 
5550     return ret;
5551 }
5552 
5553 #ifdef TIOCGPTPEER
5554 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5555                                      int fd, int cmd, abi_long arg)
5556 {
5557     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5558     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5559 }
5560 #endif
5561 
5562 #ifdef HAVE_DRM_H
5563 
5564 static void unlock_drm_version(struct drm_version *host_ver,
5565                                struct target_drm_version *target_ver,
5566                                bool copy)
5567 {
5568     unlock_user(host_ver->name, target_ver->name,
5569                                 copy ? host_ver->name_len : 0);
5570     unlock_user(host_ver->date, target_ver->date,
5571                                 copy ? host_ver->date_len : 0);
5572     unlock_user(host_ver->desc, target_ver->desc,
5573                                 copy ? host_ver->desc_len : 0);
5574 }
5575 
5576 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5577                                           struct target_drm_version *target_ver)
5578 {
5579     memset(host_ver, 0, sizeof(*host_ver));
5580 
5581     __get_user(host_ver->name_len, &target_ver->name_len);
5582     if (host_ver->name_len) {
5583         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5584                                    target_ver->name_len, 0);
5585         if (!host_ver->name) {
5586             return -EFAULT;
5587         }
5588     }
5589 
5590     __get_user(host_ver->date_len, &target_ver->date_len);
5591     if (host_ver->date_len) {
5592         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5593                                    target_ver->date_len, 0);
5594         if (!host_ver->date) {
5595             goto err;
5596         }
5597     }
5598 
5599     __get_user(host_ver->desc_len, &target_ver->desc_len);
5600     if (host_ver->desc_len) {
5601         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5602                                    target_ver->desc_len, 0);
5603         if (!host_ver->desc) {
5604             goto err;
5605         }
5606     }
5607 
5608     return 0;
5609 err:
5610     unlock_drm_version(host_ver, target_ver, false);
5611     return -EFAULT;
5612 }
5613 
5614 static inline void host_to_target_drmversion(
5615                                           struct target_drm_version *target_ver,
5616                                           struct drm_version *host_ver)
5617 {
5618     __put_user(host_ver->version_major, &target_ver->version_major);
5619     __put_user(host_ver->version_minor, &target_ver->version_minor);
5620     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5621     __put_user(host_ver->name_len, &target_ver->name_len);
5622     __put_user(host_ver->date_len, &target_ver->date_len);
5623     __put_user(host_ver->desc_len, &target_ver->desc_len);
5624     unlock_drm_version(host_ver, target_ver, true);
5625 }
5626 
5627 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5628                              int fd, int cmd, abi_long arg)
5629 {
5630     struct drm_version *ver;
5631     struct target_drm_version *target_ver;
5632     abi_long ret;
5633 
5634     switch (ie->host_cmd) {
5635     case DRM_IOCTL_VERSION:
5636         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5637             return -TARGET_EFAULT;
5638         }
5639         ver = (struct drm_version *)buf_temp;
5640         ret = target_to_host_drmversion(ver, target_ver);
5641         if (!is_error(ret)) {
5642             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5643             if (is_error(ret)) {
5644                 unlock_drm_version(ver, target_ver, false);
5645             } else {
5646                 host_to_target_drmversion(target_ver, ver);
5647             }
5648         }
5649         unlock_user_struct(target_ver, arg, 0);
5650         return ret;
5651     }
5652     return -TARGET_ENOSYS;
5653 }
5654 
5655 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5656                                            struct drm_i915_getparam *gparam,
5657                                            int fd, abi_long arg)
5658 {
5659     abi_long ret;
5660     int value;
5661     struct target_drm_i915_getparam *target_gparam;
5662 
5663     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5664         return -TARGET_EFAULT;
5665     }
5666 
5667     __get_user(gparam->param, &target_gparam->param);
5668     gparam->value = &value;
5669     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5670     put_user_s32(value, target_gparam->value);
5671 
5672     unlock_user_struct(target_gparam, arg, 0);
5673     return ret;
5674 }
5675 
5676 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5677                                   int fd, int cmd, abi_long arg)
5678 {
5679     switch (ie->host_cmd) {
5680     case DRM_IOCTL_I915_GETPARAM:
5681         return do_ioctl_drm_i915_getparam(ie,
5682                                           (struct drm_i915_getparam *)buf_temp,
5683                                           fd, arg);
5684     default:
5685         return -TARGET_ENOSYS;
5686     }
5687 }
5688 
5689 #endif
5690 
5691 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5692                                         int fd, int cmd, abi_long arg)
5693 {
5694     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5695     struct tun_filter *target_filter;
5696     char *target_addr;
5697 
5698     assert(ie->access == IOC_W);
5699 
5700     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5701     if (!target_filter) {
5702         return -TARGET_EFAULT;
5703     }
5704     filter->flags = tswap16(target_filter->flags);
5705     filter->count = tswap16(target_filter->count);
5706     unlock_user(target_filter, arg, 0);
5707 
5708     if (filter->count) {
5709         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5710             MAX_STRUCT_SIZE) {
5711             return -TARGET_EFAULT;
5712         }
5713 
5714         target_addr = lock_user(VERIFY_READ,
5715                                 arg + offsetof(struct tun_filter, addr),
5716                                 filter->count * ETH_ALEN, 1);
5717         if (!target_addr) {
5718             return -TARGET_EFAULT;
5719         }
5720         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5721         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5722     }
5723 
5724     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5725 }
5726 
5727 IOCTLEntry ioctl_entries[] = {
5728 #define IOCTL(cmd, access, ...) \
5729     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5730 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5731     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5732 #define IOCTL_IGNORE(cmd) \
5733     { TARGET_ ## cmd, 0, #cmd },
5734 #include "ioctls.h"
5735     { 0, 0, },
5736 };
5737 
5738 /* ??? Implement proper locking for ioctls.  */
5739 /* do_ioctl() Must return target values and target errnos. */
5740 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5741 {
5742     const IOCTLEntry *ie;
5743     const argtype *arg_type;
5744     abi_long ret;
5745     uint8_t buf_temp[MAX_STRUCT_SIZE];
5746     int target_size;
5747     void *argptr;
5748 
5749     ie = ioctl_entries;
5750     for(;;) {
5751         if (ie->target_cmd == 0) {
5752             qemu_log_mask(
5753                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5754             return -TARGET_ENOSYS;
5755         }
5756         if (ie->target_cmd == cmd)
5757             break;
5758         ie++;
5759     }
5760     arg_type = ie->arg_type;
5761     if (ie->do_ioctl) {
5762         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5763     } else if (!ie->host_cmd) {
5764         /* Some architectures define BSD ioctls in their headers
5765            that are not implemented in Linux.  */
5766         return -TARGET_ENOSYS;
5767     }
5768 
5769     switch(arg_type[0]) {
5770     case TYPE_NULL:
5771         /* no argument */
5772         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5773         break;
5774     case TYPE_PTRVOID:
5775     case TYPE_INT:
5776     case TYPE_LONG:
5777     case TYPE_ULONG:
5778         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5779         break;
5780     case TYPE_PTR:
5781         arg_type++;
5782         target_size = thunk_type_size(arg_type, 0);
5783         switch(ie->access) {
5784         case IOC_R:
5785             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5786             if (!is_error(ret)) {
5787                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5788                 if (!argptr)
5789                     return -TARGET_EFAULT;
5790                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5791                 unlock_user(argptr, arg, target_size);
5792             }
5793             break;
5794         case IOC_W:
5795             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5796             if (!argptr)
5797                 return -TARGET_EFAULT;
5798             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5799             unlock_user(argptr, arg, 0);
5800             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5801             break;
5802         default:
5803         case IOC_RW:
5804             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5805             if (!argptr)
5806                 return -TARGET_EFAULT;
5807             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5808             unlock_user(argptr, arg, 0);
5809             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5810             if (!is_error(ret)) {
5811                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5812                 if (!argptr)
5813                     return -TARGET_EFAULT;
5814                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5815                 unlock_user(argptr, arg, target_size);
5816             }
5817             break;
5818         }
5819         break;
5820     default:
5821         qemu_log_mask(LOG_UNIMP,
5822                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5823                       (long)cmd, arg_type[0]);
5824         ret = -TARGET_ENOSYS;
5825         break;
5826     }
5827     return ret;
5828 }
5829 
5830 static const bitmask_transtbl iflag_tbl[] = {
5831         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5832         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5833         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5834         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5835         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5836         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5837         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5838         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5839         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5840         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5841         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5842         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5843         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5844         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5845         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5846         { 0, 0, 0, 0 }
5847 };
5848 
5849 static const bitmask_transtbl oflag_tbl[] = {
5850 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5851 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5852 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5853 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5854 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5855 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5856 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5857 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5858 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5859 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5860 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5861 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5862 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5863 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5864 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5865 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5866 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5867 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5868 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5869 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5870 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5871 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5872 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5873 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5874 	{ 0, 0, 0, 0 }
5875 };
5876 
5877 static const bitmask_transtbl cflag_tbl[] = {
5878 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5879 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5880 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5881 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5882 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5883 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5884 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5885 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5886 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5887 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5888 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5889 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5890 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5891 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5892 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5893 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5894 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5895 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5896 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5897 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5898 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5899 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5900 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5901 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5902 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5903 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5904 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5905 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5906 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5907 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5908 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5909 	{ 0, 0, 0, 0 }
5910 };
5911 
5912 static const bitmask_transtbl lflag_tbl[] = {
5913   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5914   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5915   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5916   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5917   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5918   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5919   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5920   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5921   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5922   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5923   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5924   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5925   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5926   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5927   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5928   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5929   { 0, 0, 0, 0 }
5930 };
5931 
5932 static void target_to_host_termios (void *dst, const void *src)
5933 {
5934     struct host_termios *host = dst;
5935     const struct target_termios *target = src;
5936 
5937     host->c_iflag =
5938         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5939     host->c_oflag =
5940         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5941     host->c_cflag =
5942         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5943     host->c_lflag =
5944         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5945     host->c_line = target->c_line;
5946 
5947     memset(host->c_cc, 0, sizeof(host->c_cc));
5948     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5949     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5950     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5951     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5952     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5953     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5954     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5955     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5956     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5957     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5958     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5959     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5960     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5961     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5962     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5963     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5964     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5965 }
5966 
5967 static void host_to_target_termios (void *dst, const void *src)
5968 {
5969     struct target_termios *target = dst;
5970     const struct host_termios *host = src;
5971 
5972     target->c_iflag =
5973         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5974     target->c_oflag =
5975         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5976     target->c_cflag =
5977         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5978     target->c_lflag =
5979         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5980     target->c_line = host->c_line;
5981 
5982     memset(target->c_cc, 0, sizeof(target->c_cc));
5983     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5984     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5985     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5986     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5987     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5988     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5989     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5990     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5991     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5992     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5993     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5994     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5995     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5996     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5997     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5998     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5999     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6000 }
6001 
6002 static const StructEntry struct_termios_def = {
6003     .convert = { host_to_target_termios, target_to_host_termios },
6004     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6005     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6006     .print = print_termios,
6007 };
6008 
6009 static const bitmask_transtbl mmap_flags_tbl[] = {
6010     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6011     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6012     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6013     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6014       MAP_ANONYMOUS, MAP_ANONYMOUS },
6015     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6016       MAP_GROWSDOWN, MAP_GROWSDOWN },
6017     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6018       MAP_DENYWRITE, MAP_DENYWRITE },
6019     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6020       MAP_EXECUTABLE, MAP_EXECUTABLE },
6021     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6022     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6023       MAP_NORESERVE, MAP_NORESERVE },
6024     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6025     /* MAP_STACK had been ignored by the kernel for quite some time.
6026        Recognize it for the target insofar as we do not want to pass
6027        it through to the host.  */
6028     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6029     { 0, 0, 0, 0 }
6030 };
6031 
6032 /*
6033  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6034  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6035  */
6036 #if defined(TARGET_I386)
6037 
6038 /* NOTE: there is really one LDT for all the threads */
6039 static uint8_t *ldt_table;
6040 
6041 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6042 {
6043     int size;
6044     void *p;
6045 
6046     if (!ldt_table)
6047         return 0;
6048     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6049     if (size > bytecount)
6050         size = bytecount;
6051     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6052     if (!p)
6053         return -TARGET_EFAULT;
6054     /* ??? Should this by byteswapped?  */
6055     memcpy(p, ldt_table, size);
6056     unlock_user(p, ptr, size);
6057     return size;
6058 }
6059 
6060 /* XXX: add locking support */
6061 static abi_long write_ldt(CPUX86State *env,
6062                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6063 {
6064     struct target_modify_ldt_ldt_s ldt_info;
6065     struct target_modify_ldt_ldt_s *target_ldt_info;
6066     int seg_32bit, contents, read_exec_only, limit_in_pages;
6067     int seg_not_present, useable, lm;
6068     uint32_t *lp, entry_1, entry_2;
6069 
6070     if (bytecount != sizeof(ldt_info))
6071         return -TARGET_EINVAL;
6072     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6073         return -TARGET_EFAULT;
6074     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6075     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6076     ldt_info.limit = tswap32(target_ldt_info->limit);
6077     ldt_info.flags = tswap32(target_ldt_info->flags);
6078     unlock_user_struct(target_ldt_info, ptr, 0);
6079 
6080     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6081         return -TARGET_EINVAL;
6082     seg_32bit = ldt_info.flags & 1;
6083     contents = (ldt_info.flags >> 1) & 3;
6084     read_exec_only = (ldt_info.flags >> 3) & 1;
6085     limit_in_pages = (ldt_info.flags >> 4) & 1;
6086     seg_not_present = (ldt_info.flags >> 5) & 1;
6087     useable = (ldt_info.flags >> 6) & 1;
6088 #ifdef TARGET_ABI32
6089     lm = 0;
6090 #else
6091     lm = (ldt_info.flags >> 7) & 1;
6092 #endif
6093     if (contents == 3) {
6094         if (oldmode)
6095             return -TARGET_EINVAL;
6096         if (seg_not_present == 0)
6097             return -TARGET_EINVAL;
6098     }
6099     /* allocate the LDT */
6100     if (!ldt_table) {
6101         env->ldt.base = target_mmap(0,
6102                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6103                                     PROT_READ|PROT_WRITE,
6104                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6105         if (env->ldt.base == -1)
6106             return -TARGET_ENOMEM;
6107         memset(g2h_untagged(env->ldt.base), 0,
6108                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6109         env->ldt.limit = 0xffff;
6110         ldt_table = g2h_untagged(env->ldt.base);
6111     }
6112 
6113     /* NOTE: same code as Linux kernel */
6114     /* Allow LDTs to be cleared by the user. */
6115     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6116         if (oldmode ||
6117             (contents == 0		&&
6118              read_exec_only == 1	&&
6119              seg_32bit == 0		&&
6120              limit_in_pages == 0	&&
6121              seg_not_present == 1	&&
6122              useable == 0 )) {
6123             entry_1 = 0;
6124             entry_2 = 0;
6125             goto install;
6126         }
6127     }
6128 
6129     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6130         (ldt_info.limit & 0x0ffff);
6131     entry_2 = (ldt_info.base_addr & 0xff000000) |
6132         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6133         (ldt_info.limit & 0xf0000) |
6134         ((read_exec_only ^ 1) << 9) |
6135         (contents << 10) |
6136         ((seg_not_present ^ 1) << 15) |
6137         (seg_32bit << 22) |
6138         (limit_in_pages << 23) |
6139         (lm << 21) |
6140         0x7000;
6141     if (!oldmode)
6142         entry_2 |= (useable << 20);
6143 
6144     /* Install the new entry ...  */
6145 install:
6146     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6147     lp[0] = tswap32(entry_1);
6148     lp[1] = tswap32(entry_2);
6149     return 0;
6150 }
6151 
6152 /* specific and weird i386 syscalls */
6153 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6154                               unsigned long bytecount)
6155 {
6156     abi_long ret;
6157 
6158     switch (func) {
6159     case 0:
6160         ret = read_ldt(ptr, bytecount);
6161         break;
6162     case 1:
6163         ret = write_ldt(env, ptr, bytecount, 1);
6164         break;
6165     case 0x11:
6166         ret = write_ldt(env, ptr, bytecount, 0);
6167         break;
6168     default:
6169         ret = -TARGET_ENOSYS;
6170         break;
6171     }
6172     return ret;
6173 }
6174 
6175 #if defined(TARGET_ABI32)
6176 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6177 {
6178     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6179     struct target_modify_ldt_ldt_s ldt_info;
6180     struct target_modify_ldt_ldt_s *target_ldt_info;
6181     int seg_32bit, contents, read_exec_only, limit_in_pages;
6182     int seg_not_present, useable, lm;
6183     uint32_t *lp, entry_1, entry_2;
6184     int i;
6185 
6186     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6187     if (!target_ldt_info)
6188         return -TARGET_EFAULT;
6189     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6190     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6191     ldt_info.limit = tswap32(target_ldt_info->limit);
6192     ldt_info.flags = tswap32(target_ldt_info->flags);
6193     if (ldt_info.entry_number == -1) {
6194         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6195             if (gdt_table[i] == 0) {
6196                 ldt_info.entry_number = i;
6197                 target_ldt_info->entry_number = tswap32(i);
6198                 break;
6199             }
6200         }
6201     }
6202     unlock_user_struct(target_ldt_info, ptr, 1);
6203 
6204     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6205         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6206            return -TARGET_EINVAL;
6207     seg_32bit = ldt_info.flags & 1;
6208     contents = (ldt_info.flags >> 1) & 3;
6209     read_exec_only = (ldt_info.flags >> 3) & 1;
6210     limit_in_pages = (ldt_info.flags >> 4) & 1;
6211     seg_not_present = (ldt_info.flags >> 5) & 1;
6212     useable = (ldt_info.flags >> 6) & 1;
6213 #ifdef TARGET_ABI32
6214     lm = 0;
6215 #else
6216     lm = (ldt_info.flags >> 7) & 1;
6217 #endif
6218 
6219     if (contents == 3) {
6220         if (seg_not_present == 0)
6221             return -TARGET_EINVAL;
6222     }
6223 
6224     /* NOTE: same code as Linux kernel */
6225     /* Allow LDTs to be cleared by the user. */
6226     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6227         if ((contents == 0             &&
6228              read_exec_only == 1       &&
6229              seg_32bit == 0            &&
6230              limit_in_pages == 0       &&
6231              seg_not_present == 1      &&
6232              useable == 0 )) {
6233             entry_1 = 0;
6234             entry_2 = 0;
6235             goto install;
6236         }
6237     }
6238 
6239     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6240         (ldt_info.limit & 0x0ffff);
6241     entry_2 = (ldt_info.base_addr & 0xff000000) |
6242         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6243         (ldt_info.limit & 0xf0000) |
6244         ((read_exec_only ^ 1) << 9) |
6245         (contents << 10) |
6246         ((seg_not_present ^ 1) << 15) |
6247         (seg_32bit << 22) |
6248         (limit_in_pages << 23) |
6249         (useable << 20) |
6250         (lm << 21) |
6251         0x7000;
6252 
6253     /* Install the new entry ...  */
6254 install:
6255     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6256     lp[0] = tswap32(entry_1);
6257     lp[1] = tswap32(entry_2);
6258     return 0;
6259 }
6260 
6261 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6262 {
6263     struct target_modify_ldt_ldt_s *target_ldt_info;
6264     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6265     uint32_t base_addr, limit, flags;
6266     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6267     int seg_not_present, useable, lm;
6268     uint32_t *lp, entry_1, entry_2;
6269 
6270     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6271     if (!target_ldt_info)
6272         return -TARGET_EFAULT;
6273     idx = tswap32(target_ldt_info->entry_number);
6274     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6275         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6276         unlock_user_struct(target_ldt_info, ptr, 1);
6277         return -TARGET_EINVAL;
6278     }
6279     lp = (uint32_t *)(gdt_table + idx);
6280     entry_1 = tswap32(lp[0]);
6281     entry_2 = tswap32(lp[1]);
6282 
6283     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6284     contents = (entry_2 >> 10) & 3;
6285     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6286     seg_32bit = (entry_2 >> 22) & 1;
6287     limit_in_pages = (entry_2 >> 23) & 1;
6288     useable = (entry_2 >> 20) & 1;
6289 #ifdef TARGET_ABI32
6290     lm = 0;
6291 #else
6292     lm = (entry_2 >> 21) & 1;
6293 #endif
6294     flags = (seg_32bit << 0) | (contents << 1) |
6295         (read_exec_only << 3) | (limit_in_pages << 4) |
6296         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6297     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6298     base_addr = (entry_1 >> 16) |
6299         (entry_2 & 0xff000000) |
6300         ((entry_2 & 0xff) << 16);
6301     target_ldt_info->base_addr = tswapal(base_addr);
6302     target_ldt_info->limit = tswap32(limit);
6303     target_ldt_info->flags = tswap32(flags);
6304     unlock_user_struct(target_ldt_info, ptr, 1);
6305     return 0;
6306 }
6307 
6308 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6309 {
6310     return -TARGET_ENOSYS;
6311 }
6312 #else
6313 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6314 {
6315     abi_long ret = 0;
6316     abi_ulong val;
6317     int idx;
6318 
6319     switch(code) {
6320     case TARGET_ARCH_SET_GS:
6321     case TARGET_ARCH_SET_FS:
6322         if (code == TARGET_ARCH_SET_GS)
6323             idx = R_GS;
6324         else
6325             idx = R_FS;
6326         cpu_x86_load_seg(env, idx, 0);
6327         env->segs[idx].base = addr;
6328         break;
6329     case TARGET_ARCH_GET_GS:
6330     case TARGET_ARCH_GET_FS:
6331         if (code == TARGET_ARCH_GET_GS)
6332             idx = R_GS;
6333         else
6334             idx = R_FS;
6335         val = env->segs[idx].base;
6336         if (put_user(val, addr, abi_ulong))
6337             ret = -TARGET_EFAULT;
6338         break;
6339     default:
6340         ret = -TARGET_EINVAL;
6341         break;
6342     }
6343     return ret;
6344 }
6345 #endif /* defined(TARGET_ABI32 */
6346 #endif /* defined(TARGET_I386) */
6347 
6348 /*
6349  * These constants are generic.  Supply any that are missing from the host.
6350  */
6351 #ifndef PR_SET_NAME
6352 # define PR_SET_NAME    15
6353 # define PR_GET_NAME    16
6354 #endif
6355 #ifndef PR_SET_FP_MODE
6356 # define PR_SET_FP_MODE 45
6357 # define PR_GET_FP_MODE 46
6358 # define PR_FP_MODE_FR   (1 << 0)
6359 # define PR_FP_MODE_FRE  (1 << 1)
6360 #endif
6361 #ifndef PR_SVE_SET_VL
6362 # define PR_SVE_SET_VL  50
6363 # define PR_SVE_GET_VL  51
6364 # define PR_SVE_VL_LEN_MASK  0xffff
6365 # define PR_SVE_VL_INHERIT   (1 << 17)
6366 #endif
6367 #ifndef PR_PAC_RESET_KEYS
6368 # define PR_PAC_RESET_KEYS  54
6369 # define PR_PAC_APIAKEY   (1 << 0)
6370 # define PR_PAC_APIBKEY   (1 << 1)
6371 # define PR_PAC_APDAKEY   (1 << 2)
6372 # define PR_PAC_APDBKEY   (1 << 3)
6373 # define PR_PAC_APGAKEY   (1 << 4)
6374 #endif
6375 #ifndef PR_SET_TAGGED_ADDR_CTRL
6376 # define PR_SET_TAGGED_ADDR_CTRL 55
6377 # define PR_GET_TAGGED_ADDR_CTRL 56
6378 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6379 #endif
6380 #ifndef PR_MTE_TCF_SHIFT
6381 # define PR_MTE_TCF_SHIFT       1
6382 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6383 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6384 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6385 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6386 # define PR_MTE_TAG_SHIFT       3
6387 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6388 #endif
6389 #ifndef PR_SET_IO_FLUSHER
6390 # define PR_SET_IO_FLUSHER 57
6391 # define PR_GET_IO_FLUSHER 58
6392 #endif
6393 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6394 # define PR_SET_SYSCALL_USER_DISPATCH 59
6395 #endif
6396 #ifndef PR_SME_SET_VL
6397 # define PR_SME_SET_VL  63
6398 # define PR_SME_GET_VL  64
6399 # define PR_SME_VL_LEN_MASK  0xffff
6400 # define PR_SME_VL_INHERIT   (1 << 17)
6401 #endif
6402 
6403 #include "target_prctl.h"
6404 
6405 static abi_long do_prctl_inval0(CPUArchState *env)
6406 {
6407     return -TARGET_EINVAL;
6408 }
6409 
6410 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6411 {
6412     return -TARGET_EINVAL;
6413 }
6414 
6415 #ifndef do_prctl_get_fp_mode
6416 #define do_prctl_get_fp_mode do_prctl_inval0
6417 #endif
6418 #ifndef do_prctl_set_fp_mode
6419 #define do_prctl_set_fp_mode do_prctl_inval1
6420 #endif
6421 #ifndef do_prctl_sve_get_vl
6422 #define do_prctl_sve_get_vl do_prctl_inval0
6423 #endif
6424 #ifndef do_prctl_sve_set_vl
6425 #define do_prctl_sve_set_vl do_prctl_inval1
6426 #endif
6427 #ifndef do_prctl_reset_keys
6428 #define do_prctl_reset_keys do_prctl_inval1
6429 #endif
6430 #ifndef do_prctl_set_tagged_addr_ctrl
6431 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6432 #endif
6433 #ifndef do_prctl_get_tagged_addr_ctrl
6434 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6435 #endif
6436 #ifndef do_prctl_get_unalign
6437 #define do_prctl_get_unalign do_prctl_inval1
6438 #endif
6439 #ifndef do_prctl_set_unalign
6440 #define do_prctl_set_unalign do_prctl_inval1
6441 #endif
6442 #ifndef do_prctl_sme_get_vl
6443 #define do_prctl_sme_get_vl do_prctl_inval0
6444 #endif
6445 #ifndef do_prctl_sme_set_vl
6446 #define do_prctl_sme_set_vl do_prctl_inval1
6447 #endif
6448 
6449 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6450                          abi_long arg3, abi_long arg4, abi_long arg5)
6451 {
6452     abi_long ret;
6453 
6454     switch (option) {
6455     case PR_GET_PDEATHSIG:
6456         {
6457             int deathsig;
6458             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6459                                   arg3, arg4, arg5));
6460             if (!is_error(ret) &&
6461                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6462                 return -TARGET_EFAULT;
6463             }
6464             return ret;
6465         }
6466     case PR_SET_PDEATHSIG:
6467         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6468                                arg3, arg4, arg5));
6469     case PR_GET_NAME:
6470         {
6471             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6472             if (!name) {
6473                 return -TARGET_EFAULT;
6474             }
6475             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6476                                   arg3, arg4, arg5));
6477             unlock_user(name, arg2, 16);
6478             return ret;
6479         }
6480     case PR_SET_NAME:
6481         {
6482             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6483             if (!name) {
6484                 return -TARGET_EFAULT;
6485             }
6486             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6487                                   arg3, arg4, arg5));
6488             unlock_user(name, arg2, 0);
6489             return ret;
6490         }
6491     case PR_GET_FP_MODE:
6492         return do_prctl_get_fp_mode(env);
6493     case PR_SET_FP_MODE:
6494         return do_prctl_set_fp_mode(env, arg2);
6495     case PR_SVE_GET_VL:
6496         return do_prctl_sve_get_vl(env);
6497     case PR_SVE_SET_VL:
6498         return do_prctl_sve_set_vl(env, arg2);
6499     case PR_SME_GET_VL:
6500         return do_prctl_sme_get_vl(env);
6501     case PR_SME_SET_VL:
6502         return do_prctl_sme_set_vl(env, arg2);
6503     case PR_PAC_RESET_KEYS:
6504         if (arg3 || arg4 || arg5) {
6505             return -TARGET_EINVAL;
6506         }
6507         return do_prctl_reset_keys(env, arg2);
6508     case PR_SET_TAGGED_ADDR_CTRL:
6509         if (arg3 || arg4 || arg5) {
6510             return -TARGET_EINVAL;
6511         }
6512         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6513     case PR_GET_TAGGED_ADDR_CTRL:
6514         if (arg2 || arg3 || arg4 || arg5) {
6515             return -TARGET_EINVAL;
6516         }
6517         return do_prctl_get_tagged_addr_ctrl(env);
6518 
6519     case PR_GET_UNALIGN:
6520         return do_prctl_get_unalign(env, arg2);
6521     case PR_SET_UNALIGN:
6522         return do_prctl_set_unalign(env, arg2);
6523 
6524     case PR_CAP_AMBIENT:
6525     case PR_CAPBSET_READ:
6526     case PR_CAPBSET_DROP:
6527     case PR_GET_DUMPABLE:
6528     case PR_SET_DUMPABLE:
6529     case PR_GET_KEEPCAPS:
6530     case PR_SET_KEEPCAPS:
6531     case PR_GET_SECUREBITS:
6532     case PR_SET_SECUREBITS:
6533     case PR_GET_TIMING:
6534     case PR_SET_TIMING:
6535     case PR_GET_TIMERSLACK:
6536     case PR_SET_TIMERSLACK:
6537     case PR_MCE_KILL:
6538     case PR_MCE_KILL_GET:
6539     case PR_GET_NO_NEW_PRIVS:
6540     case PR_SET_NO_NEW_PRIVS:
6541     case PR_GET_IO_FLUSHER:
6542     case PR_SET_IO_FLUSHER:
6543         /* Some prctl options have no pointer arguments and we can pass on. */
6544         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6545 
6546     case PR_GET_CHILD_SUBREAPER:
6547     case PR_SET_CHILD_SUBREAPER:
6548     case PR_GET_SPECULATION_CTRL:
6549     case PR_SET_SPECULATION_CTRL:
6550     case PR_GET_TID_ADDRESS:
6551         /* TODO */
6552         return -TARGET_EINVAL;
6553 
6554     case PR_GET_FPEXC:
6555     case PR_SET_FPEXC:
6556         /* Was used for SPE on PowerPC. */
6557         return -TARGET_EINVAL;
6558 
6559     case PR_GET_ENDIAN:
6560     case PR_SET_ENDIAN:
6561     case PR_GET_FPEMU:
6562     case PR_SET_FPEMU:
6563     case PR_SET_MM:
6564     case PR_GET_SECCOMP:
6565     case PR_SET_SECCOMP:
6566     case PR_SET_SYSCALL_USER_DISPATCH:
6567     case PR_GET_THP_DISABLE:
6568     case PR_SET_THP_DISABLE:
6569     case PR_GET_TSC:
6570     case PR_SET_TSC:
6571         /* Disable to prevent the target disabling stuff we need. */
6572         return -TARGET_EINVAL;
6573 
6574     default:
6575         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6576                       option);
6577         return -TARGET_EINVAL;
6578     }
6579 }
6580 
6581 #define NEW_STACK_SIZE 0x40000
6582 
6583 
6584 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6585 typedef struct {
6586     CPUArchState *env;
6587     pthread_mutex_t mutex;
6588     pthread_cond_t cond;
6589     pthread_t thread;
6590     uint32_t tid;
6591     abi_ulong child_tidptr;
6592     abi_ulong parent_tidptr;
6593     sigset_t sigmask;
6594 } new_thread_info;
6595 
6596 static void *clone_func(void *arg)
6597 {
6598     new_thread_info *info = arg;
6599     CPUArchState *env;
6600     CPUState *cpu;
6601     TaskState *ts;
6602 
6603     rcu_register_thread();
6604     tcg_register_thread();
6605     env = info->env;
6606     cpu = env_cpu(env);
6607     thread_cpu = cpu;
6608     ts = (TaskState *)cpu->opaque;
6609     info->tid = sys_gettid();
6610     task_settid(ts);
6611     if (info->child_tidptr)
6612         put_user_u32(info->tid, info->child_tidptr);
6613     if (info->parent_tidptr)
6614         put_user_u32(info->tid, info->parent_tidptr);
6615     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6616     /* Enable signals.  */
6617     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6618     /* Signal to the parent that we're ready.  */
6619     pthread_mutex_lock(&info->mutex);
6620     pthread_cond_broadcast(&info->cond);
6621     pthread_mutex_unlock(&info->mutex);
6622     /* Wait until the parent has finished initializing the tls state.  */
6623     pthread_mutex_lock(&clone_lock);
6624     pthread_mutex_unlock(&clone_lock);
6625     cpu_loop(env);
6626     /* never exits */
6627     return NULL;
6628 }
6629 
6630 /* do_fork() Must return host values and target errnos (unlike most
6631    do_*() functions). */
6632 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6633                    abi_ulong parent_tidptr, target_ulong newtls,
6634                    abi_ulong child_tidptr)
6635 {
6636     CPUState *cpu = env_cpu(env);
6637     int ret;
6638     TaskState *ts;
6639     CPUState *new_cpu;
6640     CPUArchState *new_env;
6641     sigset_t sigmask;
6642 
6643     flags &= ~CLONE_IGNORED_FLAGS;
6644 
6645     /* Emulate vfork() with fork() */
6646     if (flags & CLONE_VFORK)
6647         flags &= ~(CLONE_VFORK | CLONE_VM);
6648 
6649     if (flags & CLONE_VM) {
6650         TaskState *parent_ts = (TaskState *)cpu->opaque;
6651         new_thread_info info;
6652         pthread_attr_t attr;
6653 
6654         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6655             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6656             return -TARGET_EINVAL;
6657         }
6658 
6659         ts = g_new0(TaskState, 1);
6660         init_task_state(ts);
6661 
6662         /* Grab a mutex so that thread setup appears atomic.  */
6663         pthread_mutex_lock(&clone_lock);
6664 
6665         /*
6666          * If this is our first additional thread, we need to ensure we
6667          * generate code for parallel execution and flush old translations.
6668          * Do this now so that the copy gets CF_PARALLEL too.
6669          */
6670         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6671             cpu->tcg_cflags |= CF_PARALLEL;
6672             tb_flush(cpu);
6673         }
6674 
6675         /* we create a new CPU instance. */
6676         new_env = cpu_copy(env);
6677         /* Init regs that differ from the parent.  */
6678         cpu_clone_regs_child(new_env, newsp, flags);
6679         cpu_clone_regs_parent(env, flags);
6680         new_cpu = env_cpu(new_env);
6681         new_cpu->opaque = ts;
6682         ts->bprm = parent_ts->bprm;
6683         ts->info = parent_ts->info;
6684         ts->signal_mask = parent_ts->signal_mask;
6685 
6686         if (flags & CLONE_CHILD_CLEARTID) {
6687             ts->child_tidptr = child_tidptr;
6688         }
6689 
6690         if (flags & CLONE_SETTLS) {
6691             cpu_set_tls (new_env, newtls);
6692         }
6693 
6694         memset(&info, 0, sizeof(info));
6695         pthread_mutex_init(&info.mutex, NULL);
6696         pthread_mutex_lock(&info.mutex);
6697         pthread_cond_init(&info.cond, NULL);
6698         info.env = new_env;
6699         if (flags & CLONE_CHILD_SETTID) {
6700             info.child_tidptr = child_tidptr;
6701         }
6702         if (flags & CLONE_PARENT_SETTID) {
6703             info.parent_tidptr = parent_tidptr;
6704         }
6705 
6706         ret = pthread_attr_init(&attr);
6707         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6708         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6709         /* It is not safe to deliver signals until the child has finished
6710            initializing, so temporarily block all signals.  */
6711         sigfillset(&sigmask);
6712         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6713         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6714 
6715         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6716         /* TODO: Free new CPU state if thread creation failed.  */
6717 
6718         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6719         pthread_attr_destroy(&attr);
6720         if (ret == 0) {
6721             /* Wait for the child to initialize.  */
6722             pthread_cond_wait(&info.cond, &info.mutex);
6723             ret = info.tid;
6724         } else {
6725             ret = -1;
6726         }
6727         pthread_mutex_unlock(&info.mutex);
6728         pthread_cond_destroy(&info.cond);
6729         pthread_mutex_destroy(&info.mutex);
6730         pthread_mutex_unlock(&clone_lock);
6731     } else {
6732         /* if no CLONE_VM, we consider it is a fork */
6733         if (flags & CLONE_INVALID_FORK_FLAGS) {
6734             return -TARGET_EINVAL;
6735         }
6736 
6737         /* We can't support custom termination signals */
6738         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6739             return -TARGET_EINVAL;
6740         }
6741 
6742         if (block_signals()) {
6743             return -QEMU_ERESTARTSYS;
6744         }
6745 
6746         fork_start();
6747         ret = fork();
6748         if (ret == 0) {
6749             /* Child Process.  */
6750             cpu_clone_regs_child(env, newsp, flags);
6751             fork_end(1);
6752             /* There is a race condition here.  The parent process could
6753                theoretically read the TID in the child process before the child
6754                tid is set.  This would require using either ptrace
6755                (not implemented) or having *_tidptr to point at a shared memory
6756                mapping.  We can't repeat the spinlock hack used above because
6757                the child process gets its own copy of the lock.  */
6758             if (flags & CLONE_CHILD_SETTID)
6759                 put_user_u32(sys_gettid(), child_tidptr);
6760             if (flags & CLONE_PARENT_SETTID)
6761                 put_user_u32(sys_gettid(), parent_tidptr);
6762             ts = (TaskState *)cpu->opaque;
6763             if (flags & CLONE_SETTLS)
6764                 cpu_set_tls (env, newtls);
6765             if (flags & CLONE_CHILD_CLEARTID)
6766                 ts->child_tidptr = child_tidptr;
6767         } else {
6768             cpu_clone_regs_parent(env, flags);
6769             fork_end(0);
6770         }
6771     }
6772     return ret;
6773 }
6774 
6775 /* warning : doesn't handle linux specific flags... */
6776 static int target_to_host_fcntl_cmd(int cmd)
6777 {
6778     int ret;
6779 
6780     switch(cmd) {
6781     case TARGET_F_DUPFD:
6782     case TARGET_F_GETFD:
6783     case TARGET_F_SETFD:
6784     case TARGET_F_GETFL:
6785     case TARGET_F_SETFL:
6786     case TARGET_F_OFD_GETLK:
6787     case TARGET_F_OFD_SETLK:
6788     case TARGET_F_OFD_SETLKW:
6789         ret = cmd;
6790         break;
6791     case TARGET_F_GETLK:
6792         ret = F_GETLK64;
6793         break;
6794     case TARGET_F_SETLK:
6795         ret = F_SETLK64;
6796         break;
6797     case TARGET_F_SETLKW:
6798         ret = F_SETLKW64;
6799         break;
6800     case TARGET_F_GETOWN:
6801         ret = F_GETOWN;
6802         break;
6803     case TARGET_F_SETOWN:
6804         ret = F_SETOWN;
6805         break;
6806     case TARGET_F_GETSIG:
6807         ret = F_GETSIG;
6808         break;
6809     case TARGET_F_SETSIG:
6810         ret = F_SETSIG;
6811         break;
6812 #if TARGET_ABI_BITS == 32
6813     case TARGET_F_GETLK64:
6814         ret = F_GETLK64;
6815         break;
6816     case TARGET_F_SETLK64:
6817         ret = F_SETLK64;
6818         break;
6819     case TARGET_F_SETLKW64:
6820         ret = F_SETLKW64;
6821         break;
6822 #endif
6823     case TARGET_F_SETLEASE:
6824         ret = F_SETLEASE;
6825         break;
6826     case TARGET_F_GETLEASE:
6827         ret = F_GETLEASE;
6828         break;
6829 #ifdef F_DUPFD_CLOEXEC
6830     case TARGET_F_DUPFD_CLOEXEC:
6831         ret = F_DUPFD_CLOEXEC;
6832         break;
6833 #endif
6834     case TARGET_F_NOTIFY:
6835         ret = F_NOTIFY;
6836         break;
6837 #ifdef F_GETOWN_EX
6838     case TARGET_F_GETOWN_EX:
6839         ret = F_GETOWN_EX;
6840         break;
6841 #endif
6842 #ifdef F_SETOWN_EX
6843     case TARGET_F_SETOWN_EX:
6844         ret = F_SETOWN_EX;
6845         break;
6846 #endif
6847 #ifdef F_SETPIPE_SZ
6848     case TARGET_F_SETPIPE_SZ:
6849         ret = F_SETPIPE_SZ;
6850         break;
6851     case TARGET_F_GETPIPE_SZ:
6852         ret = F_GETPIPE_SZ;
6853         break;
6854 #endif
6855 #ifdef F_ADD_SEALS
6856     case TARGET_F_ADD_SEALS:
6857         ret = F_ADD_SEALS;
6858         break;
6859     case TARGET_F_GET_SEALS:
6860         ret = F_GET_SEALS;
6861         break;
6862 #endif
6863     default:
6864         ret = -TARGET_EINVAL;
6865         break;
6866     }
6867 
6868 #if defined(__powerpc64__)
6869     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6870      * is not supported by kernel. The glibc fcntl call actually adjusts
6871      * them to 5, 6 and 7 before making the syscall(). Since we make the
6872      * syscall directly, adjust to what is supported by the kernel.
6873      */
6874     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6875         ret -= F_GETLK64 - 5;
6876     }
6877 #endif
6878 
6879     return ret;
6880 }
6881 
6882 #define FLOCK_TRANSTBL \
6883     switch (type) { \
6884     TRANSTBL_CONVERT(F_RDLCK); \
6885     TRANSTBL_CONVERT(F_WRLCK); \
6886     TRANSTBL_CONVERT(F_UNLCK); \
6887     }
6888 
6889 static int target_to_host_flock(int type)
6890 {
6891 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6892     FLOCK_TRANSTBL
6893 #undef  TRANSTBL_CONVERT
6894     return -TARGET_EINVAL;
6895 }
6896 
6897 static int host_to_target_flock(int type)
6898 {
6899 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6900     FLOCK_TRANSTBL
6901 #undef  TRANSTBL_CONVERT
6902     /* if we don't know how to convert the value coming
6903      * from the host we copy to the target field as-is
6904      */
6905     return type;
6906 }
6907 
6908 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6909                                             abi_ulong target_flock_addr)
6910 {
6911     struct target_flock *target_fl;
6912     int l_type;
6913 
6914     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6915         return -TARGET_EFAULT;
6916     }
6917 
6918     __get_user(l_type, &target_fl->l_type);
6919     l_type = target_to_host_flock(l_type);
6920     if (l_type < 0) {
6921         return l_type;
6922     }
6923     fl->l_type = l_type;
6924     __get_user(fl->l_whence, &target_fl->l_whence);
6925     __get_user(fl->l_start, &target_fl->l_start);
6926     __get_user(fl->l_len, &target_fl->l_len);
6927     __get_user(fl->l_pid, &target_fl->l_pid);
6928     unlock_user_struct(target_fl, target_flock_addr, 0);
6929     return 0;
6930 }
6931 
6932 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6933                                           const struct flock64 *fl)
6934 {
6935     struct target_flock *target_fl;
6936     short l_type;
6937 
6938     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6939         return -TARGET_EFAULT;
6940     }
6941 
6942     l_type = host_to_target_flock(fl->l_type);
6943     __put_user(l_type, &target_fl->l_type);
6944     __put_user(fl->l_whence, &target_fl->l_whence);
6945     __put_user(fl->l_start, &target_fl->l_start);
6946     __put_user(fl->l_len, &target_fl->l_len);
6947     __put_user(fl->l_pid, &target_fl->l_pid);
6948     unlock_user_struct(target_fl, target_flock_addr, 1);
6949     return 0;
6950 }
6951 
6952 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6953 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6954 
6955 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6956 struct target_oabi_flock64 {
6957     abi_short l_type;
6958     abi_short l_whence;
6959     abi_llong l_start;
6960     abi_llong l_len;
6961     abi_int   l_pid;
6962 } QEMU_PACKED;
6963 
6964 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6965                                                    abi_ulong target_flock_addr)
6966 {
6967     struct target_oabi_flock64 *target_fl;
6968     int l_type;
6969 
6970     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6971         return -TARGET_EFAULT;
6972     }
6973 
6974     __get_user(l_type, &target_fl->l_type);
6975     l_type = target_to_host_flock(l_type);
6976     if (l_type < 0) {
6977         return l_type;
6978     }
6979     fl->l_type = l_type;
6980     __get_user(fl->l_whence, &target_fl->l_whence);
6981     __get_user(fl->l_start, &target_fl->l_start);
6982     __get_user(fl->l_len, &target_fl->l_len);
6983     __get_user(fl->l_pid, &target_fl->l_pid);
6984     unlock_user_struct(target_fl, target_flock_addr, 0);
6985     return 0;
6986 }
6987 
6988 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6989                                                  const struct flock64 *fl)
6990 {
6991     struct target_oabi_flock64 *target_fl;
6992     short l_type;
6993 
6994     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6995         return -TARGET_EFAULT;
6996     }
6997 
6998     l_type = host_to_target_flock(fl->l_type);
6999     __put_user(l_type, &target_fl->l_type);
7000     __put_user(fl->l_whence, &target_fl->l_whence);
7001     __put_user(fl->l_start, &target_fl->l_start);
7002     __put_user(fl->l_len, &target_fl->l_len);
7003     __put_user(fl->l_pid, &target_fl->l_pid);
7004     unlock_user_struct(target_fl, target_flock_addr, 1);
7005     return 0;
7006 }
7007 #endif
7008 
7009 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7010                                               abi_ulong target_flock_addr)
7011 {
7012     struct target_flock64 *target_fl;
7013     int l_type;
7014 
7015     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7016         return -TARGET_EFAULT;
7017     }
7018 
7019     __get_user(l_type, &target_fl->l_type);
7020     l_type = target_to_host_flock(l_type);
7021     if (l_type < 0) {
7022         return l_type;
7023     }
7024     fl->l_type = l_type;
7025     __get_user(fl->l_whence, &target_fl->l_whence);
7026     __get_user(fl->l_start, &target_fl->l_start);
7027     __get_user(fl->l_len, &target_fl->l_len);
7028     __get_user(fl->l_pid, &target_fl->l_pid);
7029     unlock_user_struct(target_fl, target_flock_addr, 0);
7030     return 0;
7031 }
7032 
7033 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7034                                             const struct flock64 *fl)
7035 {
7036     struct target_flock64 *target_fl;
7037     short l_type;
7038 
7039     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7040         return -TARGET_EFAULT;
7041     }
7042 
7043     l_type = host_to_target_flock(fl->l_type);
7044     __put_user(l_type, &target_fl->l_type);
7045     __put_user(fl->l_whence, &target_fl->l_whence);
7046     __put_user(fl->l_start, &target_fl->l_start);
7047     __put_user(fl->l_len, &target_fl->l_len);
7048     __put_user(fl->l_pid, &target_fl->l_pid);
7049     unlock_user_struct(target_fl, target_flock_addr, 1);
7050     return 0;
7051 }
7052 
7053 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7054 {
7055     struct flock64 fl64;
7056 #ifdef F_GETOWN_EX
7057     struct f_owner_ex fox;
7058     struct target_f_owner_ex *target_fox;
7059 #endif
7060     abi_long ret;
7061     int host_cmd = target_to_host_fcntl_cmd(cmd);
7062 
7063     if (host_cmd == -TARGET_EINVAL)
7064 	    return host_cmd;
7065 
7066     switch(cmd) {
7067     case TARGET_F_GETLK:
7068         ret = copy_from_user_flock(&fl64, arg);
7069         if (ret) {
7070             return ret;
7071         }
7072         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7073         if (ret == 0) {
7074             ret = copy_to_user_flock(arg, &fl64);
7075         }
7076         break;
7077 
7078     case TARGET_F_SETLK:
7079     case TARGET_F_SETLKW:
7080         ret = copy_from_user_flock(&fl64, arg);
7081         if (ret) {
7082             return ret;
7083         }
7084         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7085         break;
7086 
7087     case TARGET_F_GETLK64:
7088     case TARGET_F_OFD_GETLK:
7089         ret = copy_from_user_flock64(&fl64, arg);
7090         if (ret) {
7091             return ret;
7092         }
7093         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7094         if (ret == 0) {
7095             ret = copy_to_user_flock64(arg, &fl64);
7096         }
7097         break;
7098     case TARGET_F_SETLK64:
7099     case TARGET_F_SETLKW64:
7100     case TARGET_F_OFD_SETLK:
7101     case TARGET_F_OFD_SETLKW:
7102         ret = copy_from_user_flock64(&fl64, arg);
7103         if (ret) {
7104             return ret;
7105         }
7106         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7107         break;
7108 
7109     case TARGET_F_GETFL:
7110         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7111         if (ret >= 0) {
7112             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7113         }
7114         break;
7115 
7116     case TARGET_F_SETFL:
7117         ret = get_errno(safe_fcntl(fd, host_cmd,
7118                                    target_to_host_bitmask(arg,
7119                                                           fcntl_flags_tbl)));
7120         break;
7121 
7122 #ifdef F_GETOWN_EX
7123     case TARGET_F_GETOWN_EX:
7124         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7125         if (ret >= 0) {
7126             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7127                 return -TARGET_EFAULT;
7128             target_fox->type = tswap32(fox.type);
7129             target_fox->pid = tswap32(fox.pid);
7130             unlock_user_struct(target_fox, arg, 1);
7131         }
7132         break;
7133 #endif
7134 
7135 #ifdef F_SETOWN_EX
7136     case TARGET_F_SETOWN_EX:
7137         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7138             return -TARGET_EFAULT;
7139         fox.type = tswap32(target_fox->type);
7140         fox.pid = tswap32(target_fox->pid);
7141         unlock_user_struct(target_fox, arg, 0);
7142         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7143         break;
7144 #endif
7145 
7146     case TARGET_F_SETSIG:
7147         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7148         break;
7149 
7150     case TARGET_F_GETSIG:
7151         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7152         break;
7153 
7154     case TARGET_F_SETOWN:
7155     case TARGET_F_GETOWN:
7156     case TARGET_F_SETLEASE:
7157     case TARGET_F_GETLEASE:
7158     case TARGET_F_SETPIPE_SZ:
7159     case TARGET_F_GETPIPE_SZ:
7160     case TARGET_F_ADD_SEALS:
7161     case TARGET_F_GET_SEALS:
7162         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7163         break;
7164 
7165     default:
7166         ret = get_errno(safe_fcntl(fd, cmd, arg));
7167         break;
7168     }
7169     return ret;
7170 }
7171 
7172 #ifdef USE_UID16
7173 
7174 static inline int high2lowuid(int uid)
7175 {
7176     if (uid > 65535)
7177         return 65534;
7178     else
7179         return uid;
7180 }
7181 
7182 static inline int high2lowgid(int gid)
7183 {
7184     if (gid > 65535)
7185         return 65534;
7186     else
7187         return gid;
7188 }
7189 
7190 static inline int low2highuid(int uid)
7191 {
7192     if ((int16_t)uid == -1)
7193         return -1;
7194     else
7195         return uid;
7196 }
7197 
7198 static inline int low2highgid(int gid)
7199 {
7200     if ((int16_t)gid == -1)
7201         return -1;
7202     else
7203         return gid;
7204 }
7205 static inline int tswapid(int id)
7206 {
7207     return tswap16(id);
7208 }
7209 
7210 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7211 
7212 #else /* !USE_UID16 */
7213 static inline int high2lowuid(int uid)
7214 {
7215     return uid;
7216 }
7217 static inline int high2lowgid(int gid)
7218 {
7219     return gid;
7220 }
7221 static inline int low2highuid(int uid)
7222 {
7223     return uid;
7224 }
7225 static inline int low2highgid(int gid)
7226 {
7227     return gid;
7228 }
7229 static inline int tswapid(int id)
7230 {
7231     return tswap32(id);
7232 }
7233 
7234 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7235 
7236 #endif /* USE_UID16 */
7237 
7238 /* We must do direct syscalls for setting UID/GID, because we want to
7239  * implement the Linux system call semantics of "change only for this thread",
7240  * not the libc/POSIX semantics of "change for all threads in process".
7241  * (See http://ewontfix.com/17/ for more details.)
7242  * We use the 32-bit version of the syscalls if present; if it is not
7243  * then either the host architecture supports 32-bit UIDs natively with
7244  * the standard syscall, or the 16-bit UID is the best we can do.
7245  */
7246 #ifdef __NR_setuid32
7247 #define __NR_sys_setuid __NR_setuid32
7248 #else
7249 #define __NR_sys_setuid __NR_setuid
7250 #endif
7251 #ifdef __NR_setgid32
7252 #define __NR_sys_setgid __NR_setgid32
7253 #else
7254 #define __NR_sys_setgid __NR_setgid
7255 #endif
7256 #ifdef __NR_setresuid32
7257 #define __NR_sys_setresuid __NR_setresuid32
7258 #else
7259 #define __NR_sys_setresuid __NR_setresuid
7260 #endif
7261 #ifdef __NR_setresgid32
7262 #define __NR_sys_setresgid __NR_setresgid32
7263 #else
7264 #define __NR_sys_setresgid __NR_setresgid
7265 #endif
7266 
7267 _syscall1(int, sys_setuid, uid_t, uid)
7268 _syscall1(int, sys_setgid, gid_t, gid)
7269 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7270 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7271 
7272 void syscall_init(void)
7273 {
7274     IOCTLEntry *ie;
7275     const argtype *arg_type;
7276     int size;
7277 
7278     thunk_init(STRUCT_MAX);
7279 
7280 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7281 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7282 #include "syscall_types.h"
7283 #undef STRUCT
7284 #undef STRUCT_SPECIAL
7285 
7286     /* we patch the ioctl size if necessary. We rely on the fact that
7287        no ioctl has all the bits at '1' in the size field */
7288     ie = ioctl_entries;
7289     while (ie->target_cmd != 0) {
7290         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7291             TARGET_IOC_SIZEMASK) {
7292             arg_type = ie->arg_type;
7293             if (arg_type[0] != TYPE_PTR) {
7294                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7295                         ie->target_cmd);
7296                 exit(1);
7297             }
7298             arg_type++;
7299             size = thunk_type_size(arg_type, 0);
7300             ie->target_cmd = (ie->target_cmd &
7301                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7302                 (size << TARGET_IOC_SIZESHIFT);
7303         }
7304 
7305         /* automatic consistency check if same arch */
7306 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7307     (defined(__x86_64__) && defined(TARGET_X86_64))
7308         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7309             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7310                     ie->name, ie->target_cmd, ie->host_cmd);
7311         }
7312 #endif
7313         ie++;
7314     }
7315 }
7316 
7317 #ifdef TARGET_NR_truncate64
7318 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7319                                          abi_long arg2,
7320                                          abi_long arg3,
7321                                          abi_long arg4)
7322 {
7323     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7324         arg2 = arg3;
7325         arg3 = arg4;
7326     }
7327     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7328 }
7329 #endif
7330 
7331 #ifdef TARGET_NR_ftruncate64
7332 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7333                                           abi_long arg2,
7334                                           abi_long arg3,
7335                                           abi_long arg4)
7336 {
7337     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7338         arg2 = arg3;
7339         arg3 = arg4;
7340     }
7341     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7342 }
7343 #endif
7344 
7345 #if defined(TARGET_NR_timer_settime) || \
7346     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7347 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7348                                                  abi_ulong target_addr)
7349 {
7350     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7351                                 offsetof(struct target_itimerspec,
7352                                          it_interval)) ||
7353         target_to_host_timespec(&host_its->it_value, target_addr +
7354                                 offsetof(struct target_itimerspec,
7355                                          it_value))) {
7356         return -TARGET_EFAULT;
7357     }
7358 
7359     return 0;
7360 }
7361 #endif
7362 
7363 #if defined(TARGET_NR_timer_settime64) || \
7364     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7365 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7366                                                    abi_ulong target_addr)
7367 {
7368     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7369                                   offsetof(struct target__kernel_itimerspec,
7370                                            it_interval)) ||
7371         target_to_host_timespec64(&host_its->it_value, target_addr +
7372                                   offsetof(struct target__kernel_itimerspec,
7373                                            it_value))) {
7374         return -TARGET_EFAULT;
7375     }
7376 
7377     return 0;
7378 }
7379 #endif
7380 
7381 #if ((defined(TARGET_NR_timerfd_gettime) || \
7382       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7383       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7384 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7385                                                  struct itimerspec *host_its)
7386 {
7387     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7388                                                        it_interval),
7389                                 &host_its->it_interval) ||
7390         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7391                                                        it_value),
7392                                 &host_its->it_value)) {
7393         return -TARGET_EFAULT;
7394     }
7395     return 0;
7396 }
7397 #endif
7398 
7399 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7400       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7401       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7402 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7403                                                    struct itimerspec *host_its)
7404 {
7405     if (host_to_target_timespec64(target_addr +
7406                                   offsetof(struct target__kernel_itimerspec,
7407                                            it_interval),
7408                                   &host_its->it_interval) ||
7409         host_to_target_timespec64(target_addr +
7410                                   offsetof(struct target__kernel_itimerspec,
7411                                            it_value),
7412                                   &host_its->it_value)) {
7413         return -TARGET_EFAULT;
7414     }
7415     return 0;
7416 }
7417 #endif
7418 
7419 #if defined(TARGET_NR_adjtimex) || \
7420     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7421 static inline abi_long target_to_host_timex(struct timex *host_tx,
7422                                             abi_long target_addr)
7423 {
7424     struct target_timex *target_tx;
7425 
7426     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7427         return -TARGET_EFAULT;
7428     }
7429 
7430     __get_user(host_tx->modes, &target_tx->modes);
7431     __get_user(host_tx->offset, &target_tx->offset);
7432     __get_user(host_tx->freq, &target_tx->freq);
7433     __get_user(host_tx->maxerror, &target_tx->maxerror);
7434     __get_user(host_tx->esterror, &target_tx->esterror);
7435     __get_user(host_tx->status, &target_tx->status);
7436     __get_user(host_tx->constant, &target_tx->constant);
7437     __get_user(host_tx->precision, &target_tx->precision);
7438     __get_user(host_tx->tolerance, &target_tx->tolerance);
7439     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7440     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7441     __get_user(host_tx->tick, &target_tx->tick);
7442     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7443     __get_user(host_tx->jitter, &target_tx->jitter);
7444     __get_user(host_tx->shift, &target_tx->shift);
7445     __get_user(host_tx->stabil, &target_tx->stabil);
7446     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7447     __get_user(host_tx->calcnt, &target_tx->calcnt);
7448     __get_user(host_tx->errcnt, &target_tx->errcnt);
7449     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7450     __get_user(host_tx->tai, &target_tx->tai);
7451 
7452     unlock_user_struct(target_tx, target_addr, 0);
7453     return 0;
7454 }
7455 
7456 static inline abi_long host_to_target_timex(abi_long target_addr,
7457                                             struct timex *host_tx)
7458 {
7459     struct target_timex *target_tx;
7460 
7461     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7462         return -TARGET_EFAULT;
7463     }
7464 
7465     __put_user(host_tx->modes, &target_tx->modes);
7466     __put_user(host_tx->offset, &target_tx->offset);
7467     __put_user(host_tx->freq, &target_tx->freq);
7468     __put_user(host_tx->maxerror, &target_tx->maxerror);
7469     __put_user(host_tx->esterror, &target_tx->esterror);
7470     __put_user(host_tx->status, &target_tx->status);
7471     __put_user(host_tx->constant, &target_tx->constant);
7472     __put_user(host_tx->precision, &target_tx->precision);
7473     __put_user(host_tx->tolerance, &target_tx->tolerance);
7474     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7475     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7476     __put_user(host_tx->tick, &target_tx->tick);
7477     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7478     __put_user(host_tx->jitter, &target_tx->jitter);
7479     __put_user(host_tx->shift, &target_tx->shift);
7480     __put_user(host_tx->stabil, &target_tx->stabil);
7481     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7482     __put_user(host_tx->calcnt, &target_tx->calcnt);
7483     __put_user(host_tx->errcnt, &target_tx->errcnt);
7484     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7485     __put_user(host_tx->tai, &target_tx->tai);
7486 
7487     unlock_user_struct(target_tx, target_addr, 1);
7488     return 0;
7489 }
7490 #endif
7491 
7492 
7493 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7494 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7495                                               abi_long target_addr)
7496 {
7497     struct target__kernel_timex *target_tx;
7498 
7499     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7500                                  offsetof(struct target__kernel_timex,
7501                                           time))) {
7502         return -TARGET_EFAULT;
7503     }
7504 
7505     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7506         return -TARGET_EFAULT;
7507     }
7508 
7509     __get_user(host_tx->modes, &target_tx->modes);
7510     __get_user(host_tx->offset, &target_tx->offset);
7511     __get_user(host_tx->freq, &target_tx->freq);
7512     __get_user(host_tx->maxerror, &target_tx->maxerror);
7513     __get_user(host_tx->esterror, &target_tx->esterror);
7514     __get_user(host_tx->status, &target_tx->status);
7515     __get_user(host_tx->constant, &target_tx->constant);
7516     __get_user(host_tx->precision, &target_tx->precision);
7517     __get_user(host_tx->tolerance, &target_tx->tolerance);
7518     __get_user(host_tx->tick, &target_tx->tick);
7519     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7520     __get_user(host_tx->jitter, &target_tx->jitter);
7521     __get_user(host_tx->shift, &target_tx->shift);
7522     __get_user(host_tx->stabil, &target_tx->stabil);
7523     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7524     __get_user(host_tx->calcnt, &target_tx->calcnt);
7525     __get_user(host_tx->errcnt, &target_tx->errcnt);
7526     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7527     __get_user(host_tx->tai, &target_tx->tai);
7528 
7529     unlock_user_struct(target_tx, target_addr, 0);
7530     return 0;
7531 }
7532 
7533 static inline abi_long host_to_target_timex64(abi_long target_addr,
7534                                               struct timex *host_tx)
7535 {
7536     struct target__kernel_timex *target_tx;
7537 
7538    if (copy_to_user_timeval64(target_addr +
7539                               offsetof(struct target__kernel_timex, time),
7540                               &host_tx->time)) {
7541         return -TARGET_EFAULT;
7542     }
7543 
7544     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7545         return -TARGET_EFAULT;
7546     }
7547 
7548     __put_user(host_tx->modes, &target_tx->modes);
7549     __put_user(host_tx->offset, &target_tx->offset);
7550     __put_user(host_tx->freq, &target_tx->freq);
7551     __put_user(host_tx->maxerror, &target_tx->maxerror);
7552     __put_user(host_tx->esterror, &target_tx->esterror);
7553     __put_user(host_tx->status, &target_tx->status);
7554     __put_user(host_tx->constant, &target_tx->constant);
7555     __put_user(host_tx->precision, &target_tx->precision);
7556     __put_user(host_tx->tolerance, &target_tx->tolerance);
7557     __put_user(host_tx->tick, &target_tx->tick);
7558     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7559     __put_user(host_tx->jitter, &target_tx->jitter);
7560     __put_user(host_tx->shift, &target_tx->shift);
7561     __put_user(host_tx->stabil, &target_tx->stabil);
7562     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7563     __put_user(host_tx->calcnt, &target_tx->calcnt);
7564     __put_user(host_tx->errcnt, &target_tx->errcnt);
7565     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7566     __put_user(host_tx->tai, &target_tx->tai);
7567 
7568     unlock_user_struct(target_tx, target_addr, 1);
7569     return 0;
7570 }
7571 #endif
7572 
7573 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7574 #define sigev_notify_thread_id _sigev_un._tid
7575 #endif
7576 
7577 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7578                                                abi_ulong target_addr)
7579 {
7580     struct target_sigevent *target_sevp;
7581 
7582     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7583         return -TARGET_EFAULT;
7584     }
7585 
7586     /* This union is awkward on 64 bit systems because it has a 32 bit
7587      * integer and a pointer in it; we follow the conversion approach
7588      * used for handling sigval types in signal.c so the guest should get
7589      * the correct value back even if we did a 64 bit byteswap and it's
7590      * using the 32 bit integer.
7591      */
7592     host_sevp->sigev_value.sival_ptr =
7593         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7594     host_sevp->sigev_signo =
7595         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7596     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7597     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7598 
7599     unlock_user_struct(target_sevp, target_addr, 1);
7600     return 0;
7601 }
7602 
7603 #if defined(TARGET_NR_mlockall)
7604 static inline int target_to_host_mlockall_arg(int arg)
7605 {
7606     int result = 0;
7607 
7608     if (arg & TARGET_MCL_CURRENT) {
7609         result |= MCL_CURRENT;
7610     }
7611     if (arg & TARGET_MCL_FUTURE) {
7612         result |= MCL_FUTURE;
7613     }
7614 #ifdef MCL_ONFAULT
7615     if (arg & TARGET_MCL_ONFAULT) {
7616         result |= MCL_ONFAULT;
7617     }
7618 #endif
7619 
7620     return result;
7621 }
7622 #endif
7623 
7624 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7625      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7626      defined(TARGET_NR_newfstatat))
7627 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7628                                              abi_ulong target_addr,
7629                                              struct stat *host_st)
7630 {
7631 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7632     if (cpu_env->eabi) {
7633         struct target_eabi_stat64 *target_st;
7634 
7635         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7636             return -TARGET_EFAULT;
7637         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7638         __put_user(host_st->st_dev, &target_st->st_dev);
7639         __put_user(host_st->st_ino, &target_st->st_ino);
7640 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7641         __put_user(host_st->st_ino, &target_st->__st_ino);
7642 #endif
7643         __put_user(host_st->st_mode, &target_st->st_mode);
7644         __put_user(host_st->st_nlink, &target_st->st_nlink);
7645         __put_user(host_st->st_uid, &target_st->st_uid);
7646         __put_user(host_st->st_gid, &target_st->st_gid);
7647         __put_user(host_st->st_rdev, &target_st->st_rdev);
7648         __put_user(host_st->st_size, &target_st->st_size);
7649         __put_user(host_st->st_blksize, &target_st->st_blksize);
7650         __put_user(host_st->st_blocks, &target_st->st_blocks);
7651         __put_user(host_st->st_atime, &target_st->target_st_atime);
7652         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7653         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7654 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7655         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7656         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7657         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7658 #endif
7659         unlock_user_struct(target_st, target_addr, 1);
7660     } else
7661 #endif
7662     {
7663 #if defined(TARGET_HAS_STRUCT_STAT64)
7664         struct target_stat64 *target_st;
7665 #else
7666         struct target_stat *target_st;
7667 #endif
7668 
7669         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7670             return -TARGET_EFAULT;
7671         memset(target_st, 0, sizeof(*target_st));
7672         __put_user(host_st->st_dev, &target_st->st_dev);
7673         __put_user(host_st->st_ino, &target_st->st_ino);
7674 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7675         __put_user(host_st->st_ino, &target_st->__st_ino);
7676 #endif
7677         __put_user(host_st->st_mode, &target_st->st_mode);
7678         __put_user(host_st->st_nlink, &target_st->st_nlink);
7679         __put_user(host_st->st_uid, &target_st->st_uid);
7680         __put_user(host_st->st_gid, &target_st->st_gid);
7681         __put_user(host_st->st_rdev, &target_st->st_rdev);
7682         /* XXX: better use of kernel struct */
7683         __put_user(host_st->st_size, &target_st->st_size);
7684         __put_user(host_st->st_blksize, &target_st->st_blksize);
7685         __put_user(host_st->st_blocks, &target_st->st_blocks);
7686         __put_user(host_st->st_atime, &target_st->target_st_atime);
7687         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7688         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7689 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7690         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7691         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7692         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7693 #endif
7694         unlock_user_struct(target_st, target_addr, 1);
7695     }
7696 
7697     return 0;
7698 }
7699 #endif
7700 
7701 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7702 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7703                                             abi_ulong target_addr)
7704 {
7705     struct target_statx *target_stx;
7706 
7707     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7708         return -TARGET_EFAULT;
7709     }
7710     memset(target_stx, 0, sizeof(*target_stx));
7711 
7712     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7713     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7714     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7715     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7716     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7717     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7718     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7719     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7720     __put_user(host_stx->stx_size, &target_stx->stx_size);
7721     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7722     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7723     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7724     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7725     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7726     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7727     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7728     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7729     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7730     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7731     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7732     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7733     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7734     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7735 
7736     unlock_user_struct(target_stx, target_addr, 1);
7737 
7738     return 0;
7739 }
7740 #endif
7741 
7742 static int do_sys_futex(int *uaddr, int op, int val,
7743                          const struct timespec *timeout, int *uaddr2,
7744                          int val3)
7745 {
7746 #if HOST_LONG_BITS == 64
7747 #if defined(__NR_futex)
7748     /* always a 64-bit time_t, it doesn't define _time64 version  */
7749     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7750 
7751 #endif
7752 #else /* HOST_LONG_BITS == 64 */
7753 #if defined(__NR_futex_time64)
7754     if (sizeof(timeout->tv_sec) == 8) {
7755         /* _time64 function on 32bit arch */
7756         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7757     }
7758 #endif
7759 #if defined(__NR_futex)
7760     /* old function on 32bit arch */
7761     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7762 #endif
7763 #endif /* HOST_LONG_BITS == 64 */
7764     g_assert_not_reached();
7765 }
7766 
7767 static int do_safe_futex(int *uaddr, int op, int val,
7768                          const struct timespec *timeout, int *uaddr2,
7769                          int val3)
7770 {
7771 #if HOST_LONG_BITS == 64
7772 #if defined(__NR_futex)
7773     /* always a 64-bit time_t, it doesn't define _time64 version  */
7774     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7775 #endif
7776 #else /* HOST_LONG_BITS == 64 */
7777 #if defined(__NR_futex_time64)
7778     if (sizeof(timeout->tv_sec) == 8) {
7779         /* _time64 function on 32bit arch */
7780         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7781                                            val3));
7782     }
7783 #endif
7784 #if defined(__NR_futex)
7785     /* old function on 32bit arch */
7786     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7787 #endif
7788 #endif /* HOST_LONG_BITS == 64 */
7789     return -TARGET_ENOSYS;
7790 }
7791 
7792 /* ??? Using host futex calls even when target atomic operations
7793    are not really atomic probably breaks things.  However implementing
7794    futexes locally would make futexes shared between multiple processes
7795    tricky.  However they're probably useless because guest atomic
7796    operations won't work either.  */
7797 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7798 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7799                     int op, int val, target_ulong timeout,
7800                     target_ulong uaddr2, int val3)
7801 {
7802     struct timespec ts, *pts = NULL;
7803     void *haddr2 = NULL;
7804     int base_op;
7805 
7806     /* We assume FUTEX_* constants are the same on both host and target. */
7807 #ifdef FUTEX_CMD_MASK
7808     base_op = op & FUTEX_CMD_MASK;
7809 #else
7810     base_op = op;
7811 #endif
7812     switch (base_op) {
7813     case FUTEX_WAIT:
7814     case FUTEX_WAIT_BITSET:
7815         val = tswap32(val);
7816         break;
7817     case FUTEX_WAIT_REQUEUE_PI:
7818         val = tswap32(val);
7819         haddr2 = g2h(cpu, uaddr2);
7820         break;
7821     case FUTEX_LOCK_PI:
7822     case FUTEX_LOCK_PI2:
7823         break;
7824     case FUTEX_WAKE:
7825     case FUTEX_WAKE_BITSET:
7826     case FUTEX_TRYLOCK_PI:
7827     case FUTEX_UNLOCK_PI:
7828         timeout = 0;
7829         break;
7830     case FUTEX_FD:
7831         val = target_to_host_signal(val);
7832         timeout = 0;
7833         break;
7834     case FUTEX_CMP_REQUEUE:
7835     case FUTEX_CMP_REQUEUE_PI:
7836         val3 = tswap32(val3);
7837         /* fall through */
7838     case FUTEX_REQUEUE:
7839     case FUTEX_WAKE_OP:
7840         /*
7841          * For these, the 4th argument is not TIMEOUT, but VAL2.
7842          * But the prototype of do_safe_futex takes a pointer, so
7843          * insert casts to satisfy the compiler.  We do not need
7844          * to tswap VAL2 since it's not compared to guest memory.
7845           */
7846         pts = (struct timespec *)(uintptr_t)timeout;
7847         timeout = 0;
7848         haddr2 = g2h(cpu, uaddr2);
7849         break;
7850     default:
7851         return -TARGET_ENOSYS;
7852     }
7853     if (timeout) {
7854         pts = &ts;
7855         if (time64
7856             ? target_to_host_timespec64(pts, timeout)
7857             : target_to_host_timespec(pts, timeout)) {
7858             return -TARGET_EFAULT;
7859         }
7860     }
7861     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7862 }
7863 #endif
7864 
7865 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7866 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7867                                      abi_long handle, abi_long mount_id,
7868                                      abi_long flags)
7869 {
7870     struct file_handle *target_fh;
7871     struct file_handle *fh;
7872     int mid = 0;
7873     abi_long ret;
7874     char *name;
7875     unsigned int size, total_size;
7876 
7877     if (get_user_s32(size, handle)) {
7878         return -TARGET_EFAULT;
7879     }
7880 
7881     name = lock_user_string(pathname);
7882     if (!name) {
7883         return -TARGET_EFAULT;
7884     }
7885 
7886     total_size = sizeof(struct file_handle) + size;
7887     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7888     if (!target_fh) {
7889         unlock_user(name, pathname, 0);
7890         return -TARGET_EFAULT;
7891     }
7892 
7893     fh = g_malloc0(total_size);
7894     fh->handle_bytes = size;
7895 
7896     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7897     unlock_user(name, pathname, 0);
7898 
7899     /* man name_to_handle_at(2):
7900      * Other than the use of the handle_bytes field, the caller should treat
7901      * the file_handle structure as an opaque data type
7902      */
7903 
7904     memcpy(target_fh, fh, total_size);
7905     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7906     target_fh->handle_type = tswap32(fh->handle_type);
7907     g_free(fh);
7908     unlock_user(target_fh, handle, total_size);
7909 
7910     if (put_user_s32(mid, mount_id)) {
7911         return -TARGET_EFAULT;
7912     }
7913 
7914     return ret;
7915 
7916 }
7917 #endif
7918 
7919 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7920 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7921                                      abi_long flags)
7922 {
7923     struct file_handle *target_fh;
7924     struct file_handle *fh;
7925     unsigned int size, total_size;
7926     abi_long ret;
7927 
7928     if (get_user_s32(size, handle)) {
7929         return -TARGET_EFAULT;
7930     }
7931 
7932     total_size = sizeof(struct file_handle) + size;
7933     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7934     if (!target_fh) {
7935         return -TARGET_EFAULT;
7936     }
7937 
7938     fh = g_memdup(target_fh, total_size);
7939     fh->handle_bytes = size;
7940     fh->handle_type = tswap32(target_fh->handle_type);
7941 
7942     ret = get_errno(open_by_handle_at(mount_fd, fh,
7943                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7944 
7945     g_free(fh);
7946 
7947     unlock_user(target_fh, handle, total_size);
7948 
7949     return ret;
7950 }
7951 #endif
7952 
7953 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7954 
7955 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7956 {
7957     int host_flags;
7958     target_sigset_t *target_mask;
7959     sigset_t host_mask;
7960     abi_long ret;
7961 
7962     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7963         return -TARGET_EINVAL;
7964     }
7965     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7966         return -TARGET_EFAULT;
7967     }
7968 
7969     target_to_host_sigset(&host_mask, target_mask);
7970 
7971     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7972 
7973     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7974     if (ret >= 0) {
7975         fd_trans_register(ret, &target_signalfd_trans);
7976     }
7977 
7978     unlock_user_struct(target_mask, mask, 0);
7979 
7980     return ret;
7981 }
7982 #endif
7983 
7984 /* Map host to target signal numbers for the wait family of syscalls.
7985    Assume all other status bits are the same.  */
7986 int host_to_target_waitstatus(int status)
7987 {
7988     if (WIFSIGNALED(status)) {
7989         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7990     }
7991     if (WIFSTOPPED(status)) {
7992         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7993                | (status & 0xff);
7994     }
7995     return status;
7996 }
7997 
7998 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7999 {
8000     CPUState *cpu = env_cpu(cpu_env);
8001     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8002     int i;
8003 
8004     for (i = 0; i < bprm->argc; i++) {
8005         size_t len = strlen(bprm->argv[i]) + 1;
8006 
8007         if (write(fd, bprm->argv[i], len) != len) {
8008             return -1;
8009         }
8010     }
8011 
8012     return 0;
8013 }
8014 
8015 static int open_self_maps(CPUArchState *cpu_env, int fd)
8016 {
8017     CPUState *cpu = env_cpu(cpu_env);
8018     TaskState *ts = cpu->opaque;
8019     GSList *map_info = read_self_maps();
8020     GSList *s;
8021     int count;
8022 
8023     for (s = map_info; s; s = g_slist_next(s)) {
8024         MapInfo *e = (MapInfo *) s->data;
8025 
8026         if (h2g_valid(e->start)) {
8027             unsigned long min = e->start;
8028             unsigned long max = e->end;
8029             int flags = page_get_flags(h2g(min));
8030             const char *path;
8031 
8032             max = h2g_valid(max - 1) ?
8033                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8034 
8035             if (page_check_range(h2g(min), max - min, flags) == -1) {
8036                 continue;
8037             }
8038 
8039 #ifdef TARGET_HPPA
8040             if (h2g(max) == ts->info->stack_limit) {
8041 #else
8042             if (h2g(min) == ts->info->stack_limit) {
8043 #endif
8044                 path = "[stack]";
8045             } else {
8046                 path = e->path;
8047             }
8048 
8049             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8050                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8051                             h2g(min), h2g(max - 1) + 1,
8052                             (flags & PAGE_READ) ? 'r' : '-',
8053                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8054                             (flags & PAGE_EXEC) ? 'x' : '-',
8055                             e->is_priv ? 'p' : 's',
8056                             (uint64_t) e->offset, e->dev, e->inode);
8057             if (path) {
8058                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8059             } else {
8060                 dprintf(fd, "\n");
8061             }
8062         }
8063     }
8064 
8065     free_self_maps(map_info);
8066 
8067 #ifdef TARGET_VSYSCALL_PAGE
8068     /*
8069      * We only support execution from the vsyscall page.
8070      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8071      */
8072     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8073                     " --xp 00000000 00:00 0",
8074                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8075     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8076 #endif
8077 
8078     return 0;
8079 }
8080 
8081 static int open_self_stat(CPUArchState *cpu_env, int fd)
8082 {
8083     CPUState *cpu = env_cpu(cpu_env);
8084     TaskState *ts = cpu->opaque;
8085     g_autoptr(GString) buf = g_string_new(NULL);
8086     int i;
8087 
8088     for (i = 0; i < 44; i++) {
8089         if (i == 0) {
8090             /* pid */
8091             g_string_printf(buf, FMT_pid " ", getpid());
8092         } else if (i == 1) {
8093             /* app name */
8094             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8095             bin = bin ? bin + 1 : ts->bprm->argv[0];
8096             g_string_printf(buf, "(%.15s) ", bin);
8097         } else if (i == 3) {
8098             /* ppid */
8099             g_string_printf(buf, FMT_pid " ", getppid());
8100         } else if (i == 21) {
8101             /* starttime */
8102             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8103         } else if (i == 27) {
8104             /* stack bottom */
8105             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8106         } else {
8107             /* for the rest, there is MasterCard */
8108             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8109         }
8110 
8111         if (write(fd, buf->str, buf->len) != buf->len) {
8112             return -1;
8113         }
8114     }
8115 
8116     return 0;
8117 }
8118 
8119 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8120 {
8121     CPUState *cpu = env_cpu(cpu_env);
8122     TaskState *ts = cpu->opaque;
8123     abi_ulong auxv = ts->info->saved_auxv;
8124     abi_ulong len = ts->info->auxv_len;
8125     char *ptr;
8126 
8127     /*
8128      * Auxiliary vector is stored in target process stack.
8129      * read in whole auxv vector and copy it to file
8130      */
8131     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8132     if (ptr != NULL) {
8133         while (len > 0) {
8134             ssize_t r;
8135             r = write(fd, ptr, len);
8136             if (r <= 0) {
8137                 break;
8138             }
8139             len -= r;
8140             ptr += r;
8141         }
8142         lseek(fd, 0, SEEK_SET);
8143         unlock_user(ptr, auxv, len);
8144     }
8145 
8146     return 0;
8147 }
8148 
8149 static int is_proc_myself(const char *filename, const char *entry)
8150 {
8151     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8152         filename += strlen("/proc/");
8153         if (!strncmp(filename, "self/", strlen("self/"))) {
8154             filename += strlen("self/");
8155         } else if (*filename >= '1' && *filename <= '9') {
8156             char myself[80];
8157             snprintf(myself, sizeof(myself), "%d/", getpid());
8158             if (!strncmp(filename, myself, strlen(myself))) {
8159                 filename += strlen(myself);
8160             } else {
8161                 return 0;
8162             }
8163         } else {
8164             return 0;
8165         }
8166         if (!strcmp(filename, entry)) {
8167             return 1;
8168         }
8169     }
8170     return 0;
8171 }
8172 
8173 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8174                       const char *fmt, int code)
8175 {
8176     if (logfile) {
8177         CPUState *cs = env_cpu(env);
8178 
8179         fprintf(logfile, fmt, code);
8180         fprintf(logfile, "Failing executable: %s\n", exec_path);
8181         cpu_dump_state(cs, logfile, 0);
8182         open_self_maps(env, fileno(logfile));
8183     }
8184 }
8185 
8186 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8187 {
8188     /* dump to console */
8189     excp_dump_file(stderr, env, fmt, code);
8190 
8191     /* dump to log file */
8192     if (qemu_log_separate()) {
8193         FILE *logfile = qemu_log_trylock();
8194 
8195         excp_dump_file(logfile, env, fmt, code);
8196         qemu_log_unlock(logfile);
8197     }
8198 }
8199 
8200 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8201     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8202 static int is_proc(const char *filename, const char *entry)
8203 {
8204     return strcmp(filename, entry) == 0;
8205 }
8206 #endif
8207 
8208 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8209 static int open_net_route(CPUArchState *cpu_env, int fd)
8210 {
8211     FILE *fp;
8212     char *line = NULL;
8213     size_t len = 0;
8214     ssize_t read;
8215 
8216     fp = fopen("/proc/net/route", "r");
8217     if (fp == NULL) {
8218         return -1;
8219     }
8220 
8221     /* read header */
8222 
8223     read = getline(&line, &len, fp);
8224     dprintf(fd, "%s", line);
8225 
8226     /* read routes */
8227 
8228     while ((read = getline(&line, &len, fp)) != -1) {
8229         char iface[16];
8230         uint32_t dest, gw, mask;
8231         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8232         int fields;
8233 
8234         fields = sscanf(line,
8235                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8236                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8237                         &mask, &mtu, &window, &irtt);
8238         if (fields != 11) {
8239             continue;
8240         }
8241         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8242                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8243                 metric, tswap32(mask), mtu, window, irtt);
8244     }
8245 
8246     free(line);
8247     fclose(fp);
8248 
8249     return 0;
8250 }
8251 #endif
8252 
8253 #if defined(TARGET_SPARC)
8254 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8255 {
8256     dprintf(fd, "type\t\t: sun4u\n");
8257     return 0;
8258 }
8259 #endif
8260 
8261 #if defined(TARGET_HPPA)
8262 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8263 {
8264     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8265     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8266     dprintf(fd, "capabilities\t: os32\n");
8267     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8268     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8269     return 0;
8270 }
8271 #endif
8272 
8273 #if defined(TARGET_M68K)
8274 static int open_hardware(CPUArchState *cpu_env, int fd)
8275 {
8276     dprintf(fd, "Model:\t\tqemu-m68k\n");
8277     return 0;
8278 }
8279 #endif
8280 
8281 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8282 {
8283     struct fake_open {
8284         const char *filename;
8285         int (*fill)(CPUArchState *cpu_env, int fd);
8286         int (*cmp)(const char *s1, const char *s2);
8287     };
8288     const struct fake_open *fake_open;
8289     static const struct fake_open fakes[] = {
8290         { "maps", open_self_maps, is_proc_myself },
8291         { "stat", open_self_stat, is_proc_myself },
8292         { "auxv", open_self_auxv, is_proc_myself },
8293         { "cmdline", open_self_cmdline, is_proc_myself },
8294 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8295         { "/proc/net/route", open_net_route, is_proc },
8296 #endif
8297 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8298         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8299 #endif
8300 #if defined(TARGET_M68K)
8301         { "/proc/hardware", open_hardware, is_proc },
8302 #endif
8303         { NULL, NULL, NULL }
8304     };
8305 
8306     if (is_proc_myself(pathname, "exe")) {
8307         return safe_openat(dirfd, exec_path, flags, mode);
8308     }
8309 
8310     for (fake_open = fakes; fake_open->filename; fake_open++) {
8311         if (fake_open->cmp(pathname, fake_open->filename)) {
8312             break;
8313         }
8314     }
8315 
8316     if (fake_open->filename) {
8317         const char *tmpdir;
8318         char filename[PATH_MAX];
8319         int fd, r;
8320 
8321         fd = memfd_create("qemu-open", 0);
8322         if (fd < 0) {
8323             if (errno != ENOSYS) {
8324                 return fd;
8325             }
8326             /* create temporary file to map stat to */
8327             tmpdir = getenv("TMPDIR");
8328             if (!tmpdir)
8329                 tmpdir = "/tmp";
8330             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8331             fd = mkstemp(filename);
8332             if (fd < 0) {
8333                 return fd;
8334             }
8335             unlink(filename);
8336         }
8337 
8338         if ((r = fake_open->fill(cpu_env, fd))) {
8339             int e = errno;
8340             close(fd);
8341             errno = e;
8342             return r;
8343         }
8344         lseek(fd, 0, SEEK_SET);
8345 
8346         return fd;
8347     }
8348 
8349     return safe_openat(dirfd, path(pathname), flags, mode);
8350 }
8351 
8352 #define TIMER_MAGIC 0x0caf0000
8353 #define TIMER_MAGIC_MASK 0xffff0000
8354 
8355 /* Convert QEMU provided timer ID back to internal 16bit index format */
8356 static target_timer_t get_timer_id(abi_long arg)
8357 {
8358     target_timer_t timerid = arg;
8359 
8360     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8361         return -TARGET_EINVAL;
8362     }
8363 
8364     timerid &= 0xffff;
8365 
8366     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8367         return -TARGET_EINVAL;
8368     }
8369 
8370     return timerid;
8371 }
8372 
8373 static int target_to_host_cpu_mask(unsigned long *host_mask,
8374                                    size_t host_size,
8375                                    abi_ulong target_addr,
8376                                    size_t target_size)
8377 {
8378     unsigned target_bits = sizeof(abi_ulong) * 8;
8379     unsigned host_bits = sizeof(*host_mask) * 8;
8380     abi_ulong *target_mask;
8381     unsigned i, j;
8382 
8383     assert(host_size >= target_size);
8384 
8385     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8386     if (!target_mask) {
8387         return -TARGET_EFAULT;
8388     }
8389     memset(host_mask, 0, host_size);
8390 
8391     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8392         unsigned bit = i * target_bits;
8393         abi_ulong val;
8394 
8395         __get_user(val, &target_mask[i]);
8396         for (j = 0; j < target_bits; j++, bit++) {
8397             if (val & (1UL << j)) {
8398                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8399             }
8400         }
8401     }
8402 
8403     unlock_user(target_mask, target_addr, 0);
8404     return 0;
8405 }
8406 
8407 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8408                                    size_t host_size,
8409                                    abi_ulong target_addr,
8410                                    size_t target_size)
8411 {
8412     unsigned target_bits = sizeof(abi_ulong) * 8;
8413     unsigned host_bits = sizeof(*host_mask) * 8;
8414     abi_ulong *target_mask;
8415     unsigned i, j;
8416 
8417     assert(host_size >= target_size);
8418 
8419     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8420     if (!target_mask) {
8421         return -TARGET_EFAULT;
8422     }
8423 
8424     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8425         unsigned bit = i * target_bits;
8426         abi_ulong val = 0;
8427 
8428         for (j = 0; j < target_bits; j++, bit++) {
8429             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8430                 val |= 1UL << j;
8431             }
8432         }
8433         __put_user(val, &target_mask[i]);
8434     }
8435 
8436     unlock_user(target_mask, target_addr, target_size);
8437     return 0;
8438 }
8439 
8440 #ifdef TARGET_NR_getdents
8441 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8442 {
8443     g_autofree void *hdirp = NULL;
8444     void *tdirp;
8445     int hlen, hoff, toff;
8446     int hreclen, treclen;
8447     off64_t prev_diroff = 0;
8448 
8449     hdirp = g_try_malloc(count);
8450     if (!hdirp) {
8451         return -TARGET_ENOMEM;
8452     }
8453 
8454 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8455     hlen = sys_getdents(dirfd, hdirp, count);
8456 #else
8457     hlen = sys_getdents64(dirfd, hdirp, count);
8458 #endif
8459 
8460     hlen = get_errno(hlen);
8461     if (is_error(hlen)) {
8462         return hlen;
8463     }
8464 
8465     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8466     if (!tdirp) {
8467         return -TARGET_EFAULT;
8468     }
8469 
8470     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8471 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8472         struct linux_dirent *hde = hdirp + hoff;
8473 #else
8474         struct linux_dirent64 *hde = hdirp + hoff;
8475 #endif
8476         struct target_dirent *tde = tdirp + toff;
8477         int namelen;
8478         uint8_t type;
8479 
8480         namelen = strlen(hde->d_name);
8481         hreclen = hde->d_reclen;
8482         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8483         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8484 
8485         if (toff + treclen > count) {
8486             /*
8487              * If the host struct is smaller than the target struct, or
8488              * requires less alignment and thus packs into less space,
8489              * then the host can return more entries than we can pass
8490              * on to the guest.
8491              */
8492             if (toff == 0) {
8493                 toff = -TARGET_EINVAL; /* result buffer is too small */
8494                 break;
8495             }
8496             /*
8497              * Return what we have, resetting the file pointer to the
8498              * location of the first record not returned.
8499              */
8500             lseek64(dirfd, prev_diroff, SEEK_SET);
8501             break;
8502         }
8503 
8504         prev_diroff = hde->d_off;
8505         tde->d_ino = tswapal(hde->d_ino);
8506         tde->d_off = tswapal(hde->d_off);
8507         tde->d_reclen = tswap16(treclen);
8508         memcpy(tde->d_name, hde->d_name, namelen + 1);
8509 
8510         /*
8511          * The getdents type is in what was formerly a padding byte at the
8512          * end of the structure.
8513          */
8514 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8515         type = *((uint8_t *)hde + hreclen - 1);
8516 #else
8517         type = hde->d_type;
8518 #endif
8519         *((uint8_t *)tde + treclen - 1) = type;
8520     }
8521 
8522     unlock_user(tdirp, arg2, toff);
8523     return toff;
8524 }
8525 #endif /* TARGET_NR_getdents */
8526 
8527 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8528 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8529 {
8530     g_autofree void *hdirp = NULL;
8531     void *tdirp;
8532     int hlen, hoff, toff;
8533     int hreclen, treclen;
8534     off64_t prev_diroff = 0;
8535 
8536     hdirp = g_try_malloc(count);
8537     if (!hdirp) {
8538         return -TARGET_ENOMEM;
8539     }
8540 
8541     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8542     if (is_error(hlen)) {
8543         return hlen;
8544     }
8545 
8546     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8547     if (!tdirp) {
8548         return -TARGET_EFAULT;
8549     }
8550 
8551     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8552         struct linux_dirent64 *hde = hdirp + hoff;
8553         struct target_dirent64 *tde = tdirp + toff;
8554         int namelen;
8555 
8556         namelen = strlen(hde->d_name) + 1;
8557         hreclen = hde->d_reclen;
8558         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8559         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8560 
8561         if (toff + treclen > count) {
8562             /*
8563              * If the host struct is smaller than the target struct, or
8564              * requires less alignment and thus packs into less space,
8565              * then the host can return more entries than we can pass
8566              * on to the guest.
8567              */
8568             if (toff == 0) {
8569                 toff = -TARGET_EINVAL; /* result buffer is too small */
8570                 break;
8571             }
8572             /*
8573              * Return what we have, resetting the file pointer to the
8574              * location of the first record not returned.
8575              */
8576             lseek64(dirfd, prev_diroff, SEEK_SET);
8577             break;
8578         }
8579 
8580         prev_diroff = hde->d_off;
8581         tde->d_ino = tswap64(hde->d_ino);
8582         tde->d_off = tswap64(hde->d_off);
8583         tde->d_reclen = tswap16(treclen);
8584         tde->d_type = hde->d_type;
8585         memcpy(tde->d_name, hde->d_name, namelen);
8586     }
8587 
8588     unlock_user(tdirp, arg2, toff);
8589     return toff;
8590 }
8591 #endif /* TARGET_NR_getdents64 */
8592 
8593 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8594 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8595 #endif
8596 
8597 /* This is an internal helper for do_syscall so that it is easier
8598  * to have a single return point, so that actions, such as logging
8599  * of syscall results, can be performed.
8600  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8601  */
8602 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8603                             abi_long arg2, abi_long arg3, abi_long arg4,
8604                             abi_long arg5, abi_long arg6, abi_long arg7,
8605                             abi_long arg8)
8606 {
8607     CPUState *cpu = env_cpu(cpu_env);
8608     abi_long ret;
8609 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8610     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8611     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8612     || defined(TARGET_NR_statx)
8613     struct stat st;
8614 #endif
8615 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8616     || defined(TARGET_NR_fstatfs)
8617     struct statfs stfs;
8618 #endif
8619     void *p;
8620 
8621     switch(num) {
8622     case TARGET_NR_exit:
8623         /* In old applications this may be used to implement _exit(2).
8624            However in threaded applications it is used for thread termination,
8625            and _exit_group is used for application termination.
8626            Do thread termination if we have more then one thread.  */
8627 
8628         if (block_signals()) {
8629             return -QEMU_ERESTARTSYS;
8630         }
8631 
8632         pthread_mutex_lock(&clone_lock);
8633 
8634         if (CPU_NEXT(first_cpu)) {
8635             TaskState *ts = cpu->opaque;
8636 
8637             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8638             object_unref(OBJECT(cpu));
8639             /*
8640              * At this point the CPU should be unrealized and removed
8641              * from cpu lists. We can clean-up the rest of the thread
8642              * data without the lock held.
8643              */
8644 
8645             pthread_mutex_unlock(&clone_lock);
8646 
8647             if (ts->child_tidptr) {
8648                 put_user_u32(0, ts->child_tidptr);
8649                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8650                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8651             }
8652             thread_cpu = NULL;
8653             g_free(ts);
8654             rcu_unregister_thread();
8655             pthread_exit(NULL);
8656         }
8657 
8658         pthread_mutex_unlock(&clone_lock);
8659         preexit_cleanup(cpu_env, arg1);
8660         _exit(arg1);
8661         return 0; /* avoid warning */
8662     case TARGET_NR_read:
8663         if (arg2 == 0 && arg3 == 0) {
8664             return get_errno(safe_read(arg1, 0, 0));
8665         } else {
8666             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8667                 return -TARGET_EFAULT;
8668             ret = get_errno(safe_read(arg1, p, arg3));
8669             if (ret >= 0 &&
8670                 fd_trans_host_to_target_data(arg1)) {
8671                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8672             }
8673             unlock_user(p, arg2, ret);
8674         }
8675         return ret;
8676     case TARGET_NR_write:
8677         if (arg2 == 0 && arg3 == 0) {
8678             return get_errno(safe_write(arg1, 0, 0));
8679         }
8680         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8681             return -TARGET_EFAULT;
8682         if (fd_trans_target_to_host_data(arg1)) {
8683             void *copy = g_malloc(arg3);
8684             memcpy(copy, p, arg3);
8685             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8686             if (ret >= 0) {
8687                 ret = get_errno(safe_write(arg1, copy, ret));
8688             }
8689             g_free(copy);
8690         } else {
8691             ret = get_errno(safe_write(arg1, p, arg3));
8692         }
8693         unlock_user(p, arg2, 0);
8694         return ret;
8695 
8696 #ifdef TARGET_NR_open
8697     case TARGET_NR_open:
8698         if (!(p = lock_user_string(arg1)))
8699             return -TARGET_EFAULT;
8700         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8701                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8702                                   arg3));
8703         fd_trans_unregister(ret);
8704         unlock_user(p, arg1, 0);
8705         return ret;
8706 #endif
8707     case TARGET_NR_openat:
8708         if (!(p = lock_user_string(arg2)))
8709             return -TARGET_EFAULT;
8710         ret = get_errno(do_openat(cpu_env, arg1, p,
8711                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8712                                   arg4));
8713         fd_trans_unregister(ret);
8714         unlock_user(p, arg2, 0);
8715         return ret;
8716 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8717     case TARGET_NR_name_to_handle_at:
8718         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8719         return ret;
8720 #endif
8721 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8722     case TARGET_NR_open_by_handle_at:
8723         ret = do_open_by_handle_at(arg1, arg2, arg3);
8724         fd_trans_unregister(ret);
8725         return ret;
8726 #endif
8727 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8728     case TARGET_NR_pidfd_open:
8729         return get_errno(pidfd_open(arg1, arg2));
8730 #endif
8731 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8732     case TARGET_NR_pidfd_send_signal:
8733         {
8734             siginfo_t uinfo, *puinfo;
8735 
8736             if (arg3) {
8737                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8738                 if (!p) {
8739                     return -TARGET_EFAULT;
8740                  }
8741                  target_to_host_siginfo(&uinfo, p);
8742                  unlock_user(p, arg3, 0);
8743                  puinfo = &uinfo;
8744             } else {
8745                  puinfo = NULL;
8746             }
8747             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8748                                               puinfo, arg4));
8749         }
8750         return ret;
8751 #endif
8752 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8753     case TARGET_NR_pidfd_getfd:
8754         return get_errno(pidfd_getfd(arg1, arg2, arg3));
8755 #endif
8756     case TARGET_NR_close:
8757         fd_trans_unregister(arg1);
8758         return get_errno(close(arg1));
8759 
8760     case TARGET_NR_brk:
8761         return do_brk(arg1);
8762 #ifdef TARGET_NR_fork
8763     case TARGET_NR_fork:
8764         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8765 #endif
8766 #ifdef TARGET_NR_waitpid
8767     case TARGET_NR_waitpid:
8768         {
8769             int status;
8770             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8771             if (!is_error(ret) && arg2 && ret
8772                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8773                 return -TARGET_EFAULT;
8774         }
8775         return ret;
8776 #endif
8777 #ifdef TARGET_NR_waitid
8778     case TARGET_NR_waitid:
8779         {
8780             siginfo_t info;
8781             info.si_pid = 0;
8782             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8783             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8784                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8785                     return -TARGET_EFAULT;
8786                 host_to_target_siginfo(p, &info);
8787                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8788             }
8789         }
8790         return ret;
8791 #endif
8792 #ifdef TARGET_NR_creat /* not on alpha */
8793     case TARGET_NR_creat:
8794         if (!(p = lock_user_string(arg1)))
8795             return -TARGET_EFAULT;
8796         ret = get_errno(creat(p, arg2));
8797         fd_trans_unregister(ret);
8798         unlock_user(p, arg1, 0);
8799         return ret;
8800 #endif
8801 #ifdef TARGET_NR_link
8802     case TARGET_NR_link:
8803         {
8804             void * p2;
8805             p = lock_user_string(arg1);
8806             p2 = lock_user_string(arg2);
8807             if (!p || !p2)
8808                 ret = -TARGET_EFAULT;
8809             else
8810                 ret = get_errno(link(p, p2));
8811             unlock_user(p2, arg2, 0);
8812             unlock_user(p, arg1, 0);
8813         }
8814         return ret;
8815 #endif
8816 #if defined(TARGET_NR_linkat)
8817     case TARGET_NR_linkat:
8818         {
8819             void * p2 = NULL;
8820             if (!arg2 || !arg4)
8821                 return -TARGET_EFAULT;
8822             p  = lock_user_string(arg2);
8823             p2 = lock_user_string(arg4);
8824             if (!p || !p2)
8825                 ret = -TARGET_EFAULT;
8826             else
8827                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8828             unlock_user(p, arg2, 0);
8829             unlock_user(p2, arg4, 0);
8830         }
8831         return ret;
8832 #endif
8833 #ifdef TARGET_NR_unlink
8834     case TARGET_NR_unlink:
8835         if (!(p = lock_user_string(arg1)))
8836             return -TARGET_EFAULT;
8837         ret = get_errno(unlink(p));
8838         unlock_user(p, arg1, 0);
8839         return ret;
8840 #endif
8841 #if defined(TARGET_NR_unlinkat)
8842     case TARGET_NR_unlinkat:
8843         if (!(p = lock_user_string(arg2)))
8844             return -TARGET_EFAULT;
8845         ret = get_errno(unlinkat(arg1, p, arg3));
8846         unlock_user(p, arg2, 0);
8847         return ret;
8848 #endif
8849     case TARGET_NR_execve:
8850         {
8851             char **argp, **envp;
8852             int argc, envc;
8853             abi_ulong gp;
8854             abi_ulong guest_argp;
8855             abi_ulong guest_envp;
8856             abi_ulong addr;
8857             char **q;
8858 
8859             argc = 0;
8860             guest_argp = arg2;
8861             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8862                 if (get_user_ual(addr, gp))
8863                     return -TARGET_EFAULT;
8864                 if (!addr)
8865                     break;
8866                 argc++;
8867             }
8868             envc = 0;
8869             guest_envp = arg3;
8870             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8871                 if (get_user_ual(addr, gp))
8872                     return -TARGET_EFAULT;
8873                 if (!addr)
8874                     break;
8875                 envc++;
8876             }
8877 
8878             argp = g_new0(char *, argc + 1);
8879             envp = g_new0(char *, envc + 1);
8880 
8881             for (gp = guest_argp, q = argp; gp;
8882                   gp += sizeof(abi_ulong), q++) {
8883                 if (get_user_ual(addr, gp))
8884                     goto execve_efault;
8885                 if (!addr)
8886                     break;
8887                 if (!(*q = lock_user_string(addr)))
8888                     goto execve_efault;
8889             }
8890             *q = NULL;
8891 
8892             for (gp = guest_envp, q = envp; gp;
8893                   gp += sizeof(abi_ulong), q++) {
8894                 if (get_user_ual(addr, gp))
8895                     goto execve_efault;
8896                 if (!addr)
8897                     break;
8898                 if (!(*q = lock_user_string(addr)))
8899                     goto execve_efault;
8900             }
8901             *q = NULL;
8902 
8903             if (!(p = lock_user_string(arg1)))
8904                 goto execve_efault;
8905             /* Although execve() is not an interruptible syscall it is
8906              * a special case where we must use the safe_syscall wrapper:
8907              * if we allow a signal to happen before we make the host
8908              * syscall then we will 'lose' it, because at the point of
8909              * execve the process leaves QEMU's control. So we use the
8910              * safe syscall wrapper to ensure that we either take the
8911              * signal as a guest signal, or else it does not happen
8912              * before the execve completes and makes it the other
8913              * program's problem.
8914              */
8915             if (is_proc_myself(p, "exe")) {
8916                 ret = get_errno(safe_execve(exec_path, argp, envp));
8917             } else {
8918                 ret = get_errno(safe_execve(p, argp, envp));
8919             }
8920             unlock_user(p, arg1, 0);
8921 
8922             goto execve_end;
8923 
8924         execve_efault:
8925             ret = -TARGET_EFAULT;
8926 
8927         execve_end:
8928             for (gp = guest_argp, q = argp; *q;
8929                   gp += sizeof(abi_ulong), q++) {
8930                 if (get_user_ual(addr, gp)
8931                     || !addr)
8932                     break;
8933                 unlock_user(*q, addr, 0);
8934             }
8935             for (gp = guest_envp, q = envp; *q;
8936                   gp += sizeof(abi_ulong), q++) {
8937                 if (get_user_ual(addr, gp)
8938                     || !addr)
8939                     break;
8940                 unlock_user(*q, addr, 0);
8941             }
8942 
8943             g_free(argp);
8944             g_free(envp);
8945         }
8946         return ret;
8947     case TARGET_NR_chdir:
8948         if (!(p = lock_user_string(arg1)))
8949             return -TARGET_EFAULT;
8950         ret = get_errno(chdir(p));
8951         unlock_user(p, arg1, 0);
8952         return ret;
8953 #ifdef TARGET_NR_time
8954     case TARGET_NR_time:
8955         {
8956             time_t host_time;
8957             ret = get_errno(time(&host_time));
8958             if (!is_error(ret)
8959                 && arg1
8960                 && put_user_sal(host_time, arg1))
8961                 return -TARGET_EFAULT;
8962         }
8963         return ret;
8964 #endif
8965 #ifdef TARGET_NR_mknod
8966     case TARGET_NR_mknod:
8967         if (!(p = lock_user_string(arg1)))
8968             return -TARGET_EFAULT;
8969         ret = get_errno(mknod(p, arg2, arg3));
8970         unlock_user(p, arg1, 0);
8971         return ret;
8972 #endif
8973 #if defined(TARGET_NR_mknodat)
8974     case TARGET_NR_mknodat:
8975         if (!(p = lock_user_string(arg2)))
8976             return -TARGET_EFAULT;
8977         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8978         unlock_user(p, arg2, 0);
8979         return ret;
8980 #endif
8981 #ifdef TARGET_NR_chmod
8982     case TARGET_NR_chmod:
8983         if (!(p = lock_user_string(arg1)))
8984             return -TARGET_EFAULT;
8985         ret = get_errno(chmod(p, arg2));
8986         unlock_user(p, arg1, 0);
8987         return ret;
8988 #endif
8989 #ifdef TARGET_NR_lseek
8990     case TARGET_NR_lseek:
8991         return get_errno(lseek(arg1, arg2, arg3));
8992 #endif
8993 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8994     /* Alpha specific */
8995     case TARGET_NR_getxpid:
8996         cpu_env->ir[IR_A4] = getppid();
8997         return get_errno(getpid());
8998 #endif
8999 #ifdef TARGET_NR_getpid
9000     case TARGET_NR_getpid:
9001         return get_errno(getpid());
9002 #endif
9003     case TARGET_NR_mount:
9004         {
9005             /* need to look at the data field */
9006             void *p2, *p3;
9007 
9008             if (arg1) {
9009                 p = lock_user_string(arg1);
9010                 if (!p) {
9011                     return -TARGET_EFAULT;
9012                 }
9013             } else {
9014                 p = NULL;
9015             }
9016 
9017             p2 = lock_user_string(arg2);
9018             if (!p2) {
9019                 if (arg1) {
9020                     unlock_user(p, arg1, 0);
9021                 }
9022                 return -TARGET_EFAULT;
9023             }
9024 
9025             if (arg3) {
9026                 p3 = lock_user_string(arg3);
9027                 if (!p3) {
9028                     if (arg1) {
9029                         unlock_user(p, arg1, 0);
9030                     }
9031                     unlock_user(p2, arg2, 0);
9032                     return -TARGET_EFAULT;
9033                 }
9034             } else {
9035                 p3 = NULL;
9036             }
9037 
9038             /* FIXME - arg5 should be locked, but it isn't clear how to
9039              * do that since it's not guaranteed to be a NULL-terminated
9040              * string.
9041              */
9042             if (!arg5) {
9043                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9044             } else {
9045                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9046             }
9047             ret = get_errno(ret);
9048 
9049             if (arg1) {
9050                 unlock_user(p, arg1, 0);
9051             }
9052             unlock_user(p2, arg2, 0);
9053             if (arg3) {
9054                 unlock_user(p3, arg3, 0);
9055             }
9056         }
9057         return ret;
9058 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9059 #if defined(TARGET_NR_umount)
9060     case TARGET_NR_umount:
9061 #endif
9062 #if defined(TARGET_NR_oldumount)
9063     case TARGET_NR_oldumount:
9064 #endif
9065         if (!(p = lock_user_string(arg1)))
9066             return -TARGET_EFAULT;
9067         ret = get_errno(umount(p));
9068         unlock_user(p, arg1, 0);
9069         return ret;
9070 #endif
9071 #ifdef TARGET_NR_stime /* not on alpha */
9072     case TARGET_NR_stime:
9073         {
9074             struct timespec ts;
9075             ts.tv_nsec = 0;
9076             if (get_user_sal(ts.tv_sec, arg1)) {
9077                 return -TARGET_EFAULT;
9078             }
9079             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9080         }
9081 #endif
9082 #ifdef TARGET_NR_alarm /* not on alpha */
9083     case TARGET_NR_alarm:
9084         return alarm(arg1);
9085 #endif
9086 #ifdef TARGET_NR_pause /* not on alpha */
9087     case TARGET_NR_pause:
9088         if (!block_signals()) {
9089             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9090         }
9091         return -TARGET_EINTR;
9092 #endif
9093 #ifdef TARGET_NR_utime
9094     case TARGET_NR_utime:
9095         {
9096             struct utimbuf tbuf, *host_tbuf;
9097             struct target_utimbuf *target_tbuf;
9098             if (arg2) {
9099                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9100                     return -TARGET_EFAULT;
9101                 tbuf.actime = tswapal(target_tbuf->actime);
9102                 tbuf.modtime = tswapal(target_tbuf->modtime);
9103                 unlock_user_struct(target_tbuf, arg2, 0);
9104                 host_tbuf = &tbuf;
9105             } else {
9106                 host_tbuf = NULL;
9107             }
9108             if (!(p = lock_user_string(arg1)))
9109                 return -TARGET_EFAULT;
9110             ret = get_errno(utime(p, host_tbuf));
9111             unlock_user(p, arg1, 0);
9112         }
9113         return ret;
9114 #endif
9115 #ifdef TARGET_NR_utimes
9116     case TARGET_NR_utimes:
9117         {
9118             struct timeval *tvp, tv[2];
9119             if (arg2) {
9120                 if (copy_from_user_timeval(&tv[0], arg2)
9121                     || copy_from_user_timeval(&tv[1],
9122                                               arg2 + sizeof(struct target_timeval)))
9123                     return -TARGET_EFAULT;
9124                 tvp = tv;
9125             } else {
9126                 tvp = NULL;
9127             }
9128             if (!(p = lock_user_string(arg1)))
9129                 return -TARGET_EFAULT;
9130             ret = get_errno(utimes(p, tvp));
9131             unlock_user(p, arg1, 0);
9132         }
9133         return ret;
9134 #endif
9135 #if defined(TARGET_NR_futimesat)
9136     case TARGET_NR_futimesat:
9137         {
9138             struct timeval *tvp, tv[2];
9139             if (arg3) {
9140                 if (copy_from_user_timeval(&tv[0], arg3)
9141                     || copy_from_user_timeval(&tv[1],
9142                                               arg3 + sizeof(struct target_timeval)))
9143                     return -TARGET_EFAULT;
9144                 tvp = tv;
9145             } else {
9146                 tvp = NULL;
9147             }
9148             if (!(p = lock_user_string(arg2))) {
9149                 return -TARGET_EFAULT;
9150             }
9151             ret = get_errno(futimesat(arg1, path(p), tvp));
9152             unlock_user(p, arg2, 0);
9153         }
9154         return ret;
9155 #endif
9156 #ifdef TARGET_NR_access
9157     case TARGET_NR_access:
9158         if (!(p = lock_user_string(arg1))) {
9159             return -TARGET_EFAULT;
9160         }
9161         ret = get_errno(access(path(p), arg2));
9162         unlock_user(p, arg1, 0);
9163         return ret;
9164 #endif
9165 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9166     case TARGET_NR_faccessat:
9167         if (!(p = lock_user_string(arg2))) {
9168             return -TARGET_EFAULT;
9169         }
9170         ret = get_errno(faccessat(arg1, p, arg3, 0));
9171         unlock_user(p, arg2, 0);
9172         return ret;
9173 #endif
9174 #if defined(TARGET_NR_faccessat2)
9175     case TARGET_NR_faccessat2:
9176         if (!(p = lock_user_string(arg2))) {
9177             return -TARGET_EFAULT;
9178         }
9179         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9180         unlock_user(p, arg2, 0);
9181         return ret;
9182 #endif
9183 #ifdef TARGET_NR_nice /* not on alpha */
9184     case TARGET_NR_nice:
9185         return get_errno(nice(arg1));
9186 #endif
9187     case TARGET_NR_sync:
9188         sync();
9189         return 0;
9190 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9191     case TARGET_NR_syncfs:
9192         return get_errno(syncfs(arg1));
9193 #endif
9194     case TARGET_NR_kill:
9195         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9196 #ifdef TARGET_NR_rename
9197     case TARGET_NR_rename:
9198         {
9199             void *p2;
9200             p = lock_user_string(arg1);
9201             p2 = lock_user_string(arg2);
9202             if (!p || !p2)
9203                 ret = -TARGET_EFAULT;
9204             else
9205                 ret = get_errno(rename(p, p2));
9206             unlock_user(p2, arg2, 0);
9207             unlock_user(p, arg1, 0);
9208         }
9209         return ret;
9210 #endif
9211 #if defined(TARGET_NR_renameat)
9212     case TARGET_NR_renameat:
9213         {
9214             void *p2;
9215             p  = lock_user_string(arg2);
9216             p2 = lock_user_string(arg4);
9217             if (!p || !p2)
9218                 ret = -TARGET_EFAULT;
9219             else
9220                 ret = get_errno(renameat(arg1, p, arg3, p2));
9221             unlock_user(p2, arg4, 0);
9222             unlock_user(p, arg2, 0);
9223         }
9224         return ret;
9225 #endif
9226 #if defined(TARGET_NR_renameat2)
9227     case TARGET_NR_renameat2:
9228         {
9229             void *p2;
9230             p  = lock_user_string(arg2);
9231             p2 = lock_user_string(arg4);
9232             if (!p || !p2) {
9233                 ret = -TARGET_EFAULT;
9234             } else {
9235                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9236             }
9237             unlock_user(p2, arg4, 0);
9238             unlock_user(p, arg2, 0);
9239         }
9240         return ret;
9241 #endif
9242 #ifdef TARGET_NR_mkdir
9243     case TARGET_NR_mkdir:
9244         if (!(p = lock_user_string(arg1)))
9245             return -TARGET_EFAULT;
9246         ret = get_errno(mkdir(p, arg2));
9247         unlock_user(p, arg1, 0);
9248         return ret;
9249 #endif
9250 #if defined(TARGET_NR_mkdirat)
9251     case TARGET_NR_mkdirat:
9252         if (!(p = lock_user_string(arg2)))
9253             return -TARGET_EFAULT;
9254         ret = get_errno(mkdirat(arg1, p, arg3));
9255         unlock_user(p, arg2, 0);
9256         return ret;
9257 #endif
9258 #ifdef TARGET_NR_rmdir
9259     case TARGET_NR_rmdir:
9260         if (!(p = lock_user_string(arg1)))
9261             return -TARGET_EFAULT;
9262         ret = get_errno(rmdir(p));
9263         unlock_user(p, arg1, 0);
9264         return ret;
9265 #endif
9266     case TARGET_NR_dup:
9267         ret = get_errno(dup(arg1));
9268         if (ret >= 0) {
9269             fd_trans_dup(arg1, ret);
9270         }
9271         return ret;
9272 #ifdef TARGET_NR_pipe
9273     case TARGET_NR_pipe:
9274         return do_pipe(cpu_env, arg1, 0, 0);
9275 #endif
9276 #ifdef TARGET_NR_pipe2
9277     case TARGET_NR_pipe2:
9278         return do_pipe(cpu_env, arg1,
9279                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9280 #endif
9281     case TARGET_NR_times:
9282         {
9283             struct target_tms *tmsp;
9284             struct tms tms;
9285             ret = get_errno(times(&tms));
9286             if (arg1) {
9287                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9288                 if (!tmsp)
9289                     return -TARGET_EFAULT;
9290                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9291                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9292                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9293                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9294             }
9295             if (!is_error(ret))
9296                 ret = host_to_target_clock_t(ret);
9297         }
9298         return ret;
9299     case TARGET_NR_acct:
9300         if (arg1 == 0) {
9301             ret = get_errno(acct(NULL));
9302         } else {
9303             if (!(p = lock_user_string(arg1))) {
9304                 return -TARGET_EFAULT;
9305             }
9306             ret = get_errno(acct(path(p)));
9307             unlock_user(p, arg1, 0);
9308         }
9309         return ret;
9310 #ifdef TARGET_NR_umount2
9311     case TARGET_NR_umount2:
9312         if (!(p = lock_user_string(arg1)))
9313             return -TARGET_EFAULT;
9314         ret = get_errno(umount2(p, arg2));
9315         unlock_user(p, arg1, 0);
9316         return ret;
9317 #endif
9318     case TARGET_NR_ioctl:
9319         return do_ioctl(arg1, arg2, arg3);
9320 #ifdef TARGET_NR_fcntl
9321     case TARGET_NR_fcntl:
9322         return do_fcntl(arg1, arg2, arg3);
9323 #endif
9324     case TARGET_NR_setpgid:
9325         return get_errno(setpgid(arg1, arg2));
9326     case TARGET_NR_umask:
9327         return get_errno(umask(arg1));
9328     case TARGET_NR_chroot:
9329         if (!(p = lock_user_string(arg1)))
9330             return -TARGET_EFAULT;
9331         ret = get_errno(chroot(p));
9332         unlock_user(p, arg1, 0);
9333         return ret;
9334 #ifdef TARGET_NR_dup2
9335     case TARGET_NR_dup2:
9336         ret = get_errno(dup2(arg1, arg2));
9337         if (ret >= 0) {
9338             fd_trans_dup(arg1, arg2);
9339         }
9340         return ret;
9341 #endif
9342 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9343     case TARGET_NR_dup3:
9344     {
9345         int host_flags;
9346 
9347         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9348             return -EINVAL;
9349         }
9350         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9351         ret = get_errno(dup3(arg1, arg2, host_flags));
9352         if (ret >= 0) {
9353             fd_trans_dup(arg1, arg2);
9354         }
9355         return ret;
9356     }
9357 #endif
9358 #ifdef TARGET_NR_getppid /* not on alpha */
9359     case TARGET_NR_getppid:
9360         return get_errno(getppid());
9361 #endif
9362 #ifdef TARGET_NR_getpgrp
9363     case TARGET_NR_getpgrp:
9364         return get_errno(getpgrp());
9365 #endif
9366     case TARGET_NR_setsid:
9367         return get_errno(setsid());
9368 #ifdef TARGET_NR_sigaction
9369     case TARGET_NR_sigaction:
9370         {
9371 #if defined(TARGET_MIPS)
9372 	    struct target_sigaction act, oact, *pact, *old_act;
9373 
9374 	    if (arg2) {
9375                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9376                     return -TARGET_EFAULT;
9377 		act._sa_handler = old_act->_sa_handler;
9378 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9379 		act.sa_flags = old_act->sa_flags;
9380 		unlock_user_struct(old_act, arg2, 0);
9381 		pact = &act;
9382 	    } else {
9383 		pact = NULL;
9384 	    }
9385 
9386         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9387 
9388 	    if (!is_error(ret) && arg3) {
9389                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9390                     return -TARGET_EFAULT;
9391 		old_act->_sa_handler = oact._sa_handler;
9392 		old_act->sa_flags = oact.sa_flags;
9393 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9394 		old_act->sa_mask.sig[1] = 0;
9395 		old_act->sa_mask.sig[2] = 0;
9396 		old_act->sa_mask.sig[3] = 0;
9397 		unlock_user_struct(old_act, arg3, 1);
9398 	    }
9399 #else
9400             struct target_old_sigaction *old_act;
9401             struct target_sigaction act, oact, *pact;
9402             if (arg2) {
9403                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9404                     return -TARGET_EFAULT;
9405                 act._sa_handler = old_act->_sa_handler;
9406                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9407                 act.sa_flags = old_act->sa_flags;
9408 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9409                 act.sa_restorer = old_act->sa_restorer;
9410 #endif
9411                 unlock_user_struct(old_act, arg2, 0);
9412                 pact = &act;
9413             } else {
9414                 pact = NULL;
9415             }
9416             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9417             if (!is_error(ret) && arg3) {
9418                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9419                     return -TARGET_EFAULT;
9420                 old_act->_sa_handler = oact._sa_handler;
9421                 old_act->sa_mask = oact.sa_mask.sig[0];
9422                 old_act->sa_flags = oact.sa_flags;
9423 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9424                 old_act->sa_restorer = oact.sa_restorer;
9425 #endif
9426                 unlock_user_struct(old_act, arg3, 1);
9427             }
9428 #endif
9429         }
9430         return ret;
9431 #endif
9432     case TARGET_NR_rt_sigaction:
9433         {
9434             /*
9435              * For Alpha and SPARC this is a 5 argument syscall, with
9436              * a 'restorer' parameter which must be copied into the
9437              * sa_restorer field of the sigaction struct.
9438              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9439              * and arg5 is the sigsetsize.
9440              */
9441 #if defined(TARGET_ALPHA)
9442             target_ulong sigsetsize = arg4;
9443             target_ulong restorer = arg5;
9444 #elif defined(TARGET_SPARC)
9445             target_ulong restorer = arg4;
9446             target_ulong sigsetsize = arg5;
9447 #else
9448             target_ulong sigsetsize = arg4;
9449             target_ulong restorer = 0;
9450 #endif
9451             struct target_sigaction *act = NULL;
9452             struct target_sigaction *oact = NULL;
9453 
9454             if (sigsetsize != sizeof(target_sigset_t)) {
9455                 return -TARGET_EINVAL;
9456             }
9457             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9458                 return -TARGET_EFAULT;
9459             }
9460             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9461                 ret = -TARGET_EFAULT;
9462             } else {
9463                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9464                 if (oact) {
9465                     unlock_user_struct(oact, arg3, 1);
9466                 }
9467             }
9468             if (act) {
9469                 unlock_user_struct(act, arg2, 0);
9470             }
9471         }
9472         return ret;
9473 #ifdef TARGET_NR_sgetmask /* not on alpha */
9474     case TARGET_NR_sgetmask:
9475         {
9476             sigset_t cur_set;
9477             abi_ulong target_set;
9478             ret = do_sigprocmask(0, NULL, &cur_set);
9479             if (!ret) {
9480                 host_to_target_old_sigset(&target_set, &cur_set);
9481                 ret = target_set;
9482             }
9483         }
9484         return ret;
9485 #endif
9486 #ifdef TARGET_NR_ssetmask /* not on alpha */
9487     case TARGET_NR_ssetmask:
9488         {
9489             sigset_t set, oset;
9490             abi_ulong target_set = arg1;
9491             target_to_host_old_sigset(&set, &target_set);
9492             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9493             if (!ret) {
9494                 host_to_target_old_sigset(&target_set, &oset);
9495                 ret = target_set;
9496             }
9497         }
9498         return ret;
9499 #endif
9500 #ifdef TARGET_NR_sigprocmask
9501     case TARGET_NR_sigprocmask:
9502         {
9503 #if defined(TARGET_ALPHA)
9504             sigset_t set, oldset;
9505             abi_ulong mask;
9506             int how;
9507 
9508             switch (arg1) {
9509             case TARGET_SIG_BLOCK:
9510                 how = SIG_BLOCK;
9511                 break;
9512             case TARGET_SIG_UNBLOCK:
9513                 how = SIG_UNBLOCK;
9514                 break;
9515             case TARGET_SIG_SETMASK:
9516                 how = SIG_SETMASK;
9517                 break;
9518             default:
9519                 return -TARGET_EINVAL;
9520             }
9521             mask = arg2;
9522             target_to_host_old_sigset(&set, &mask);
9523 
9524             ret = do_sigprocmask(how, &set, &oldset);
9525             if (!is_error(ret)) {
9526                 host_to_target_old_sigset(&mask, &oldset);
9527                 ret = mask;
9528                 cpu_env->ir[IR_V0] = 0; /* force no error */
9529             }
9530 #else
9531             sigset_t set, oldset, *set_ptr;
9532             int how;
9533 
9534             if (arg2) {
9535                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9536                 if (!p) {
9537                     return -TARGET_EFAULT;
9538                 }
9539                 target_to_host_old_sigset(&set, p);
9540                 unlock_user(p, arg2, 0);
9541                 set_ptr = &set;
9542                 switch (arg1) {
9543                 case TARGET_SIG_BLOCK:
9544                     how = SIG_BLOCK;
9545                     break;
9546                 case TARGET_SIG_UNBLOCK:
9547                     how = SIG_UNBLOCK;
9548                     break;
9549                 case TARGET_SIG_SETMASK:
9550                     how = SIG_SETMASK;
9551                     break;
9552                 default:
9553                     return -TARGET_EINVAL;
9554                 }
9555             } else {
9556                 how = 0;
9557                 set_ptr = NULL;
9558             }
9559             ret = do_sigprocmask(how, set_ptr, &oldset);
9560             if (!is_error(ret) && arg3) {
9561                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9562                     return -TARGET_EFAULT;
9563                 host_to_target_old_sigset(p, &oldset);
9564                 unlock_user(p, arg3, sizeof(target_sigset_t));
9565             }
9566 #endif
9567         }
9568         return ret;
9569 #endif
9570     case TARGET_NR_rt_sigprocmask:
9571         {
9572             int how = arg1;
9573             sigset_t set, oldset, *set_ptr;
9574 
9575             if (arg4 != sizeof(target_sigset_t)) {
9576                 return -TARGET_EINVAL;
9577             }
9578 
9579             if (arg2) {
9580                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9581                 if (!p) {
9582                     return -TARGET_EFAULT;
9583                 }
9584                 target_to_host_sigset(&set, p);
9585                 unlock_user(p, arg2, 0);
9586                 set_ptr = &set;
9587                 switch(how) {
9588                 case TARGET_SIG_BLOCK:
9589                     how = SIG_BLOCK;
9590                     break;
9591                 case TARGET_SIG_UNBLOCK:
9592                     how = SIG_UNBLOCK;
9593                     break;
9594                 case TARGET_SIG_SETMASK:
9595                     how = SIG_SETMASK;
9596                     break;
9597                 default:
9598                     return -TARGET_EINVAL;
9599                 }
9600             } else {
9601                 how = 0;
9602                 set_ptr = NULL;
9603             }
9604             ret = do_sigprocmask(how, set_ptr, &oldset);
9605             if (!is_error(ret) && arg3) {
9606                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9607                     return -TARGET_EFAULT;
9608                 host_to_target_sigset(p, &oldset);
9609                 unlock_user(p, arg3, sizeof(target_sigset_t));
9610             }
9611         }
9612         return ret;
9613 #ifdef TARGET_NR_sigpending
9614     case TARGET_NR_sigpending:
9615         {
9616             sigset_t set;
9617             ret = get_errno(sigpending(&set));
9618             if (!is_error(ret)) {
9619                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9620                     return -TARGET_EFAULT;
9621                 host_to_target_old_sigset(p, &set);
9622                 unlock_user(p, arg1, sizeof(target_sigset_t));
9623             }
9624         }
9625         return ret;
9626 #endif
9627     case TARGET_NR_rt_sigpending:
9628         {
9629             sigset_t set;
9630 
9631             /* Yes, this check is >, not != like most. We follow the kernel's
9632              * logic and it does it like this because it implements
9633              * NR_sigpending through the same code path, and in that case
9634              * the old_sigset_t is smaller in size.
9635              */
9636             if (arg2 > sizeof(target_sigset_t)) {
9637                 return -TARGET_EINVAL;
9638             }
9639 
9640             ret = get_errno(sigpending(&set));
9641             if (!is_error(ret)) {
9642                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9643                     return -TARGET_EFAULT;
9644                 host_to_target_sigset(p, &set);
9645                 unlock_user(p, arg1, sizeof(target_sigset_t));
9646             }
9647         }
9648         return ret;
9649 #ifdef TARGET_NR_sigsuspend
9650     case TARGET_NR_sigsuspend:
9651         {
9652             sigset_t *set;
9653 
9654 #if defined(TARGET_ALPHA)
9655             TaskState *ts = cpu->opaque;
9656             /* target_to_host_old_sigset will bswap back */
9657             abi_ulong mask = tswapal(arg1);
9658             set = &ts->sigsuspend_mask;
9659             target_to_host_old_sigset(set, &mask);
9660 #else
9661             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9662             if (ret != 0) {
9663                 return ret;
9664             }
9665 #endif
9666             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9667             finish_sigsuspend_mask(ret);
9668         }
9669         return ret;
9670 #endif
9671     case TARGET_NR_rt_sigsuspend:
9672         {
9673             sigset_t *set;
9674 
9675             ret = process_sigsuspend_mask(&set, arg1, arg2);
9676             if (ret != 0) {
9677                 return ret;
9678             }
9679             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9680             finish_sigsuspend_mask(ret);
9681         }
9682         return ret;
9683 #ifdef TARGET_NR_rt_sigtimedwait
9684     case TARGET_NR_rt_sigtimedwait:
9685         {
9686             sigset_t set;
9687             struct timespec uts, *puts;
9688             siginfo_t uinfo;
9689 
9690             if (arg4 != sizeof(target_sigset_t)) {
9691                 return -TARGET_EINVAL;
9692             }
9693 
9694             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9695                 return -TARGET_EFAULT;
9696             target_to_host_sigset(&set, p);
9697             unlock_user(p, arg1, 0);
9698             if (arg3) {
9699                 puts = &uts;
9700                 if (target_to_host_timespec(puts, arg3)) {
9701                     return -TARGET_EFAULT;
9702                 }
9703             } else {
9704                 puts = NULL;
9705             }
9706             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9707                                                  SIGSET_T_SIZE));
9708             if (!is_error(ret)) {
9709                 if (arg2) {
9710                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9711                                   0);
9712                     if (!p) {
9713                         return -TARGET_EFAULT;
9714                     }
9715                     host_to_target_siginfo(p, &uinfo);
9716                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9717                 }
9718                 ret = host_to_target_signal(ret);
9719             }
9720         }
9721         return ret;
9722 #endif
9723 #ifdef TARGET_NR_rt_sigtimedwait_time64
9724     case TARGET_NR_rt_sigtimedwait_time64:
9725         {
9726             sigset_t set;
9727             struct timespec uts, *puts;
9728             siginfo_t uinfo;
9729 
9730             if (arg4 != sizeof(target_sigset_t)) {
9731                 return -TARGET_EINVAL;
9732             }
9733 
9734             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9735             if (!p) {
9736                 return -TARGET_EFAULT;
9737             }
9738             target_to_host_sigset(&set, p);
9739             unlock_user(p, arg1, 0);
9740             if (arg3) {
9741                 puts = &uts;
9742                 if (target_to_host_timespec64(puts, arg3)) {
9743                     return -TARGET_EFAULT;
9744                 }
9745             } else {
9746                 puts = NULL;
9747             }
9748             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9749                                                  SIGSET_T_SIZE));
9750             if (!is_error(ret)) {
9751                 if (arg2) {
9752                     p = lock_user(VERIFY_WRITE, arg2,
9753                                   sizeof(target_siginfo_t), 0);
9754                     if (!p) {
9755                         return -TARGET_EFAULT;
9756                     }
9757                     host_to_target_siginfo(p, &uinfo);
9758                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9759                 }
9760                 ret = host_to_target_signal(ret);
9761             }
9762         }
9763         return ret;
9764 #endif
9765     case TARGET_NR_rt_sigqueueinfo:
9766         {
9767             siginfo_t uinfo;
9768 
9769             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9770             if (!p) {
9771                 return -TARGET_EFAULT;
9772             }
9773             target_to_host_siginfo(&uinfo, p);
9774             unlock_user(p, arg3, 0);
9775             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9776         }
9777         return ret;
9778     case TARGET_NR_rt_tgsigqueueinfo:
9779         {
9780             siginfo_t uinfo;
9781 
9782             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9783             if (!p) {
9784                 return -TARGET_EFAULT;
9785             }
9786             target_to_host_siginfo(&uinfo, p);
9787             unlock_user(p, arg4, 0);
9788             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9789         }
9790         return ret;
9791 #ifdef TARGET_NR_sigreturn
9792     case TARGET_NR_sigreturn:
9793         if (block_signals()) {
9794             return -QEMU_ERESTARTSYS;
9795         }
9796         return do_sigreturn(cpu_env);
9797 #endif
9798     case TARGET_NR_rt_sigreturn:
9799         if (block_signals()) {
9800             return -QEMU_ERESTARTSYS;
9801         }
9802         return do_rt_sigreturn(cpu_env);
9803     case TARGET_NR_sethostname:
9804         if (!(p = lock_user_string(arg1)))
9805             return -TARGET_EFAULT;
9806         ret = get_errno(sethostname(p, arg2));
9807         unlock_user(p, arg1, 0);
9808         return ret;
9809 #ifdef TARGET_NR_setrlimit
9810     case TARGET_NR_setrlimit:
9811         {
9812             int resource = target_to_host_resource(arg1);
9813             struct target_rlimit *target_rlim;
9814             struct rlimit rlim;
9815             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9816                 return -TARGET_EFAULT;
9817             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9818             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9819             unlock_user_struct(target_rlim, arg2, 0);
9820             /*
9821              * If we just passed through resource limit settings for memory then
9822              * they would also apply to QEMU's own allocations, and QEMU will
9823              * crash or hang or die if its allocations fail. Ideally we would
9824              * track the guest allocations in QEMU and apply the limits ourselves.
9825              * For now, just tell the guest the call succeeded but don't actually
9826              * limit anything.
9827              */
9828             if (resource != RLIMIT_AS &&
9829                 resource != RLIMIT_DATA &&
9830                 resource != RLIMIT_STACK) {
9831                 return get_errno(setrlimit(resource, &rlim));
9832             } else {
9833                 return 0;
9834             }
9835         }
9836 #endif
9837 #ifdef TARGET_NR_getrlimit
9838     case TARGET_NR_getrlimit:
9839         {
9840             int resource = target_to_host_resource(arg1);
9841             struct target_rlimit *target_rlim;
9842             struct rlimit rlim;
9843 
9844             ret = get_errno(getrlimit(resource, &rlim));
9845             if (!is_error(ret)) {
9846                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9847                     return -TARGET_EFAULT;
9848                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9849                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9850                 unlock_user_struct(target_rlim, arg2, 1);
9851             }
9852         }
9853         return ret;
9854 #endif
9855     case TARGET_NR_getrusage:
9856         {
9857             struct rusage rusage;
9858             ret = get_errno(getrusage(arg1, &rusage));
9859             if (!is_error(ret)) {
9860                 ret = host_to_target_rusage(arg2, &rusage);
9861             }
9862         }
9863         return ret;
9864 #if defined(TARGET_NR_gettimeofday)
9865     case TARGET_NR_gettimeofday:
9866         {
9867             struct timeval tv;
9868             struct timezone tz;
9869 
9870             ret = get_errno(gettimeofday(&tv, &tz));
9871             if (!is_error(ret)) {
9872                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9873                     return -TARGET_EFAULT;
9874                 }
9875                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9876                     return -TARGET_EFAULT;
9877                 }
9878             }
9879         }
9880         return ret;
9881 #endif
9882 #if defined(TARGET_NR_settimeofday)
9883     case TARGET_NR_settimeofday:
9884         {
9885             struct timeval tv, *ptv = NULL;
9886             struct timezone tz, *ptz = NULL;
9887 
9888             if (arg1) {
9889                 if (copy_from_user_timeval(&tv, arg1)) {
9890                     return -TARGET_EFAULT;
9891                 }
9892                 ptv = &tv;
9893             }
9894 
9895             if (arg2) {
9896                 if (copy_from_user_timezone(&tz, arg2)) {
9897                     return -TARGET_EFAULT;
9898                 }
9899                 ptz = &tz;
9900             }
9901 
9902             return get_errno(settimeofday(ptv, ptz));
9903         }
9904 #endif
9905 #if defined(TARGET_NR_select)
9906     case TARGET_NR_select:
9907 #if defined(TARGET_WANT_NI_OLD_SELECT)
9908         /* some architectures used to have old_select here
9909          * but now ENOSYS it.
9910          */
9911         ret = -TARGET_ENOSYS;
9912 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9913         ret = do_old_select(arg1);
9914 #else
9915         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9916 #endif
9917         return ret;
9918 #endif
9919 #ifdef TARGET_NR_pselect6
9920     case TARGET_NR_pselect6:
9921         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9922 #endif
9923 #ifdef TARGET_NR_pselect6_time64
9924     case TARGET_NR_pselect6_time64:
9925         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9926 #endif
9927 #ifdef TARGET_NR_symlink
9928     case TARGET_NR_symlink:
9929         {
9930             void *p2;
9931             p = lock_user_string(arg1);
9932             p2 = lock_user_string(arg2);
9933             if (!p || !p2)
9934                 ret = -TARGET_EFAULT;
9935             else
9936                 ret = get_errno(symlink(p, p2));
9937             unlock_user(p2, arg2, 0);
9938             unlock_user(p, arg1, 0);
9939         }
9940         return ret;
9941 #endif
9942 #if defined(TARGET_NR_symlinkat)
9943     case TARGET_NR_symlinkat:
9944         {
9945             void *p2;
9946             p  = lock_user_string(arg1);
9947             p2 = lock_user_string(arg3);
9948             if (!p || !p2)
9949                 ret = -TARGET_EFAULT;
9950             else
9951                 ret = get_errno(symlinkat(p, arg2, p2));
9952             unlock_user(p2, arg3, 0);
9953             unlock_user(p, arg1, 0);
9954         }
9955         return ret;
9956 #endif
9957 #ifdef TARGET_NR_readlink
9958     case TARGET_NR_readlink:
9959         {
9960             void *p2;
9961             p = lock_user_string(arg1);
9962             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9963             if (!p || !p2) {
9964                 ret = -TARGET_EFAULT;
9965             } else if (!arg3) {
9966                 /* Short circuit this for the magic exe check. */
9967                 ret = -TARGET_EINVAL;
9968             } else if (is_proc_myself((const char *)p, "exe")) {
9969                 char real[PATH_MAX], *temp;
9970                 temp = realpath(exec_path, real);
9971                 /* Return value is # of bytes that we wrote to the buffer. */
9972                 if (temp == NULL) {
9973                     ret = get_errno(-1);
9974                 } else {
9975                     /* Don't worry about sign mismatch as earlier mapping
9976                      * logic would have thrown a bad address error. */
9977                     ret = MIN(strlen(real), arg3);
9978                     /* We cannot NUL terminate the string. */
9979                     memcpy(p2, real, ret);
9980                 }
9981             } else {
9982                 ret = get_errno(readlink(path(p), p2, arg3));
9983             }
9984             unlock_user(p2, arg2, ret);
9985             unlock_user(p, arg1, 0);
9986         }
9987         return ret;
9988 #endif
9989 #if defined(TARGET_NR_readlinkat)
9990     case TARGET_NR_readlinkat:
9991         {
9992             void *p2;
9993             p  = lock_user_string(arg2);
9994             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9995             if (!p || !p2) {
9996                 ret = -TARGET_EFAULT;
9997             } else if (!arg4) {
9998                 /* Short circuit this for the magic exe check. */
9999                 ret = -TARGET_EINVAL;
10000             } else if (is_proc_myself((const char *)p, "exe")) {
10001                 char real[PATH_MAX], *temp;
10002                 temp = realpath(exec_path, real);
10003                 /* Return value is # of bytes that we wrote to the buffer. */
10004                 if (temp == NULL) {
10005                     ret = get_errno(-1);
10006                 } else {
10007                     /* Don't worry about sign mismatch as earlier mapping
10008                      * logic would have thrown a bad address error. */
10009                     ret = MIN(strlen(real), arg4);
10010                     /* We cannot NUL terminate the string. */
10011                     memcpy(p2, real, ret);
10012                 }
10013             } else {
10014                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10015             }
10016             unlock_user(p2, arg3, ret);
10017             unlock_user(p, arg2, 0);
10018         }
10019         return ret;
10020 #endif
10021 #ifdef TARGET_NR_swapon
10022     case TARGET_NR_swapon:
10023         if (!(p = lock_user_string(arg1)))
10024             return -TARGET_EFAULT;
10025         ret = get_errno(swapon(p, arg2));
10026         unlock_user(p, arg1, 0);
10027         return ret;
10028 #endif
10029     case TARGET_NR_reboot:
10030         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10031            /* arg4 must be ignored in all other cases */
10032            p = lock_user_string(arg4);
10033            if (!p) {
10034                return -TARGET_EFAULT;
10035            }
10036            ret = get_errno(reboot(arg1, arg2, arg3, p));
10037            unlock_user(p, arg4, 0);
10038         } else {
10039            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10040         }
10041         return ret;
10042 #ifdef TARGET_NR_mmap
10043     case TARGET_NR_mmap:
10044 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10045     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10046     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10047     || defined(TARGET_S390X)
10048         {
10049             abi_ulong *v;
10050             abi_ulong v1, v2, v3, v4, v5, v6;
10051             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10052                 return -TARGET_EFAULT;
10053             v1 = tswapal(v[0]);
10054             v2 = tswapal(v[1]);
10055             v3 = tswapal(v[2]);
10056             v4 = tswapal(v[3]);
10057             v5 = tswapal(v[4]);
10058             v6 = tswapal(v[5]);
10059             unlock_user(v, arg1, 0);
10060             ret = get_errno(target_mmap(v1, v2, v3,
10061                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10062                                         v5, v6));
10063         }
10064 #else
10065         /* mmap pointers are always untagged */
10066         ret = get_errno(target_mmap(arg1, arg2, arg3,
10067                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10068                                     arg5,
10069                                     arg6));
10070 #endif
10071         return ret;
10072 #endif
10073 #ifdef TARGET_NR_mmap2
10074     case TARGET_NR_mmap2:
10075 #ifndef MMAP_SHIFT
10076 #define MMAP_SHIFT 12
10077 #endif
10078         ret = target_mmap(arg1, arg2, arg3,
10079                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10080                           arg5, arg6 << MMAP_SHIFT);
10081         return get_errno(ret);
10082 #endif
10083     case TARGET_NR_munmap:
10084         arg1 = cpu_untagged_addr(cpu, arg1);
10085         return get_errno(target_munmap(arg1, arg2));
10086     case TARGET_NR_mprotect:
10087         arg1 = cpu_untagged_addr(cpu, arg1);
10088         {
10089             TaskState *ts = cpu->opaque;
10090             /* Special hack to detect libc making the stack executable.  */
10091             if ((arg3 & PROT_GROWSDOWN)
10092                 && arg1 >= ts->info->stack_limit
10093                 && arg1 <= ts->info->start_stack) {
10094                 arg3 &= ~PROT_GROWSDOWN;
10095                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10096                 arg1 = ts->info->stack_limit;
10097             }
10098         }
10099         return get_errno(target_mprotect(arg1, arg2, arg3));
10100 #ifdef TARGET_NR_mremap
10101     case TARGET_NR_mremap:
10102         arg1 = cpu_untagged_addr(cpu, arg1);
10103         /* mremap new_addr (arg5) is always untagged */
10104         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10105 #endif
10106         /* ??? msync/mlock/munlock are broken for softmmu.  */
10107 #ifdef TARGET_NR_msync
10108     case TARGET_NR_msync:
10109         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10110 #endif
10111 #ifdef TARGET_NR_mlock
10112     case TARGET_NR_mlock:
10113         return get_errno(mlock(g2h(cpu, arg1), arg2));
10114 #endif
10115 #ifdef TARGET_NR_munlock
10116     case TARGET_NR_munlock:
10117         return get_errno(munlock(g2h(cpu, arg1), arg2));
10118 #endif
10119 #ifdef TARGET_NR_mlockall
10120     case TARGET_NR_mlockall:
10121         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10122 #endif
10123 #ifdef TARGET_NR_munlockall
10124     case TARGET_NR_munlockall:
10125         return get_errno(munlockall());
10126 #endif
10127 #ifdef TARGET_NR_truncate
10128     case TARGET_NR_truncate:
10129         if (!(p = lock_user_string(arg1)))
10130             return -TARGET_EFAULT;
10131         ret = get_errno(truncate(p, arg2));
10132         unlock_user(p, arg1, 0);
10133         return ret;
10134 #endif
10135 #ifdef TARGET_NR_ftruncate
10136     case TARGET_NR_ftruncate:
10137         return get_errno(ftruncate(arg1, arg2));
10138 #endif
10139     case TARGET_NR_fchmod:
10140         return get_errno(fchmod(arg1, arg2));
10141 #if defined(TARGET_NR_fchmodat)
10142     case TARGET_NR_fchmodat:
10143         if (!(p = lock_user_string(arg2)))
10144             return -TARGET_EFAULT;
10145         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10146         unlock_user(p, arg2, 0);
10147         return ret;
10148 #endif
10149     case TARGET_NR_getpriority:
10150         /* Note that negative values are valid for getpriority, so we must
10151            differentiate based on errno settings.  */
10152         errno = 0;
10153         ret = getpriority(arg1, arg2);
10154         if (ret == -1 && errno != 0) {
10155             return -host_to_target_errno(errno);
10156         }
10157 #ifdef TARGET_ALPHA
10158         /* Return value is the unbiased priority.  Signal no error.  */
10159         cpu_env->ir[IR_V0] = 0;
10160 #else
10161         /* Return value is a biased priority to avoid negative numbers.  */
10162         ret = 20 - ret;
10163 #endif
10164         return ret;
10165     case TARGET_NR_setpriority:
10166         return get_errno(setpriority(arg1, arg2, arg3));
10167 #ifdef TARGET_NR_statfs
10168     case TARGET_NR_statfs:
10169         if (!(p = lock_user_string(arg1))) {
10170             return -TARGET_EFAULT;
10171         }
10172         ret = get_errno(statfs(path(p), &stfs));
10173         unlock_user(p, arg1, 0);
10174     convert_statfs:
10175         if (!is_error(ret)) {
10176             struct target_statfs *target_stfs;
10177 
10178             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10179                 return -TARGET_EFAULT;
10180             __put_user(stfs.f_type, &target_stfs->f_type);
10181             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10182             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10183             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10184             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10185             __put_user(stfs.f_files, &target_stfs->f_files);
10186             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10187             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10188             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10189             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10190             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10191 #ifdef _STATFS_F_FLAGS
10192             __put_user(stfs.f_flags, &target_stfs->f_flags);
10193 #else
10194             __put_user(0, &target_stfs->f_flags);
10195 #endif
10196             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10197             unlock_user_struct(target_stfs, arg2, 1);
10198         }
10199         return ret;
10200 #endif
10201 #ifdef TARGET_NR_fstatfs
10202     case TARGET_NR_fstatfs:
10203         ret = get_errno(fstatfs(arg1, &stfs));
10204         goto convert_statfs;
10205 #endif
10206 #ifdef TARGET_NR_statfs64
10207     case TARGET_NR_statfs64:
10208         if (!(p = lock_user_string(arg1))) {
10209             return -TARGET_EFAULT;
10210         }
10211         ret = get_errno(statfs(path(p), &stfs));
10212         unlock_user(p, arg1, 0);
10213     convert_statfs64:
10214         if (!is_error(ret)) {
10215             struct target_statfs64 *target_stfs;
10216 
10217             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10218                 return -TARGET_EFAULT;
10219             __put_user(stfs.f_type, &target_stfs->f_type);
10220             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10221             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10222             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10223             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10224             __put_user(stfs.f_files, &target_stfs->f_files);
10225             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10226             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10227             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10228             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10229             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10230 #ifdef _STATFS_F_FLAGS
10231             __put_user(stfs.f_flags, &target_stfs->f_flags);
10232 #else
10233             __put_user(0, &target_stfs->f_flags);
10234 #endif
10235             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10236             unlock_user_struct(target_stfs, arg3, 1);
10237         }
10238         return ret;
10239     case TARGET_NR_fstatfs64:
10240         ret = get_errno(fstatfs(arg1, &stfs));
10241         goto convert_statfs64;
10242 #endif
10243 #ifdef TARGET_NR_socketcall
10244     case TARGET_NR_socketcall:
10245         return do_socketcall(arg1, arg2);
10246 #endif
10247 #ifdef TARGET_NR_accept
10248     case TARGET_NR_accept:
10249         return do_accept4(arg1, arg2, arg3, 0);
10250 #endif
10251 #ifdef TARGET_NR_accept4
10252     case TARGET_NR_accept4:
10253         return do_accept4(arg1, arg2, arg3, arg4);
10254 #endif
10255 #ifdef TARGET_NR_bind
10256     case TARGET_NR_bind:
10257         return do_bind(arg1, arg2, arg3);
10258 #endif
10259 #ifdef TARGET_NR_connect
10260     case TARGET_NR_connect:
10261         return do_connect(arg1, arg2, arg3);
10262 #endif
10263 #ifdef TARGET_NR_getpeername
10264     case TARGET_NR_getpeername:
10265         return do_getpeername(arg1, arg2, arg3);
10266 #endif
10267 #ifdef TARGET_NR_getsockname
10268     case TARGET_NR_getsockname:
10269         return do_getsockname(arg1, arg2, arg3);
10270 #endif
10271 #ifdef TARGET_NR_getsockopt
10272     case TARGET_NR_getsockopt:
10273         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10274 #endif
10275 #ifdef TARGET_NR_listen
10276     case TARGET_NR_listen:
10277         return get_errno(listen(arg1, arg2));
10278 #endif
10279 #ifdef TARGET_NR_recv
10280     case TARGET_NR_recv:
10281         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10282 #endif
10283 #ifdef TARGET_NR_recvfrom
10284     case TARGET_NR_recvfrom:
10285         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10286 #endif
10287 #ifdef TARGET_NR_recvmsg
10288     case TARGET_NR_recvmsg:
10289         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10290 #endif
10291 #ifdef TARGET_NR_send
10292     case TARGET_NR_send:
10293         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10294 #endif
10295 #ifdef TARGET_NR_sendmsg
10296     case TARGET_NR_sendmsg:
10297         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10298 #endif
10299 #ifdef TARGET_NR_sendmmsg
10300     case TARGET_NR_sendmmsg:
10301         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10302 #endif
10303 #ifdef TARGET_NR_recvmmsg
10304     case TARGET_NR_recvmmsg:
10305         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10306 #endif
10307 #ifdef TARGET_NR_sendto
10308     case TARGET_NR_sendto:
10309         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10310 #endif
10311 #ifdef TARGET_NR_shutdown
10312     case TARGET_NR_shutdown:
10313         return get_errno(shutdown(arg1, arg2));
10314 #endif
10315 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10316     case TARGET_NR_getrandom:
10317         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10318         if (!p) {
10319             return -TARGET_EFAULT;
10320         }
10321         ret = get_errno(getrandom(p, arg2, arg3));
10322         unlock_user(p, arg1, ret);
10323         return ret;
10324 #endif
10325 #ifdef TARGET_NR_socket
10326     case TARGET_NR_socket:
10327         return do_socket(arg1, arg2, arg3);
10328 #endif
10329 #ifdef TARGET_NR_socketpair
10330     case TARGET_NR_socketpair:
10331         return do_socketpair(arg1, arg2, arg3, arg4);
10332 #endif
10333 #ifdef TARGET_NR_setsockopt
10334     case TARGET_NR_setsockopt:
10335         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10336 #endif
10337 #if defined(TARGET_NR_syslog)
10338     case TARGET_NR_syslog:
10339         {
10340             int len = arg2;
10341 
10342             switch (arg1) {
10343             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10344             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10345             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10346             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10347             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10348             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10349             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10350             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10351                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10352             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10353             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10354             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10355                 {
10356                     if (len < 0) {
10357                         return -TARGET_EINVAL;
10358                     }
10359                     if (len == 0) {
10360                         return 0;
10361                     }
10362                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10363                     if (!p) {
10364                         return -TARGET_EFAULT;
10365                     }
10366                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10367                     unlock_user(p, arg2, arg3);
10368                 }
10369                 return ret;
10370             default:
10371                 return -TARGET_EINVAL;
10372             }
10373         }
10374         break;
10375 #endif
10376     case TARGET_NR_setitimer:
10377         {
10378             struct itimerval value, ovalue, *pvalue;
10379 
10380             if (arg2) {
10381                 pvalue = &value;
10382                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10383                     || copy_from_user_timeval(&pvalue->it_value,
10384                                               arg2 + sizeof(struct target_timeval)))
10385                     return -TARGET_EFAULT;
10386             } else {
10387                 pvalue = NULL;
10388             }
10389             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10390             if (!is_error(ret) && arg3) {
10391                 if (copy_to_user_timeval(arg3,
10392                                          &ovalue.it_interval)
10393                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10394                                             &ovalue.it_value))
10395                     return -TARGET_EFAULT;
10396             }
10397         }
10398         return ret;
10399     case TARGET_NR_getitimer:
10400         {
10401             struct itimerval value;
10402 
10403             ret = get_errno(getitimer(arg1, &value));
10404             if (!is_error(ret) && arg2) {
10405                 if (copy_to_user_timeval(arg2,
10406                                          &value.it_interval)
10407                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10408                                             &value.it_value))
10409                     return -TARGET_EFAULT;
10410             }
10411         }
10412         return ret;
10413 #ifdef TARGET_NR_stat
10414     case TARGET_NR_stat:
10415         if (!(p = lock_user_string(arg1))) {
10416             return -TARGET_EFAULT;
10417         }
10418         ret = get_errno(stat(path(p), &st));
10419         unlock_user(p, arg1, 0);
10420         goto do_stat;
10421 #endif
10422 #ifdef TARGET_NR_lstat
10423     case TARGET_NR_lstat:
10424         if (!(p = lock_user_string(arg1))) {
10425             return -TARGET_EFAULT;
10426         }
10427         ret = get_errno(lstat(path(p), &st));
10428         unlock_user(p, arg1, 0);
10429         goto do_stat;
10430 #endif
10431 #ifdef TARGET_NR_fstat
10432     case TARGET_NR_fstat:
10433         {
10434             ret = get_errno(fstat(arg1, &st));
10435 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10436         do_stat:
10437 #endif
10438             if (!is_error(ret)) {
10439                 struct target_stat *target_st;
10440 
10441                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10442                     return -TARGET_EFAULT;
10443                 memset(target_st, 0, sizeof(*target_st));
10444                 __put_user(st.st_dev, &target_st->st_dev);
10445                 __put_user(st.st_ino, &target_st->st_ino);
10446                 __put_user(st.st_mode, &target_st->st_mode);
10447                 __put_user(st.st_uid, &target_st->st_uid);
10448                 __put_user(st.st_gid, &target_st->st_gid);
10449                 __put_user(st.st_nlink, &target_st->st_nlink);
10450                 __put_user(st.st_rdev, &target_st->st_rdev);
10451                 __put_user(st.st_size, &target_st->st_size);
10452                 __put_user(st.st_blksize, &target_st->st_blksize);
10453                 __put_user(st.st_blocks, &target_st->st_blocks);
10454                 __put_user(st.st_atime, &target_st->target_st_atime);
10455                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10456                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10457 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10458                 __put_user(st.st_atim.tv_nsec,
10459                            &target_st->target_st_atime_nsec);
10460                 __put_user(st.st_mtim.tv_nsec,
10461                            &target_st->target_st_mtime_nsec);
10462                 __put_user(st.st_ctim.tv_nsec,
10463                            &target_st->target_st_ctime_nsec);
10464 #endif
10465                 unlock_user_struct(target_st, arg2, 1);
10466             }
10467         }
10468         return ret;
10469 #endif
10470     case TARGET_NR_vhangup:
10471         return get_errno(vhangup());
10472 #ifdef TARGET_NR_syscall
10473     case TARGET_NR_syscall:
10474         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10475                           arg6, arg7, arg8, 0);
10476 #endif
10477 #if defined(TARGET_NR_wait4)
10478     case TARGET_NR_wait4:
10479         {
10480             int status;
10481             abi_long status_ptr = arg2;
10482             struct rusage rusage, *rusage_ptr;
10483             abi_ulong target_rusage = arg4;
10484             abi_long rusage_err;
10485             if (target_rusage)
10486                 rusage_ptr = &rusage;
10487             else
10488                 rusage_ptr = NULL;
10489             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10490             if (!is_error(ret)) {
10491                 if (status_ptr && ret) {
10492                     status = host_to_target_waitstatus(status);
10493                     if (put_user_s32(status, status_ptr))
10494                         return -TARGET_EFAULT;
10495                 }
10496                 if (target_rusage) {
10497                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10498                     if (rusage_err) {
10499                         ret = rusage_err;
10500                     }
10501                 }
10502             }
10503         }
10504         return ret;
10505 #endif
10506 #ifdef TARGET_NR_swapoff
10507     case TARGET_NR_swapoff:
10508         if (!(p = lock_user_string(arg1)))
10509             return -TARGET_EFAULT;
10510         ret = get_errno(swapoff(p));
10511         unlock_user(p, arg1, 0);
10512         return ret;
10513 #endif
10514     case TARGET_NR_sysinfo:
10515         {
10516             struct target_sysinfo *target_value;
10517             struct sysinfo value;
10518             ret = get_errno(sysinfo(&value));
10519             if (!is_error(ret) && arg1)
10520             {
10521                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10522                     return -TARGET_EFAULT;
10523                 __put_user(value.uptime, &target_value->uptime);
10524                 __put_user(value.loads[0], &target_value->loads[0]);
10525                 __put_user(value.loads[1], &target_value->loads[1]);
10526                 __put_user(value.loads[2], &target_value->loads[2]);
10527                 __put_user(value.totalram, &target_value->totalram);
10528                 __put_user(value.freeram, &target_value->freeram);
10529                 __put_user(value.sharedram, &target_value->sharedram);
10530                 __put_user(value.bufferram, &target_value->bufferram);
10531                 __put_user(value.totalswap, &target_value->totalswap);
10532                 __put_user(value.freeswap, &target_value->freeswap);
10533                 __put_user(value.procs, &target_value->procs);
10534                 __put_user(value.totalhigh, &target_value->totalhigh);
10535                 __put_user(value.freehigh, &target_value->freehigh);
10536                 __put_user(value.mem_unit, &target_value->mem_unit);
10537                 unlock_user_struct(target_value, arg1, 1);
10538             }
10539         }
10540         return ret;
10541 #ifdef TARGET_NR_ipc
10542     case TARGET_NR_ipc:
10543         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10544 #endif
10545 #ifdef TARGET_NR_semget
10546     case TARGET_NR_semget:
10547         return get_errno(semget(arg1, arg2, arg3));
10548 #endif
10549 #ifdef TARGET_NR_semop
10550     case TARGET_NR_semop:
10551         return do_semtimedop(arg1, arg2, arg3, 0, false);
10552 #endif
10553 #ifdef TARGET_NR_semtimedop
10554     case TARGET_NR_semtimedop:
10555         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10556 #endif
10557 #ifdef TARGET_NR_semtimedop_time64
10558     case TARGET_NR_semtimedop_time64:
10559         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10560 #endif
10561 #ifdef TARGET_NR_semctl
10562     case TARGET_NR_semctl:
10563         return do_semctl(arg1, arg2, arg3, arg4);
10564 #endif
10565 #ifdef TARGET_NR_msgctl
10566     case TARGET_NR_msgctl:
10567         return do_msgctl(arg1, arg2, arg3);
10568 #endif
10569 #ifdef TARGET_NR_msgget
10570     case TARGET_NR_msgget:
10571         return get_errno(msgget(arg1, arg2));
10572 #endif
10573 #ifdef TARGET_NR_msgrcv
10574     case TARGET_NR_msgrcv:
10575         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10576 #endif
10577 #ifdef TARGET_NR_msgsnd
10578     case TARGET_NR_msgsnd:
10579         return do_msgsnd(arg1, arg2, arg3, arg4);
10580 #endif
10581 #ifdef TARGET_NR_shmget
10582     case TARGET_NR_shmget:
10583         return get_errno(shmget(arg1, arg2, arg3));
10584 #endif
10585 #ifdef TARGET_NR_shmctl
10586     case TARGET_NR_shmctl:
10587         return do_shmctl(arg1, arg2, arg3);
10588 #endif
10589 #ifdef TARGET_NR_shmat
10590     case TARGET_NR_shmat:
10591         return do_shmat(cpu_env, arg1, arg2, arg3);
10592 #endif
10593 #ifdef TARGET_NR_shmdt
10594     case TARGET_NR_shmdt:
10595         return do_shmdt(arg1);
10596 #endif
10597     case TARGET_NR_fsync:
10598         return get_errno(fsync(arg1));
10599     case TARGET_NR_clone:
10600         /* Linux manages to have three different orderings for its
10601          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10602          * match the kernel's CONFIG_CLONE_* settings.
10603          * Microblaze is further special in that it uses a sixth
10604          * implicit argument to clone for the TLS pointer.
10605          */
10606 #if defined(TARGET_MICROBLAZE)
10607         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10608 #elif defined(TARGET_CLONE_BACKWARDS)
10609         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10610 #elif defined(TARGET_CLONE_BACKWARDS2)
10611         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10612 #else
10613         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10614 #endif
10615         return ret;
10616 #ifdef __NR_exit_group
10617         /* new thread calls */
10618     case TARGET_NR_exit_group:
10619         preexit_cleanup(cpu_env, arg1);
10620         return get_errno(exit_group(arg1));
10621 #endif
10622     case TARGET_NR_setdomainname:
10623         if (!(p = lock_user_string(arg1)))
10624             return -TARGET_EFAULT;
10625         ret = get_errno(setdomainname(p, arg2));
10626         unlock_user(p, arg1, 0);
10627         return ret;
10628     case TARGET_NR_uname:
10629         /* no need to transcode because we use the linux syscall */
10630         {
10631             struct new_utsname * buf;
10632 
10633             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10634                 return -TARGET_EFAULT;
10635             ret = get_errno(sys_uname(buf));
10636             if (!is_error(ret)) {
10637                 /* Overwrite the native machine name with whatever is being
10638                    emulated. */
10639                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10640                           sizeof(buf->machine));
10641                 /* Allow the user to override the reported release.  */
10642                 if (qemu_uname_release && *qemu_uname_release) {
10643                     g_strlcpy(buf->release, qemu_uname_release,
10644                               sizeof(buf->release));
10645                 }
10646             }
10647             unlock_user_struct(buf, arg1, 1);
10648         }
10649         return ret;
10650 #ifdef TARGET_I386
10651     case TARGET_NR_modify_ldt:
10652         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10653 #if !defined(TARGET_X86_64)
10654     case TARGET_NR_vm86:
10655         return do_vm86(cpu_env, arg1, arg2);
10656 #endif
10657 #endif
10658 #if defined(TARGET_NR_adjtimex)
10659     case TARGET_NR_adjtimex:
10660         {
10661             struct timex host_buf;
10662 
10663             if (target_to_host_timex(&host_buf, arg1) != 0) {
10664                 return -TARGET_EFAULT;
10665             }
10666             ret = get_errno(adjtimex(&host_buf));
10667             if (!is_error(ret)) {
10668                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10669                     return -TARGET_EFAULT;
10670                 }
10671             }
10672         }
10673         return ret;
10674 #endif
10675 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10676     case TARGET_NR_clock_adjtime:
10677         {
10678             struct timex htx, *phtx = &htx;
10679 
10680             if (target_to_host_timex(phtx, arg2) != 0) {
10681                 return -TARGET_EFAULT;
10682             }
10683             ret = get_errno(clock_adjtime(arg1, phtx));
10684             if (!is_error(ret) && phtx) {
10685                 if (host_to_target_timex(arg2, phtx) != 0) {
10686                     return -TARGET_EFAULT;
10687                 }
10688             }
10689         }
10690         return ret;
10691 #endif
10692 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10693     case TARGET_NR_clock_adjtime64:
10694         {
10695             struct timex htx;
10696 
10697             if (target_to_host_timex64(&htx, arg2) != 0) {
10698                 return -TARGET_EFAULT;
10699             }
10700             ret = get_errno(clock_adjtime(arg1, &htx));
10701             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10702                     return -TARGET_EFAULT;
10703             }
10704         }
10705         return ret;
10706 #endif
10707     case TARGET_NR_getpgid:
10708         return get_errno(getpgid(arg1));
10709     case TARGET_NR_fchdir:
10710         return get_errno(fchdir(arg1));
10711     case TARGET_NR_personality:
10712         return get_errno(personality(arg1));
10713 #ifdef TARGET_NR__llseek /* Not on alpha */
10714     case TARGET_NR__llseek:
10715         {
10716             int64_t res;
10717 #if !defined(__NR_llseek)
10718             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10719             if (res == -1) {
10720                 ret = get_errno(res);
10721             } else {
10722                 ret = 0;
10723             }
10724 #else
10725             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10726 #endif
10727             if ((ret == 0) && put_user_s64(res, arg4)) {
10728                 return -TARGET_EFAULT;
10729             }
10730         }
10731         return ret;
10732 #endif
10733 #ifdef TARGET_NR_getdents
10734     case TARGET_NR_getdents:
10735         return do_getdents(arg1, arg2, arg3);
10736 #endif /* TARGET_NR_getdents */
10737 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10738     case TARGET_NR_getdents64:
10739         return do_getdents64(arg1, arg2, arg3);
10740 #endif /* TARGET_NR_getdents64 */
10741 #if defined(TARGET_NR__newselect)
10742     case TARGET_NR__newselect:
10743         return do_select(arg1, arg2, arg3, arg4, arg5);
10744 #endif
10745 #ifdef TARGET_NR_poll
10746     case TARGET_NR_poll:
10747         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10748 #endif
10749 #ifdef TARGET_NR_ppoll
10750     case TARGET_NR_ppoll:
10751         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10752 #endif
10753 #ifdef TARGET_NR_ppoll_time64
10754     case TARGET_NR_ppoll_time64:
10755         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10756 #endif
10757     case TARGET_NR_flock:
10758         /* NOTE: the flock constant seems to be the same for every
10759            Linux platform */
10760         return get_errno(safe_flock(arg1, arg2));
10761     case TARGET_NR_readv:
10762         {
10763             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10764             if (vec != NULL) {
10765                 ret = get_errno(safe_readv(arg1, vec, arg3));
10766                 unlock_iovec(vec, arg2, arg3, 1);
10767             } else {
10768                 ret = -host_to_target_errno(errno);
10769             }
10770         }
10771         return ret;
10772     case TARGET_NR_writev:
10773         {
10774             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10775             if (vec != NULL) {
10776                 ret = get_errno(safe_writev(arg1, vec, arg3));
10777                 unlock_iovec(vec, arg2, arg3, 0);
10778             } else {
10779                 ret = -host_to_target_errno(errno);
10780             }
10781         }
10782         return ret;
10783 #if defined(TARGET_NR_preadv)
10784     case TARGET_NR_preadv:
10785         {
10786             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10787             if (vec != NULL) {
10788                 unsigned long low, high;
10789 
10790                 target_to_host_low_high(arg4, arg5, &low, &high);
10791                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10792                 unlock_iovec(vec, arg2, arg3, 1);
10793             } else {
10794                 ret = -host_to_target_errno(errno);
10795            }
10796         }
10797         return ret;
10798 #endif
10799 #if defined(TARGET_NR_pwritev)
10800     case TARGET_NR_pwritev:
10801         {
10802             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10803             if (vec != NULL) {
10804                 unsigned long low, high;
10805 
10806                 target_to_host_low_high(arg4, arg5, &low, &high);
10807                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10808                 unlock_iovec(vec, arg2, arg3, 0);
10809             } else {
10810                 ret = -host_to_target_errno(errno);
10811            }
10812         }
10813         return ret;
10814 #endif
10815     case TARGET_NR_getsid:
10816         return get_errno(getsid(arg1));
10817 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10818     case TARGET_NR_fdatasync:
10819         return get_errno(fdatasync(arg1));
10820 #endif
10821     case TARGET_NR_sched_getaffinity:
10822         {
10823             unsigned int mask_size;
10824             unsigned long *mask;
10825 
10826             /*
10827              * sched_getaffinity needs multiples of ulong, so need to take
10828              * care of mismatches between target ulong and host ulong sizes.
10829              */
10830             if (arg2 & (sizeof(abi_ulong) - 1)) {
10831                 return -TARGET_EINVAL;
10832             }
10833             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10834 
10835             mask = alloca(mask_size);
10836             memset(mask, 0, mask_size);
10837             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10838 
10839             if (!is_error(ret)) {
10840                 if (ret > arg2) {
10841                     /* More data returned than the caller's buffer will fit.
10842                      * This only happens if sizeof(abi_long) < sizeof(long)
10843                      * and the caller passed us a buffer holding an odd number
10844                      * of abi_longs. If the host kernel is actually using the
10845                      * extra 4 bytes then fail EINVAL; otherwise we can just
10846                      * ignore them and only copy the interesting part.
10847                      */
10848                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10849                     if (numcpus > arg2 * 8) {
10850                         return -TARGET_EINVAL;
10851                     }
10852                     ret = arg2;
10853                 }
10854 
10855                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10856                     return -TARGET_EFAULT;
10857                 }
10858             }
10859         }
10860         return ret;
10861     case TARGET_NR_sched_setaffinity:
10862         {
10863             unsigned int mask_size;
10864             unsigned long *mask;
10865 
10866             /*
10867              * sched_setaffinity needs multiples of ulong, so need to take
10868              * care of mismatches between target ulong and host ulong sizes.
10869              */
10870             if (arg2 & (sizeof(abi_ulong) - 1)) {
10871                 return -TARGET_EINVAL;
10872             }
10873             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10874             mask = alloca(mask_size);
10875 
10876             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10877             if (ret) {
10878                 return ret;
10879             }
10880 
10881             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10882         }
10883     case TARGET_NR_getcpu:
10884         {
10885             unsigned cpu, node;
10886             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10887                                        arg2 ? &node : NULL,
10888                                        NULL));
10889             if (is_error(ret)) {
10890                 return ret;
10891             }
10892             if (arg1 && put_user_u32(cpu, arg1)) {
10893                 return -TARGET_EFAULT;
10894             }
10895             if (arg2 && put_user_u32(node, arg2)) {
10896                 return -TARGET_EFAULT;
10897             }
10898         }
10899         return ret;
10900     case TARGET_NR_sched_setparam:
10901         {
10902             struct target_sched_param *target_schp;
10903             struct sched_param schp;
10904 
10905             if (arg2 == 0) {
10906                 return -TARGET_EINVAL;
10907             }
10908             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10909                 return -TARGET_EFAULT;
10910             }
10911             schp.sched_priority = tswap32(target_schp->sched_priority);
10912             unlock_user_struct(target_schp, arg2, 0);
10913             return get_errno(sys_sched_setparam(arg1, &schp));
10914         }
10915     case TARGET_NR_sched_getparam:
10916         {
10917             struct target_sched_param *target_schp;
10918             struct sched_param schp;
10919 
10920             if (arg2 == 0) {
10921                 return -TARGET_EINVAL;
10922             }
10923             ret = get_errno(sys_sched_getparam(arg1, &schp));
10924             if (!is_error(ret)) {
10925                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10926                     return -TARGET_EFAULT;
10927                 }
10928                 target_schp->sched_priority = tswap32(schp.sched_priority);
10929                 unlock_user_struct(target_schp, arg2, 1);
10930             }
10931         }
10932         return ret;
10933     case TARGET_NR_sched_setscheduler:
10934         {
10935             struct target_sched_param *target_schp;
10936             struct sched_param schp;
10937             if (arg3 == 0) {
10938                 return -TARGET_EINVAL;
10939             }
10940             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10941                 return -TARGET_EFAULT;
10942             }
10943             schp.sched_priority = tswap32(target_schp->sched_priority);
10944             unlock_user_struct(target_schp, arg3, 0);
10945             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10946         }
10947     case TARGET_NR_sched_getscheduler:
10948         return get_errno(sys_sched_getscheduler(arg1));
10949     case TARGET_NR_sched_getattr:
10950         {
10951             struct target_sched_attr *target_scha;
10952             struct sched_attr scha;
10953             if (arg2 == 0) {
10954                 return -TARGET_EINVAL;
10955             }
10956             if (arg3 > sizeof(scha)) {
10957                 arg3 = sizeof(scha);
10958             }
10959             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10960             if (!is_error(ret)) {
10961                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10962                 if (!target_scha) {
10963                     return -TARGET_EFAULT;
10964                 }
10965                 target_scha->size = tswap32(scha.size);
10966                 target_scha->sched_policy = tswap32(scha.sched_policy);
10967                 target_scha->sched_flags = tswap64(scha.sched_flags);
10968                 target_scha->sched_nice = tswap32(scha.sched_nice);
10969                 target_scha->sched_priority = tswap32(scha.sched_priority);
10970                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10971                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10972                 target_scha->sched_period = tswap64(scha.sched_period);
10973                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10974                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10975                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10976                 }
10977                 unlock_user(target_scha, arg2, arg3);
10978             }
10979             return ret;
10980         }
10981     case TARGET_NR_sched_setattr:
10982         {
10983             struct target_sched_attr *target_scha;
10984             struct sched_attr scha;
10985             uint32_t size;
10986             int zeroed;
10987             if (arg2 == 0) {
10988                 return -TARGET_EINVAL;
10989             }
10990             if (get_user_u32(size, arg2)) {
10991                 return -TARGET_EFAULT;
10992             }
10993             if (!size) {
10994                 size = offsetof(struct target_sched_attr, sched_util_min);
10995             }
10996             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10997                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10998                     return -TARGET_EFAULT;
10999                 }
11000                 return -TARGET_E2BIG;
11001             }
11002 
11003             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11004             if (zeroed < 0) {
11005                 return zeroed;
11006             } else if (zeroed == 0) {
11007                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11008                     return -TARGET_EFAULT;
11009                 }
11010                 return -TARGET_E2BIG;
11011             }
11012             if (size > sizeof(struct target_sched_attr)) {
11013                 size = sizeof(struct target_sched_attr);
11014             }
11015 
11016             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11017             if (!target_scha) {
11018                 return -TARGET_EFAULT;
11019             }
11020             scha.size = size;
11021             scha.sched_policy = tswap32(target_scha->sched_policy);
11022             scha.sched_flags = tswap64(target_scha->sched_flags);
11023             scha.sched_nice = tswap32(target_scha->sched_nice);
11024             scha.sched_priority = tswap32(target_scha->sched_priority);
11025             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11026             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11027             scha.sched_period = tswap64(target_scha->sched_period);
11028             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11029                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11030                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11031             }
11032             unlock_user(target_scha, arg2, 0);
11033             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11034         }
11035     case TARGET_NR_sched_yield:
11036         return get_errno(sched_yield());
11037     case TARGET_NR_sched_get_priority_max:
11038         return get_errno(sched_get_priority_max(arg1));
11039     case TARGET_NR_sched_get_priority_min:
11040         return get_errno(sched_get_priority_min(arg1));
11041 #ifdef TARGET_NR_sched_rr_get_interval
11042     case TARGET_NR_sched_rr_get_interval:
11043         {
11044             struct timespec ts;
11045             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11046             if (!is_error(ret)) {
11047                 ret = host_to_target_timespec(arg2, &ts);
11048             }
11049         }
11050         return ret;
11051 #endif
11052 #ifdef TARGET_NR_sched_rr_get_interval_time64
11053     case TARGET_NR_sched_rr_get_interval_time64:
11054         {
11055             struct timespec ts;
11056             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11057             if (!is_error(ret)) {
11058                 ret = host_to_target_timespec64(arg2, &ts);
11059             }
11060         }
11061         return ret;
11062 #endif
11063 #if defined(TARGET_NR_nanosleep)
11064     case TARGET_NR_nanosleep:
11065         {
11066             struct timespec req, rem;
11067             target_to_host_timespec(&req, arg1);
11068             ret = get_errno(safe_nanosleep(&req, &rem));
11069             if (is_error(ret) && arg2) {
11070                 host_to_target_timespec(arg2, &rem);
11071             }
11072         }
11073         return ret;
11074 #endif
11075     case TARGET_NR_prctl:
11076         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11077         break;
11078 #ifdef TARGET_NR_arch_prctl
11079     case TARGET_NR_arch_prctl:
11080         return do_arch_prctl(cpu_env, arg1, arg2);
11081 #endif
11082 #ifdef TARGET_NR_pread64
11083     case TARGET_NR_pread64:
11084         if (regpairs_aligned(cpu_env, num)) {
11085             arg4 = arg5;
11086             arg5 = arg6;
11087         }
11088         if (arg2 == 0 && arg3 == 0) {
11089             /* Special-case NULL buffer and zero length, which should succeed */
11090             p = 0;
11091         } else {
11092             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11093             if (!p) {
11094                 return -TARGET_EFAULT;
11095             }
11096         }
11097         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11098         unlock_user(p, arg2, ret);
11099         return ret;
11100     case TARGET_NR_pwrite64:
11101         if (regpairs_aligned(cpu_env, num)) {
11102             arg4 = arg5;
11103             arg5 = arg6;
11104         }
11105         if (arg2 == 0 && arg3 == 0) {
11106             /* Special-case NULL buffer and zero length, which should succeed */
11107             p = 0;
11108         } else {
11109             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11110             if (!p) {
11111                 return -TARGET_EFAULT;
11112             }
11113         }
11114         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11115         unlock_user(p, arg2, 0);
11116         return ret;
11117 #endif
11118     case TARGET_NR_getcwd:
11119         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11120             return -TARGET_EFAULT;
11121         ret = get_errno(sys_getcwd1(p, arg2));
11122         unlock_user(p, arg1, ret);
11123         return ret;
11124     case TARGET_NR_capget:
11125     case TARGET_NR_capset:
11126     {
11127         struct target_user_cap_header *target_header;
11128         struct target_user_cap_data *target_data = NULL;
11129         struct __user_cap_header_struct header;
11130         struct __user_cap_data_struct data[2];
11131         struct __user_cap_data_struct *dataptr = NULL;
11132         int i, target_datalen;
11133         int data_items = 1;
11134 
11135         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11136             return -TARGET_EFAULT;
11137         }
11138         header.version = tswap32(target_header->version);
11139         header.pid = tswap32(target_header->pid);
11140 
11141         if (header.version != _LINUX_CAPABILITY_VERSION) {
11142             /* Version 2 and up takes pointer to two user_data structs */
11143             data_items = 2;
11144         }
11145 
11146         target_datalen = sizeof(*target_data) * data_items;
11147 
11148         if (arg2) {
11149             if (num == TARGET_NR_capget) {
11150                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11151             } else {
11152                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11153             }
11154             if (!target_data) {
11155                 unlock_user_struct(target_header, arg1, 0);
11156                 return -TARGET_EFAULT;
11157             }
11158 
11159             if (num == TARGET_NR_capset) {
11160                 for (i = 0; i < data_items; i++) {
11161                     data[i].effective = tswap32(target_data[i].effective);
11162                     data[i].permitted = tswap32(target_data[i].permitted);
11163                     data[i].inheritable = tswap32(target_data[i].inheritable);
11164                 }
11165             }
11166 
11167             dataptr = data;
11168         }
11169 
11170         if (num == TARGET_NR_capget) {
11171             ret = get_errno(capget(&header, dataptr));
11172         } else {
11173             ret = get_errno(capset(&header, dataptr));
11174         }
11175 
11176         /* The kernel always updates version for both capget and capset */
11177         target_header->version = tswap32(header.version);
11178         unlock_user_struct(target_header, arg1, 1);
11179 
11180         if (arg2) {
11181             if (num == TARGET_NR_capget) {
11182                 for (i = 0; i < data_items; i++) {
11183                     target_data[i].effective = tswap32(data[i].effective);
11184                     target_data[i].permitted = tswap32(data[i].permitted);
11185                     target_data[i].inheritable = tswap32(data[i].inheritable);
11186                 }
11187                 unlock_user(target_data, arg2, target_datalen);
11188             } else {
11189                 unlock_user(target_data, arg2, 0);
11190             }
11191         }
11192         return ret;
11193     }
11194     case TARGET_NR_sigaltstack:
11195         return do_sigaltstack(arg1, arg2, cpu_env);
11196 
11197 #ifdef CONFIG_SENDFILE
11198 #ifdef TARGET_NR_sendfile
11199     case TARGET_NR_sendfile:
11200     {
11201         off_t *offp = NULL;
11202         off_t off;
11203         if (arg3) {
11204             ret = get_user_sal(off, arg3);
11205             if (is_error(ret)) {
11206                 return ret;
11207             }
11208             offp = &off;
11209         }
11210         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11211         if (!is_error(ret) && arg3) {
11212             abi_long ret2 = put_user_sal(off, arg3);
11213             if (is_error(ret2)) {
11214                 ret = ret2;
11215             }
11216         }
11217         return ret;
11218     }
11219 #endif
11220 #ifdef TARGET_NR_sendfile64
11221     case TARGET_NR_sendfile64:
11222     {
11223         off_t *offp = NULL;
11224         off_t off;
11225         if (arg3) {
11226             ret = get_user_s64(off, arg3);
11227             if (is_error(ret)) {
11228                 return ret;
11229             }
11230             offp = &off;
11231         }
11232         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11233         if (!is_error(ret) && arg3) {
11234             abi_long ret2 = put_user_s64(off, arg3);
11235             if (is_error(ret2)) {
11236                 ret = ret2;
11237             }
11238         }
11239         return ret;
11240     }
11241 #endif
11242 #endif
11243 #ifdef TARGET_NR_vfork
11244     case TARGET_NR_vfork:
11245         return get_errno(do_fork(cpu_env,
11246                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11247                          0, 0, 0, 0));
11248 #endif
11249 #ifdef TARGET_NR_ugetrlimit
11250     case TARGET_NR_ugetrlimit:
11251     {
11252 	struct rlimit rlim;
11253 	int resource = target_to_host_resource(arg1);
11254 	ret = get_errno(getrlimit(resource, &rlim));
11255 	if (!is_error(ret)) {
11256 	    struct target_rlimit *target_rlim;
11257             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11258                 return -TARGET_EFAULT;
11259 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11260 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11261             unlock_user_struct(target_rlim, arg2, 1);
11262 	}
11263         return ret;
11264     }
11265 #endif
11266 #ifdef TARGET_NR_truncate64
11267     case TARGET_NR_truncate64:
11268         if (!(p = lock_user_string(arg1)))
11269             return -TARGET_EFAULT;
11270 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11271         unlock_user(p, arg1, 0);
11272         return ret;
11273 #endif
11274 #ifdef TARGET_NR_ftruncate64
11275     case TARGET_NR_ftruncate64:
11276         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11277 #endif
11278 #ifdef TARGET_NR_stat64
11279     case TARGET_NR_stat64:
11280         if (!(p = lock_user_string(arg1))) {
11281             return -TARGET_EFAULT;
11282         }
11283         ret = get_errno(stat(path(p), &st));
11284         unlock_user(p, arg1, 0);
11285         if (!is_error(ret))
11286             ret = host_to_target_stat64(cpu_env, arg2, &st);
11287         return ret;
11288 #endif
11289 #ifdef TARGET_NR_lstat64
11290     case TARGET_NR_lstat64:
11291         if (!(p = lock_user_string(arg1))) {
11292             return -TARGET_EFAULT;
11293         }
11294         ret = get_errno(lstat(path(p), &st));
11295         unlock_user(p, arg1, 0);
11296         if (!is_error(ret))
11297             ret = host_to_target_stat64(cpu_env, arg2, &st);
11298         return ret;
11299 #endif
11300 #ifdef TARGET_NR_fstat64
11301     case TARGET_NR_fstat64:
11302         ret = get_errno(fstat(arg1, &st));
11303         if (!is_error(ret))
11304             ret = host_to_target_stat64(cpu_env, arg2, &st);
11305         return ret;
11306 #endif
11307 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11308 #ifdef TARGET_NR_fstatat64
11309     case TARGET_NR_fstatat64:
11310 #endif
11311 #ifdef TARGET_NR_newfstatat
11312     case TARGET_NR_newfstatat:
11313 #endif
11314         if (!(p = lock_user_string(arg2))) {
11315             return -TARGET_EFAULT;
11316         }
11317         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11318         unlock_user(p, arg2, 0);
11319         if (!is_error(ret))
11320             ret = host_to_target_stat64(cpu_env, arg3, &st);
11321         return ret;
11322 #endif
11323 #if defined(TARGET_NR_statx)
11324     case TARGET_NR_statx:
11325         {
11326             struct target_statx *target_stx;
11327             int dirfd = arg1;
11328             int flags = arg3;
11329 
11330             p = lock_user_string(arg2);
11331             if (p == NULL) {
11332                 return -TARGET_EFAULT;
11333             }
11334 #if defined(__NR_statx)
11335             {
11336                 /*
11337                  * It is assumed that struct statx is architecture independent.
11338                  */
11339                 struct target_statx host_stx;
11340                 int mask = arg4;
11341 
11342                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11343                 if (!is_error(ret)) {
11344                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11345                         unlock_user(p, arg2, 0);
11346                         return -TARGET_EFAULT;
11347                     }
11348                 }
11349 
11350                 if (ret != -TARGET_ENOSYS) {
11351                     unlock_user(p, arg2, 0);
11352                     return ret;
11353                 }
11354             }
11355 #endif
11356             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11357             unlock_user(p, arg2, 0);
11358 
11359             if (!is_error(ret)) {
11360                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11361                     return -TARGET_EFAULT;
11362                 }
11363                 memset(target_stx, 0, sizeof(*target_stx));
11364                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11365                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11366                 __put_user(st.st_ino, &target_stx->stx_ino);
11367                 __put_user(st.st_mode, &target_stx->stx_mode);
11368                 __put_user(st.st_uid, &target_stx->stx_uid);
11369                 __put_user(st.st_gid, &target_stx->stx_gid);
11370                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11371                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11372                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11373                 __put_user(st.st_size, &target_stx->stx_size);
11374                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11375                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11376                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11377                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11378                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11379                 unlock_user_struct(target_stx, arg5, 1);
11380             }
11381         }
11382         return ret;
11383 #endif
11384 #ifdef TARGET_NR_lchown
11385     case TARGET_NR_lchown:
11386         if (!(p = lock_user_string(arg1)))
11387             return -TARGET_EFAULT;
11388         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11389         unlock_user(p, arg1, 0);
11390         return ret;
11391 #endif
11392 #ifdef TARGET_NR_getuid
11393     case TARGET_NR_getuid:
11394         return get_errno(high2lowuid(getuid()));
11395 #endif
11396 #ifdef TARGET_NR_getgid
11397     case TARGET_NR_getgid:
11398         return get_errno(high2lowgid(getgid()));
11399 #endif
11400 #ifdef TARGET_NR_geteuid
11401     case TARGET_NR_geteuid:
11402         return get_errno(high2lowuid(geteuid()));
11403 #endif
11404 #ifdef TARGET_NR_getegid
11405     case TARGET_NR_getegid:
11406         return get_errno(high2lowgid(getegid()));
11407 #endif
11408     case TARGET_NR_setreuid:
11409         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11410     case TARGET_NR_setregid:
11411         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11412     case TARGET_NR_getgroups:
11413         {
11414             int gidsetsize = arg1;
11415             target_id *target_grouplist;
11416             gid_t *grouplist;
11417             int i;
11418 
11419             grouplist = alloca(gidsetsize * sizeof(gid_t));
11420             ret = get_errno(getgroups(gidsetsize, grouplist));
11421             if (gidsetsize == 0)
11422                 return ret;
11423             if (!is_error(ret)) {
11424                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11425                 if (!target_grouplist)
11426                     return -TARGET_EFAULT;
11427                 for(i = 0;i < ret; i++)
11428                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11429                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11430             }
11431         }
11432         return ret;
11433     case TARGET_NR_setgroups:
11434         {
11435             int gidsetsize = arg1;
11436             target_id *target_grouplist;
11437             gid_t *grouplist = NULL;
11438             int i;
11439             if (gidsetsize) {
11440                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11441                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11442                 if (!target_grouplist) {
11443                     return -TARGET_EFAULT;
11444                 }
11445                 for (i = 0; i < gidsetsize; i++) {
11446                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11447                 }
11448                 unlock_user(target_grouplist, arg2, 0);
11449             }
11450             return get_errno(setgroups(gidsetsize, grouplist));
11451         }
11452     case TARGET_NR_fchown:
11453         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11454 #if defined(TARGET_NR_fchownat)
11455     case TARGET_NR_fchownat:
11456         if (!(p = lock_user_string(arg2)))
11457             return -TARGET_EFAULT;
11458         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11459                                  low2highgid(arg4), arg5));
11460         unlock_user(p, arg2, 0);
11461         return ret;
11462 #endif
11463 #ifdef TARGET_NR_setresuid
11464     case TARGET_NR_setresuid:
11465         return get_errno(sys_setresuid(low2highuid(arg1),
11466                                        low2highuid(arg2),
11467                                        low2highuid(arg3)));
11468 #endif
11469 #ifdef TARGET_NR_getresuid
11470     case TARGET_NR_getresuid:
11471         {
11472             uid_t ruid, euid, suid;
11473             ret = get_errno(getresuid(&ruid, &euid, &suid));
11474             if (!is_error(ret)) {
11475                 if (put_user_id(high2lowuid(ruid), arg1)
11476                     || put_user_id(high2lowuid(euid), arg2)
11477                     || put_user_id(high2lowuid(suid), arg3))
11478                     return -TARGET_EFAULT;
11479             }
11480         }
11481         return ret;
11482 #endif
11483 #ifdef TARGET_NR_getresgid
11484     case TARGET_NR_setresgid:
11485         return get_errno(sys_setresgid(low2highgid(arg1),
11486                                        low2highgid(arg2),
11487                                        low2highgid(arg3)));
11488 #endif
11489 #ifdef TARGET_NR_getresgid
11490     case TARGET_NR_getresgid:
11491         {
11492             gid_t rgid, egid, sgid;
11493             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11494             if (!is_error(ret)) {
11495                 if (put_user_id(high2lowgid(rgid), arg1)
11496                     || put_user_id(high2lowgid(egid), arg2)
11497                     || put_user_id(high2lowgid(sgid), arg3))
11498                     return -TARGET_EFAULT;
11499             }
11500         }
11501         return ret;
11502 #endif
11503 #ifdef TARGET_NR_chown
11504     case TARGET_NR_chown:
11505         if (!(p = lock_user_string(arg1)))
11506             return -TARGET_EFAULT;
11507         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11508         unlock_user(p, arg1, 0);
11509         return ret;
11510 #endif
11511     case TARGET_NR_setuid:
11512         return get_errno(sys_setuid(low2highuid(arg1)));
11513     case TARGET_NR_setgid:
11514         return get_errno(sys_setgid(low2highgid(arg1)));
11515     case TARGET_NR_setfsuid:
11516         return get_errno(setfsuid(arg1));
11517     case TARGET_NR_setfsgid:
11518         return get_errno(setfsgid(arg1));
11519 
11520 #ifdef TARGET_NR_lchown32
11521     case TARGET_NR_lchown32:
11522         if (!(p = lock_user_string(arg1)))
11523             return -TARGET_EFAULT;
11524         ret = get_errno(lchown(p, arg2, arg3));
11525         unlock_user(p, arg1, 0);
11526         return ret;
11527 #endif
11528 #ifdef TARGET_NR_getuid32
11529     case TARGET_NR_getuid32:
11530         return get_errno(getuid());
11531 #endif
11532 
11533 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11534    /* Alpha specific */
11535     case TARGET_NR_getxuid:
11536          {
11537             uid_t euid;
11538             euid=geteuid();
11539             cpu_env->ir[IR_A4]=euid;
11540          }
11541         return get_errno(getuid());
11542 #endif
11543 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11544    /* Alpha specific */
11545     case TARGET_NR_getxgid:
11546          {
11547             uid_t egid;
11548             egid=getegid();
11549             cpu_env->ir[IR_A4]=egid;
11550          }
11551         return get_errno(getgid());
11552 #endif
11553 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11554     /* Alpha specific */
11555     case TARGET_NR_osf_getsysinfo:
11556         ret = -TARGET_EOPNOTSUPP;
11557         switch (arg1) {
11558           case TARGET_GSI_IEEE_FP_CONTROL:
11559             {
11560                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11561                 uint64_t swcr = cpu_env->swcr;
11562 
11563                 swcr &= ~SWCR_STATUS_MASK;
11564                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11565 
11566                 if (put_user_u64 (swcr, arg2))
11567                         return -TARGET_EFAULT;
11568                 ret = 0;
11569             }
11570             break;
11571 
11572           /* case GSI_IEEE_STATE_AT_SIGNAL:
11573              -- Not implemented in linux kernel.
11574              case GSI_UACPROC:
11575              -- Retrieves current unaligned access state; not much used.
11576              case GSI_PROC_TYPE:
11577              -- Retrieves implver information; surely not used.
11578              case GSI_GET_HWRPB:
11579              -- Grabs a copy of the HWRPB; surely not used.
11580           */
11581         }
11582         return ret;
11583 #endif
11584 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11585     /* Alpha specific */
11586     case TARGET_NR_osf_setsysinfo:
11587         ret = -TARGET_EOPNOTSUPP;
11588         switch (arg1) {
11589           case TARGET_SSI_IEEE_FP_CONTROL:
11590             {
11591                 uint64_t swcr, fpcr;
11592 
11593                 if (get_user_u64 (swcr, arg2)) {
11594                     return -TARGET_EFAULT;
11595                 }
11596 
11597                 /*
11598                  * The kernel calls swcr_update_status to update the
11599                  * status bits from the fpcr at every point that it
11600                  * could be queried.  Therefore, we store the status
11601                  * bits only in FPCR.
11602                  */
11603                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11604 
11605                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11606                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11607                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11608                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11609                 ret = 0;
11610             }
11611             break;
11612 
11613           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11614             {
11615                 uint64_t exc, fpcr, fex;
11616 
11617                 if (get_user_u64(exc, arg2)) {
11618                     return -TARGET_EFAULT;
11619                 }
11620                 exc &= SWCR_STATUS_MASK;
11621                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11622 
11623                 /* Old exceptions are not signaled.  */
11624                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11625                 fex = exc & ~fex;
11626                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11627                 fex &= (cpu_env)->swcr;
11628 
11629                 /* Update the hardware fpcr.  */
11630                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11631                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11632 
11633                 if (fex) {
11634                     int si_code = TARGET_FPE_FLTUNK;
11635                     target_siginfo_t info;
11636 
11637                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11638                         si_code = TARGET_FPE_FLTUND;
11639                     }
11640                     if (fex & SWCR_TRAP_ENABLE_INE) {
11641                         si_code = TARGET_FPE_FLTRES;
11642                     }
11643                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11644                         si_code = TARGET_FPE_FLTUND;
11645                     }
11646                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11647                         si_code = TARGET_FPE_FLTOVF;
11648                     }
11649                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11650                         si_code = TARGET_FPE_FLTDIV;
11651                     }
11652                     if (fex & SWCR_TRAP_ENABLE_INV) {
11653                         si_code = TARGET_FPE_FLTINV;
11654                     }
11655 
11656                     info.si_signo = SIGFPE;
11657                     info.si_errno = 0;
11658                     info.si_code = si_code;
11659                     info._sifields._sigfault._addr = (cpu_env)->pc;
11660                     queue_signal(cpu_env, info.si_signo,
11661                                  QEMU_SI_FAULT, &info);
11662                 }
11663                 ret = 0;
11664             }
11665             break;
11666 
11667           /* case SSI_NVPAIRS:
11668              -- Used with SSIN_UACPROC to enable unaligned accesses.
11669              case SSI_IEEE_STATE_AT_SIGNAL:
11670              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11671              -- Not implemented in linux kernel
11672           */
11673         }
11674         return ret;
11675 #endif
11676 #ifdef TARGET_NR_osf_sigprocmask
11677     /* Alpha specific.  */
11678     case TARGET_NR_osf_sigprocmask:
11679         {
11680             abi_ulong mask;
11681             int how;
11682             sigset_t set, oldset;
11683 
11684             switch(arg1) {
11685             case TARGET_SIG_BLOCK:
11686                 how = SIG_BLOCK;
11687                 break;
11688             case TARGET_SIG_UNBLOCK:
11689                 how = SIG_UNBLOCK;
11690                 break;
11691             case TARGET_SIG_SETMASK:
11692                 how = SIG_SETMASK;
11693                 break;
11694             default:
11695                 return -TARGET_EINVAL;
11696             }
11697             mask = arg2;
11698             target_to_host_old_sigset(&set, &mask);
11699             ret = do_sigprocmask(how, &set, &oldset);
11700             if (!ret) {
11701                 host_to_target_old_sigset(&mask, &oldset);
11702                 ret = mask;
11703             }
11704         }
11705         return ret;
11706 #endif
11707 
11708 #ifdef TARGET_NR_getgid32
11709     case TARGET_NR_getgid32:
11710         return get_errno(getgid());
11711 #endif
11712 #ifdef TARGET_NR_geteuid32
11713     case TARGET_NR_geteuid32:
11714         return get_errno(geteuid());
11715 #endif
11716 #ifdef TARGET_NR_getegid32
11717     case TARGET_NR_getegid32:
11718         return get_errno(getegid());
11719 #endif
11720 #ifdef TARGET_NR_setreuid32
11721     case TARGET_NR_setreuid32:
11722         return get_errno(setreuid(arg1, arg2));
11723 #endif
11724 #ifdef TARGET_NR_setregid32
11725     case TARGET_NR_setregid32:
11726         return get_errno(setregid(arg1, arg2));
11727 #endif
11728 #ifdef TARGET_NR_getgroups32
11729     case TARGET_NR_getgroups32:
11730         {
11731             int gidsetsize = arg1;
11732             uint32_t *target_grouplist;
11733             gid_t *grouplist;
11734             int i;
11735 
11736             grouplist = alloca(gidsetsize * sizeof(gid_t));
11737             ret = get_errno(getgroups(gidsetsize, grouplist));
11738             if (gidsetsize == 0)
11739                 return ret;
11740             if (!is_error(ret)) {
11741                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11742                 if (!target_grouplist) {
11743                     return -TARGET_EFAULT;
11744                 }
11745                 for(i = 0;i < ret; i++)
11746                     target_grouplist[i] = tswap32(grouplist[i]);
11747                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11748             }
11749         }
11750         return ret;
11751 #endif
11752 #ifdef TARGET_NR_setgroups32
11753     case TARGET_NR_setgroups32:
11754         {
11755             int gidsetsize = arg1;
11756             uint32_t *target_grouplist;
11757             gid_t *grouplist;
11758             int i;
11759 
11760             grouplist = alloca(gidsetsize * sizeof(gid_t));
11761             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11762             if (!target_grouplist) {
11763                 return -TARGET_EFAULT;
11764             }
11765             for(i = 0;i < gidsetsize; i++)
11766                 grouplist[i] = tswap32(target_grouplist[i]);
11767             unlock_user(target_grouplist, arg2, 0);
11768             return get_errno(setgroups(gidsetsize, grouplist));
11769         }
11770 #endif
11771 #ifdef TARGET_NR_fchown32
11772     case TARGET_NR_fchown32:
11773         return get_errno(fchown(arg1, arg2, arg3));
11774 #endif
11775 #ifdef TARGET_NR_setresuid32
11776     case TARGET_NR_setresuid32:
11777         return get_errno(sys_setresuid(arg1, arg2, arg3));
11778 #endif
11779 #ifdef TARGET_NR_getresuid32
11780     case TARGET_NR_getresuid32:
11781         {
11782             uid_t ruid, euid, suid;
11783             ret = get_errno(getresuid(&ruid, &euid, &suid));
11784             if (!is_error(ret)) {
11785                 if (put_user_u32(ruid, arg1)
11786                     || put_user_u32(euid, arg2)
11787                     || put_user_u32(suid, arg3))
11788                     return -TARGET_EFAULT;
11789             }
11790         }
11791         return ret;
11792 #endif
11793 #ifdef TARGET_NR_setresgid32
11794     case TARGET_NR_setresgid32:
11795         return get_errno(sys_setresgid(arg1, arg2, arg3));
11796 #endif
11797 #ifdef TARGET_NR_getresgid32
11798     case TARGET_NR_getresgid32:
11799         {
11800             gid_t rgid, egid, sgid;
11801             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11802             if (!is_error(ret)) {
11803                 if (put_user_u32(rgid, arg1)
11804                     || put_user_u32(egid, arg2)
11805                     || put_user_u32(sgid, arg3))
11806                     return -TARGET_EFAULT;
11807             }
11808         }
11809         return ret;
11810 #endif
11811 #ifdef TARGET_NR_chown32
11812     case TARGET_NR_chown32:
11813         if (!(p = lock_user_string(arg1)))
11814             return -TARGET_EFAULT;
11815         ret = get_errno(chown(p, arg2, arg3));
11816         unlock_user(p, arg1, 0);
11817         return ret;
11818 #endif
11819 #ifdef TARGET_NR_setuid32
11820     case TARGET_NR_setuid32:
11821         return get_errno(sys_setuid(arg1));
11822 #endif
11823 #ifdef TARGET_NR_setgid32
11824     case TARGET_NR_setgid32:
11825         return get_errno(sys_setgid(arg1));
11826 #endif
11827 #ifdef TARGET_NR_setfsuid32
11828     case TARGET_NR_setfsuid32:
11829         return get_errno(setfsuid(arg1));
11830 #endif
11831 #ifdef TARGET_NR_setfsgid32
11832     case TARGET_NR_setfsgid32:
11833         return get_errno(setfsgid(arg1));
11834 #endif
11835 #ifdef TARGET_NR_mincore
11836     case TARGET_NR_mincore:
11837         {
11838             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11839             if (!a) {
11840                 return -TARGET_ENOMEM;
11841             }
11842             p = lock_user_string(arg3);
11843             if (!p) {
11844                 ret = -TARGET_EFAULT;
11845             } else {
11846                 ret = get_errno(mincore(a, arg2, p));
11847                 unlock_user(p, arg3, ret);
11848             }
11849             unlock_user(a, arg1, 0);
11850         }
11851         return ret;
11852 #endif
11853 #ifdef TARGET_NR_arm_fadvise64_64
11854     case TARGET_NR_arm_fadvise64_64:
11855         /* arm_fadvise64_64 looks like fadvise64_64 but
11856          * with different argument order: fd, advice, offset, len
11857          * rather than the usual fd, offset, len, advice.
11858          * Note that offset and len are both 64-bit so appear as
11859          * pairs of 32-bit registers.
11860          */
11861         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11862                             target_offset64(arg5, arg6), arg2);
11863         return -host_to_target_errno(ret);
11864 #endif
11865 
11866 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11867 
11868 #ifdef TARGET_NR_fadvise64_64
11869     case TARGET_NR_fadvise64_64:
11870 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11871         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11872         ret = arg2;
11873         arg2 = arg3;
11874         arg3 = arg4;
11875         arg4 = arg5;
11876         arg5 = arg6;
11877         arg6 = ret;
11878 #else
11879         /* 6 args: fd, offset (high, low), len (high, low), advice */
11880         if (regpairs_aligned(cpu_env, num)) {
11881             /* offset is in (3,4), len in (5,6) and advice in 7 */
11882             arg2 = arg3;
11883             arg3 = arg4;
11884             arg4 = arg5;
11885             arg5 = arg6;
11886             arg6 = arg7;
11887         }
11888 #endif
11889         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11890                             target_offset64(arg4, arg5), arg6);
11891         return -host_to_target_errno(ret);
11892 #endif
11893 
11894 #ifdef TARGET_NR_fadvise64
11895     case TARGET_NR_fadvise64:
11896         /* 5 args: fd, offset (high, low), len, advice */
11897         if (regpairs_aligned(cpu_env, num)) {
11898             /* offset is in (3,4), len in 5 and advice in 6 */
11899             arg2 = arg3;
11900             arg3 = arg4;
11901             arg4 = arg5;
11902             arg5 = arg6;
11903         }
11904         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11905         return -host_to_target_errno(ret);
11906 #endif
11907 
11908 #else /* not a 32-bit ABI */
11909 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11910 #ifdef TARGET_NR_fadvise64_64
11911     case TARGET_NR_fadvise64_64:
11912 #endif
11913 #ifdef TARGET_NR_fadvise64
11914     case TARGET_NR_fadvise64:
11915 #endif
11916 #ifdef TARGET_S390X
11917         switch (arg4) {
11918         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11919         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11920         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11921         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11922         default: break;
11923         }
11924 #endif
11925         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11926 #endif
11927 #endif /* end of 64-bit ABI fadvise handling */
11928 
11929 #ifdef TARGET_NR_madvise
11930     case TARGET_NR_madvise:
11931         return target_madvise(arg1, arg2, arg3);
11932 #endif
11933 #ifdef TARGET_NR_fcntl64
11934     case TARGET_NR_fcntl64:
11935     {
11936         int cmd;
11937         struct flock64 fl;
11938         from_flock64_fn *copyfrom = copy_from_user_flock64;
11939         to_flock64_fn *copyto = copy_to_user_flock64;
11940 
11941 #ifdef TARGET_ARM
11942         if (!cpu_env->eabi) {
11943             copyfrom = copy_from_user_oabi_flock64;
11944             copyto = copy_to_user_oabi_flock64;
11945         }
11946 #endif
11947 
11948         cmd = target_to_host_fcntl_cmd(arg2);
11949         if (cmd == -TARGET_EINVAL) {
11950             return cmd;
11951         }
11952 
11953         switch(arg2) {
11954         case TARGET_F_GETLK64:
11955             ret = copyfrom(&fl, arg3);
11956             if (ret) {
11957                 break;
11958             }
11959             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11960             if (ret == 0) {
11961                 ret = copyto(arg3, &fl);
11962             }
11963 	    break;
11964 
11965         case TARGET_F_SETLK64:
11966         case TARGET_F_SETLKW64:
11967             ret = copyfrom(&fl, arg3);
11968             if (ret) {
11969                 break;
11970             }
11971             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11972 	    break;
11973         default:
11974             ret = do_fcntl(arg1, arg2, arg3);
11975             break;
11976         }
11977         return ret;
11978     }
11979 #endif
11980 #ifdef TARGET_NR_cacheflush
11981     case TARGET_NR_cacheflush:
11982         /* self-modifying code is handled automatically, so nothing needed */
11983         return 0;
11984 #endif
11985 #ifdef TARGET_NR_getpagesize
11986     case TARGET_NR_getpagesize:
11987         return TARGET_PAGE_SIZE;
11988 #endif
11989     case TARGET_NR_gettid:
11990         return get_errno(sys_gettid());
11991 #ifdef TARGET_NR_readahead
11992     case TARGET_NR_readahead:
11993 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11994         if (regpairs_aligned(cpu_env, num)) {
11995             arg2 = arg3;
11996             arg3 = arg4;
11997             arg4 = arg5;
11998         }
11999         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12000 #else
12001         ret = get_errno(readahead(arg1, arg2, arg3));
12002 #endif
12003         return ret;
12004 #endif
12005 #ifdef CONFIG_ATTR
12006 #ifdef TARGET_NR_setxattr
12007     case TARGET_NR_listxattr:
12008     case TARGET_NR_llistxattr:
12009     {
12010         void *p, *b = 0;
12011         if (arg2) {
12012             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12013             if (!b) {
12014                 return -TARGET_EFAULT;
12015             }
12016         }
12017         p = lock_user_string(arg1);
12018         if (p) {
12019             if (num == TARGET_NR_listxattr) {
12020                 ret = get_errno(listxattr(p, b, arg3));
12021             } else {
12022                 ret = get_errno(llistxattr(p, b, arg3));
12023             }
12024         } else {
12025             ret = -TARGET_EFAULT;
12026         }
12027         unlock_user(p, arg1, 0);
12028         unlock_user(b, arg2, arg3);
12029         return ret;
12030     }
12031     case TARGET_NR_flistxattr:
12032     {
12033         void *b = 0;
12034         if (arg2) {
12035             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12036             if (!b) {
12037                 return -TARGET_EFAULT;
12038             }
12039         }
12040         ret = get_errno(flistxattr(arg1, b, arg3));
12041         unlock_user(b, arg2, arg3);
12042         return ret;
12043     }
12044     case TARGET_NR_setxattr:
12045     case TARGET_NR_lsetxattr:
12046         {
12047             void *p, *n, *v = 0;
12048             if (arg3) {
12049                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12050                 if (!v) {
12051                     return -TARGET_EFAULT;
12052                 }
12053             }
12054             p = lock_user_string(arg1);
12055             n = lock_user_string(arg2);
12056             if (p && n) {
12057                 if (num == TARGET_NR_setxattr) {
12058                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12059                 } else {
12060                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12061                 }
12062             } else {
12063                 ret = -TARGET_EFAULT;
12064             }
12065             unlock_user(p, arg1, 0);
12066             unlock_user(n, arg2, 0);
12067             unlock_user(v, arg3, 0);
12068         }
12069         return ret;
12070     case TARGET_NR_fsetxattr:
12071         {
12072             void *n, *v = 0;
12073             if (arg3) {
12074                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12075                 if (!v) {
12076                     return -TARGET_EFAULT;
12077                 }
12078             }
12079             n = lock_user_string(arg2);
12080             if (n) {
12081                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12082             } else {
12083                 ret = -TARGET_EFAULT;
12084             }
12085             unlock_user(n, arg2, 0);
12086             unlock_user(v, arg3, 0);
12087         }
12088         return ret;
12089     case TARGET_NR_getxattr:
12090     case TARGET_NR_lgetxattr:
12091         {
12092             void *p, *n, *v = 0;
12093             if (arg3) {
12094                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12095                 if (!v) {
12096                     return -TARGET_EFAULT;
12097                 }
12098             }
12099             p = lock_user_string(arg1);
12100             n = lock_user_string(arg2);
12101             if (p && n) {
12102                 if (num == TARGET_NR_getxattr) {
12103                     ret = get_errno(getxattr(p, n, v, arg4));
12104                 } else {
12105                     ret = get_errno(lgetxattr(p, n, v, arg4));
12106                 }
12107             } else {
12108                 ret = -TARGET_EFAULT;
12109             }
12110             unlock_user(p, arg1, 0);
12111             unlock_user(n, arg2, 0);
12112             unlock_user(v, arg3, arg4);
12113         }
12114         return ret;
12115     case TARGET_NR_fgetxattr:
12116         {
12117             void *n, *v = 0;
12118             if (arg3) {
12119                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12120                 if (!v) {
12121                     return -TARGET_EFAULT;
12122                 }
12123             }
12124             n = lock_user_string(arg2);
12125             if (n) {
12126                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12127             } else {
12128                 ret = -TARGET_EFAULT;
12129             }
12130             unlock_user(n, arg2, 0);
12131             unlock_user(v, arg3, arg4);
12132         }
12133         return ret;
12134     case TARGET_NR_removexattr:
12135     case TARGET_NR_lremovexattr:
12136         {
12137             void *p, *n;
12138             p = lock_user_string(arg1);
12139             n = lock_user_string(arg2);
12140             if (p && n) {
12141                 if (num == TARGET_NR_removexattr) {
12142                     ret = get_errno(removexattr(p, n));
12143                 } else {
12144                     ret = get_errno(lremovexattr(p, n));
12145                 }
12146             } else {
12147                 ret = -TARGET_EFAULT;
12148             }
12149             unlock_user(p, arg1, 0);
12150             unlock_user(n, arg2, 0);
12151         }
12152         return ret;
12153     case TARGET_NR_fremovexattr:
12154         {
12155             void *n;
12156             n = lock_user_string(arg2);
12157             if (n) {
12158                 ret = get_errno(fremovexattr(arg1, n));
12159             } else {
12160                 ret = -TARGET_EFAULT;
12161             }
12162             unlock_user(n, arg2, 0);
12163         }
12164         return ret;
12165 #endif
12166 #endif /* CONFIG_ATTR */
12167 #ifdef TARGET_NR_set_thread_area
12168     case TARGET_NR_set_thread_area:
12169 #if defined(TARGET_MIPS)
12170       cpu_env->active_tc.CP0_UserLocal = arg1;
12171       return 0;
12172 #elif defined(TARGET_CRIS)
12173       if (arg1 & 0xff)
12174           ret = -TARGET_EINVAL;
12175       else {
12176           cpu_env->pregs[PR_PID] = arg1;
12177           ret = 0;
12178       }
12179       return ret;
12180 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12181       return do_set_thread_area(cpu_env, arg1);
12182 #elif defined(TARGET_M68K)
12183       {
12184           TaskState *ts = cpu->opaque;
12185           ts->tp_value = arg1;
12186           return 0;
12187       }
12188 #else
12189       return -TARGET_ENOSYS;
12190 #endif
12191 #endif
12192 #ifdef TARGET_NR_get_thread_area
12193     case TARGET_NR_get_thread_area:
12194 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12195         return do_get_thread_area(cpu_env, arg1);
12196 #elif defined(TARGET_M68K)
12197         {
12198             TaskState *ts = cpu->opaque;
12199             return ts->tp_value;
12200         }
12201 #else
12202         return -TARGET_ENOSYS;
12203 #endif
12204 #endif
12205 #ifdef TARGET_NR_getdomainname
12206     case TARGET_NR_getdomainname:
12207         return -TARGET_ENOSYS;
12208 #endif
12209 
12210 #ifdef TARGET_NR_clock_settime
12211     case TARGET_NR_clock_settime:
12212     {
12213         struct timespec ts;
12214 
12215         ret = target_to_host_timespec(&ts, arg2);
12216         if (!is_error(ret)) {
12217             ret = get_errno(clock_settime(arg1, &ts));
12218         }
12219         return ret;
12220     }
12221 #endif
12222 #ifdef TARGET_NR_clock_settime64
12223     case TARGET_NR_clock_settime64:
12224     {
12225         struct timespec ts;
12226 
12227         ret = target_to_host_timespec64(&ts, arg2);
12228         if (!is_error(ret)) {
12229             ret = get_errno(clock_settime(arg1, &ts));
12230         }
12231         return ret;
12232     }
12233 #endif
12234 #ifdef TARGET_NR_clock_gettime
12235     case TARGET_NR_clock_gettime:
12236     {
12237         struct timespec ts;
12238         ret = get_errno(clock_gettime(arg1, &ts));
12239         if (!is_error(ret)) {
12240             ret = host_to_target_timespec(arg2, &ts);
12241         }
12242         return ret;
12243     }
12244 #endif
12245 #ifdef TARGET_NR_clock_gettime64
12246     case TARGET_NR_clock_gettime64:
12247     {
12248         struct timespec ts;
12249         ret = get_errno(clock_gettime(arg1, &ts));
12250         if (!is_error(ret)) {
12251             ret = host_to_target_timespec64(arg2, &ts);
12252         }
12253         return ret;
12254     }
12255 #endif
12256 #ifdef TARGET_NR_clock_getres
12257     case TARGET_NR_clock_getres:
12258     {
12259         struct timespec ts;
12260         ret = get_errno(clock_getres(arg1, &ts));
12261         if (!is_error(ret)) {
12262             host_to_target_timespec(arg2, &ts);
12263         }
12264         return ret;
12265     }
12266 #endif
12267 #ifdef TARGET_NR_clock_getres_time64
12268     case TARGET_NR_clock_getres_time64:
12269     {
12270         struct timespec ts;
12271         ret = get_errno(clock_getres(arg1, &ts));
12272         if (!is_error(ret)) {
12273             host_to_target_timespec64(arg2, &ts);
12274         }
12275         return ret;
12276     }
12277 #endif
12278 #ifdef TARGET_NR_clock_nanosleep
12279     case TARGET_NR_clock_nanosleep:
12280     {
12281         struct timespec ts;
12282         if (target_to_host_timespec(&ts, arg3)) {
12283             return -TARGET_EFAULT;
12284         }
12285         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12286                                              &ts, arg4 ? &ts : NULL));
12287         /*
12288          * if the call is interrupted by a signal handler, it fails
12289          * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12290          * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12291          */
12292         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12293             host_to_target_timespec(arg4, &ts)) {
12294               return -TARGET_EFAULT;
12295         }
12296 
12297         return ret;
12298     }
12299 #endif
12300 #ifdef TARGET_NR_clock_nanosleep_time64
12301     case TARGET_NR_clock_nanosleep_time64:
12302     {
12303         struct timespec ts;
12304 
12305         if (target_to_host_timespec64(&ts, arg3)) {
12306             return -TARGET_EFAULT;
12307         }
12308 
12309         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12310                                              &ts, arg4 ? &ts : NULL));
12311 
12312         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12313             host_to_target_timespec64(arg4, &ts)) {
12314             return -TARGET_EFAULT;
12315         }
12316         return ret;
12317     }
12318 #endif
12319 
12320 #if defined(TARGET_NR_set_tid_address)
12321     case TARGET_NR_set_tid_address:
12322     {
12323         TaskState *ts = cpu->opaque;
12324         ts->child_tidptr = arg1;
12325         /* do not call host set_tid_address() syscall, instead return tid() */
12326         return get_errno(sys_gettid());
12327     }
12328 #endif
12329 
12330     case TARGET_NR_tkill:
12331         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12332 
12333     case TARGET_NR_tgkill:
12334         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12335                          target_to_host_signal(arg3)));
12336 
12337 #ifdef TARGET_NR_set_robust_list
12338     case TARGET_NR_set_robust_list:
12339     case TARGET_NR_get_robust_list:
12340         /* The ABI for supporting robust futexes has userspace pass
12341          * the kernel a pointer to a linked list which is updated by
12342          * userspace after the syscall; the list is walked by the kernel
12343          * when the thread exits. Since the linked list in QEMU guest
12344          * memory isn't a valid linked list for the host and we have
12345          * no way to reliably intercept the thread-death event, we can't
12346          * support these. Silently return ENOSYS so that guest userspace
12347          * falls back to a non-robust futex implementation (which should
12348          * be OK except in the corner case of the guest crashing while
12349          * holding a mutex that is shared with another process via
12350          * shared memory).
12351          */
12352         return -TARGET_ENOSYS;
12353 #endif
12354 
12355 #if defined(TARGET_NR_utimensat)
12356     case TARGET_NR_utimensat:
12357         {
12358             struct timespec *tsp, ts[2];
12359             if (!arg3) {
12360                 tsp = NULL;
12361             } else {
12362                 if (target_to_host_timespec(ts, arg3)) {
12363                     return -TARGET_EFAULT;
12364                 }
12365                 if (target_to_host_timespec(ts + 1, arg3 +
12366                                             sizeof(struct target_timespec))) {
12367                     return -TARGET_EFAULT;
12368                 }
12369                 tsp = ts;
12370             }
12371             if (!arg2)
12372                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12373             else {
12374                 if (!(p = lock_user_string(arg2))) {
12375                     return -TARGET_EFAULT;
12376                 }
12377                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12378                 unlock_user(p, arg2, 0);
12379             }
12380         }
12381         return ret;
12382 #endif
12383 #ifdef TARGET_NR_utimensat_time64
12384     case TARGET_NR_utimensat_time64:
12385         {
12386             struct timespec *tsp, ts[2];
12387             if (!arg3) {
12388                 tsp = NULL;
12389             } else {
12390                 if (target_to_host_timespec64(ts, arg3)) {
12391                     return -TARGET_EFAULT;
12392                 }
12393                 if (target_to_host_timespec64(ts + 1, arg3 +
12394                                      sizeof(struct target__kernel_timespec))) {
12395                     return -TARGET_EFAULT;
12396                 }
12397                 tsp = ts;
12398             }
12399             if (!arg2)
12400                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12401             else {
12402                 p = lock_user_string(arg2);
12403                 if (!p) {
12404                     return -TARGET_EFAULT;
12405                 }
12406                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12407                 unlock_user(p, arg2, 0);
12408             }
12409         }
12410         return ret;
12411 #endif
12412 #ifdef TARGET_NR_futex
12413     case TARGET_NR_futex:
12414         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12415 #endif
12416 #ifdef TARGET_NR_futex_time64
12417     case TARGET_NR_futex_time64:
12418         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12419 #endif
12420 #ifdef CONFIG_INOTIFY
12421 #if defined(TARGET_NR_inotify_init)
12422     case TARGET_NR_inotify_init:
12423         ret = get_errno(inotify_init());
12424         if (ret >= 0) {
12425             fd_trans_register(ret, &target_inotify_trans);
12426         }
12427         return ret;
12428 #endif
12429 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12430     case TARGET_NR_inotify_init1:
12431         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12432                                           fcntl_flags_tbl)));
12433         if (ret >= 0) {
12434             fd_trans_register(ret, &target_inotify_trans);
12435         }
12436         return ret;
12437 #endif
12438 #if defined(TARGET_NR_inotify_add_watch)
12439     case TARGET_NR_inotify_add_watch:
12440         p = lock_user_string(arg2);
12441         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12442         unlock_user(p, arg2, 0);
12443         return ret;
12444 #endif
12445 #if defined(TARGET_NR_inotify_rm_watch)
12446     case TARGET_NR_inotify_rm_watch:
12447         return get_errno(inotify_rm_watch(arg1, arg2));
12448 #endif
12449 #endif
12450 
12451 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12452     case TARGET_NR_mq_open:
12453         {
12454             struct mq_attr posix_mq_attr;
12455             struct mq_attr *pposix_mq_attr;
12456             int host_flags;
12457 
12458             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12459             pposix_mq_attr = NULL;
12460             if (arg4) {
12461                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12462                     return -TARGET_EFAULT;
12463                 }
12464                 pposix_mq_attr = &posix_mq_attr;
12465             }
12466             p = lock_user_string(arg1 - 1);
12467             if (!p) {
12468                 return -TARGET_EFAULT;
12469             }
12470             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12471             unlock_user (p, arg1, 0);
12472         }
12473         return ret;
12474 
12475     case TARGET_NR_mq_unlink:
12476         p = lock_user_string(arg1 - 1);
12477         if (!p) {
12478             return -TARGET_EFAULT;
12479         }
12480         ret = get_errno(mq_unlink(p));
12481         unlock_user (p, arg1, 0);
12482         return ret;
12483 
12484 #ifdef TARGET_NR_mq_timedsend
12485     case TARGET_NR_mq_timedsend:
12486         {
12487             struct timespec ts;
12488 
12489             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12490             if (arg5 != 0) {
12491                 if (target_to_host_timespec(&ts, arg5)) {
12492                     return -TARGET_EFAULT;
12493                 }
12494                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12495                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12496                     return -TARGET_EFAULT;
12497                 }
12498             } else {
12499                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12500             }
12501             unlock_user (p, arg2, arg3);
12502         }
12503         return ret;
12504 #endif
12505 #ifdef TARGET_NR_mq_timedsend_time64
12506     case TARGET_NR_mq_timedsend_time64:
12507         {
12508             struct timespec ts;
12509 
12510             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12511             if (arg5 != 0) {
12512                 if (target_to_host_timespec64(&ts, arg5)) {
12513                     return -TARGET_EFAULT;
12514                 }
12515                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12516                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12517                     return -TARGET_EFAULT;
12518                 }
12519             } else {
12520                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12521             }
12522             unlock_user(p, arg2, arg3);
12523         }
12524         return ret;
12525 #endif
12526 
12527 #ifdef TARGET_NR_mq_timedreceive
12528     case TARGET_NR_mq_timedreceive:
12529         {
12530             struct timespec ts;
12531             unsigned int prio;
12532 
12533             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12534             if (arg5 != 0) {
12535                 if (target_to_host_timespec(&ts, arg5)) {
12536                     return -TARGET_EFAULT;
12537                 }
12538                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12539                                                      &prio, &ts));
12540                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12541                     return -TARGET_EFAULT;
12542                 }
12543             } else {
12544                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12545                                                      &prio, NULL));
12546             }
12547             unlock_user (p, arg2, arg3);
12548             if (arg4 != 0)
12549                 put_user_u32(prio, arg4);
12550         }
12551         return ret;
12552 #endif
12553 #ifdef TARGET_NR_mq_timedreceive_time64
12554     case TARGET_NR_mq_timedreceive_time64:
12555         {
12556             struct timespec ts;
12557             unsigned int prio;
12558 
12559             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12560             if (arg5 != 0) {
12561                 if (target_to_host_timespec64(&ts, arg5)) {
12562                     return -TARGET_EFAULT;
12563                 }
12564                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12565                                                      &prio, &ts));
12566                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12567                     return -TARGET_EFAULT;
12568                 }
12569             } else {
12570                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12571                                                      &prio, NULL));
12572             }
12573             unlock_user(p, arg2, arg3);
12574             if (arg4 != 0) {
12575                 put_user_u32(prio, arg4);
12576             }
12577         }
12578         return ret;
12579 #endif
12580 
12581     /* Not implemented for now... */
12582 /*     case TARGET_NR_mq_notify: */
12583 /*         break; */
12584 
12585     case TARGET_NR_mq_getsetattr:
12586         {
12587             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12588             ret = 0;
12589             if (arg2 != 0) {
12590                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12591                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12592                                            &posix_mq_attr_out));
12593             } else if (arg3 != 0) {
12594                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12595             }
12596             if (ret == 0 && arg3 != 0) {
12597                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12598             }
12599         }
12600         return ret;
12601 #endif
12602 
12603 #ifdef CONFIG_SPLICE
12604 #ifdef TARGET_NR_tee
12605     case TARGET_NR_tee:
12606         {
12607             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12608         }
12609         return ret;
12610 #endif
12611 #ifdef TARGET_NR_splice
12612     case TARGET_NR_splice:
12613         {
12614             loff_t loff_in, loff_out;
12615             loff_t *ploff_in = NULL, *ploff_out = NULL;
12616             if (arg2) {
12617                 if (get_user_u64(loff_in, arg2)) {
12618                     return -TARGET_EFAULT;
12619                 }
12620                 ploff_in = &loff_in;
12621             }
12622             if (arg4) {
12623                 if (get_user_u64(loff_out, arg4)) {
12624                     return -TARGET_EFAULT;
12625                 }
12626                 ploff_out = &loff_out;
12627             }
12628             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12629             if (arg2) {
12630                 if (put_user_u64(loff_in, arg2)) {
12631                     return -TARGET_EFAULT;
12632                 }
12633             }
12634             if (arg4) {
12635                 if (put_user_u64(loff_out, arg4)) {
12636                     return -TARGET_EFAULT;
12637                 }
12638             }
12639         }
12640         return ret;
12641 #endif
12642 #ifdef TARGET_NR_vmsplice
12643 	case TARGET_NR_vmsplice:
12644         {
12645             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12646             if (vec != NULL) {
12647                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12648                 unlock_iovec(vec, arg2, arg3, 0);
12649             } else {
12650                 ret = -host_to_target_errno(errno);
12651             }
12652         }
12653         return ret;
12654 #endif
12655 #endif /* CONFIG_SPLICE */
12656 #ifdef CONFIG_EVENTFD
12657 #if defined(TARGET_NR_eventfd)
12658     case TARGET_NR_eventfd:
12659         ret = get_errno(eventfd(arg1, 0));
12660         if (ret >= 0) {
12661             fd_trans_register(ret, &target_eventfd_trans);
12662         }
12663         return ret;
12664 #endif
12665 #if defined(TARGET_NR_eventfd2)
12666     case TARGET_NR_eventfd2:
12667     {
12668         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12669         if (arg2 & TARGET_O_NONBLOCK) {
12670             host_flags |= O_NONBLOCK;
12671         }
12672         if (arg2 & TARGET_O_CLOEXEC) {
12673             host_flags |= O_CLOEXEC;
12674         }
12675         ret = get_errno(eventfd(arg1, host_flags));
12676         if (ret >= 0) {
12677             fd_trans_register(ret, &target_eventfd_trans);
12678         }
12679         return ret;
12680     }
12681 #endif
12682 #endif /* CONFIG_EVENTFD  */
12683 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12684     case TARGET_NR_fallocate:
12685 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12686         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12687                                   target_offset64(arg5, arg6)));
12688 #else
12689         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12690 #endif
12691         return ret;
12692 #endif
12693 #if defined(CONFIG_SYNC_FILE_RANGE)
12694 #if defined(TARGET_NR_sync_file_range)
12695     case TARGET_NR_sync_file_range:
12696 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12697 #if defined(TARGET_MIPS)
12698         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12699                                         target_offset64(arg5, arg6), arg7));
12700 #else
12701         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12702                                         target_offset64(arg4, arg5), arg6));
12703 #endif /* !TARGET_MIPS */
12704 #else
12705         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12706 #endif
12707         return ret;
12708 #endif
12709 #if defined(TARGET_NR_sync_file_range2) || \
12710     defined(TARGET_NR_arm_sync_file_range)
12711 #if defined(TARGET_NR_sync_file_range2)
12712     case TARGET_NR_sync_file_range2:
12713 #endif
12714 #if defined(TARGET_NR_arm_sync_file_range)
12715     case TARGET_NR_arm_sync_file_range:
12716 #endif
12717         /* This is like sync_file_range but the arguments are reordered */
12718 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12719         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12720                                         target_offset64(arg5, arg6), arg2));
12721 #else
12722         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12723 #endif
12724         return ret;
12725 #endif
12726 #endif
12727 #if defined(TARGET_NR_signalfd4)
12728     case TARGET_NR_signalfd4:
12729         return do_signalfd4(arg1, arg2, arg4);
12730 #endif
12731 #if defined(TARGET_NR_signalfd)
12732     case TARGET_NR_signalfd:
12733         return do_signalfd4(arg1, arg2, 0);
12734 #endif
12735 #if defined(CONFIG_EPOLL)
12736 #if defined(TARGET_NR_epoll_create)
12737     case TARGET_NR_epoll_create:
12738         return get_errno(epoll_create(arg1));
12739 #endif
12740 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12741     case TARGET_NR_epoll_create1:
12742         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12743 #endif
12744 #if defined(TARGET_NR_epoll_ctl)
12745     case TARGET_NR_epoll_ctl:
12746     {
12747         struct epoll_event ep;
12748         struct epoll_event *epp = 0;
12749         if (arg4) {
12750             if (arg2 != EPOLL_CTL_DEL) {
12751                 struct target_epoll_event *target_ep;
12752                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12753                     return -TARGET_EFAULT;
12754                 }
12755                 ep.events = tswap32(target_ep->events);
12756                 /*
12757                  * The epoll_data_t union is just opaque data to the kernel,
12758                  * so we transfer all 64 bits across and need not worry what
12759                  * actual data type it is.
12760                  */
12761                 ep.data.u64 = tswap64(target_ep->data.u64);
12762                 unlock_user_struct(target_ep, arg4, 0);
12763             }
12764             /*
12765              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12766              * non-null pointer, even though this argument is ignored.
12767              *
12768              */
12769             epp = &ep;
12770         }
12771         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12772     }
12773 #endif
12774 
12775 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12776 #if defined(TARGET_NR_epoll_wait)
12777     case TARGET_NR_epoll_wait:
12778 #endif
12779 #if defined(TARGET_NR_epoll_pwait)
12780     case TARGET_NR_epoll_pwait:
12781 #endif
12782     {
12783         struct target_epoll_event *target_ep;
12784         struct epoll_event *ep;
12785         int epfd = arg1;
12786         int maxevents = arg3;
12787         int timeout = arg4;
12788 
12789         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12790             return -TARGET_EINVAL;
12791         }
12792 
12793         target_ep = lock_user(VERIFY_WRITE, arg2,
12794                               maxevents * sizeof(struct target_epoll_event), 1);
12795         if (!target_ep) {
12796             return -TARGET_EFAULT;
12797         }
12798 
12799         ep = g_try_new(struct epoll_event, maxevents);
12800         if (!ep) {
12801             unlock_user(target_ep, arg2, 0);
12802             return -TARGET_ENOMEM;
12803         }
12804 
12805         switch (num) {
12806 #if defined(TARGET_NR_epoll_pwait)
12807         case TARGET_NR_epoll_pwait:
12808         {
12809             sigset_t *set = NULL;
12810 
12811             if (arg5) {
12812                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12813                 if (ret != 0) {
12814                     break;
12815                 }
12816             }
12817 
12818             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12819                                              set, SIGSET_T_SIZE));
12820 
12821             if (set) {
12822                 finish_sigsuspend_mask(ret);
12823             }
12824             break;
12825         }
12826 #endif
12827 #if defined(TARGET_NR_epoll_wait)
12828         case TARGET_NR_epoll_wait:
12829             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12830                                              NULL, 0));
12831             break;
12832 #endif
12833         default:
12834             ret = -TARGET_ENOSYS;
12835         }
12836         if (!is_error(ret)) {
12837             int i;
12838             for (i = 0; i < ret; i++) {
12839                 target_ep[i].events = tswap32(ep[i].events);
12840                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12841             }
12842             unlock_user(target_ep, arg2,
12843                         ret * sizeof(struct target_epoll_event));
12844         } else {
12845             unlock_user(target_ep, arg2, 0);
12846         }
12847         g_free(ep);
12848         return ret;
12849     }
12850 #endif
12851 #endif
12852 #ifdef TARGET_NR_prlimit64
12853     case TARGET_NR_prlimit64:
12854     {
12855         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12856         struct target_rlimit64 *target_rnew, *target_rold;
12857         struct host_rlimit64 rnew, rold, *rnewp = 0;
12858         int resource = target_to_host_resource(arg2);
12859 
12860         if (arg3 && (resource != RLIMIT_AS &&
12861                      resource != RLIMIT_DATA &&
12862                      resource != RLIMIT_STACK)) {
12863             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12864                 return -TARGET_EFAULT;
12865             }
12866             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12867             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12868             unlock_user_struct(target_rnew, arg3, 0);
12869             rnewp = &rnew;
12870         }
12871 
12872         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12873         if (!is_error(ret) && arg4) {
12874             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12875                 return -TARGET_EFAULT;
12876             }
12877             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12878             target_rold->rlim_max = tswap64(rold.rlim_max);
12879             unlock_user_struct(target_rold, arg4, 1);
12880         }
12881         return ret;
12882     }
12883 #endif
12884 #ifdef TARGET_NR_gethostname
12885     case TARGET_NR_gethostname:
12886     {
12887         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12888         if (name) {
12889             ret = get_errno(gethostname(name, arg2));
12890             unlock_user(name, arg1, arg2);
12891         } else {
12892             ret = -TARGET_EFAULT;
12893         }
12894         return ret;
12895     }
12896 #endif
12897 #ifdef TARGET_NR_atomic_cmpxchg_32
12898     case TARGET_NR_atomic_cmpxchg_32:
12899     {
12900         /* should use start_exclusive from main.c */
12901         abi_ulong mem_value;
12902         if (get_user_u32(mem_value, arg6)) {
12903             target_siginfo_t info;
12904             info.si_signo = SIGSEGV;
12905             info.si_errno = 0;
12906             info.si_code = TARGET_SEGV_MAPERR;
12907             info._sifields._sigfault._addr = arg6;
12908             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12909             ret = 0xdeadbeef;
12910 
12911         }
12912         if (mem_value == arg2)
12913             put_user_u32(arg1, arg6);
12914         return mem_value;
12915     }
12916 #endif
12917 #ifdef TARGET_NR_atomic_barrier
12918     case TARGET_NR_atomic_barrier:
12919         /* Like the kernel implementation and the
12920            qemu arm barrier, no-op this? */
12921         return 0;
12922 #endif
12923 
12924 #ifdef TARGET_NR_timer_create
12925     case TARGET_NR_timer_create:
12926     {
12927         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12928 
12929         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12930 
12931         int clkid = arg1;
12932         int timer_index = next_free_host_timer();
12933 
12934         if (timer_index < 0) {
12935             ret = -TARGET_EAGAIN;
12936         } else {
12937             timer_t *phtimer = g_posix_timers  + timer_index;
12938 
12939             if (arg2) {
12940                 phost_sevp = &host_sevp;
12941                 ret = target_to_host_sigevent(phost_sevp, arg2);
12942                 if (ret != 0) {
12943                     free_host_timer_slot(timer_index);
12944                     return ret;
12945                 }
12946             }
12947 
12948             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12949             if (ret) {
12950                 free_host_timer_slot(timer_index);
12951             } else {
12952                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12953                     timer_delete(*phtimer);
12954                     free_host_timer_slot(timer_index);
12955                     return -TARGET_EFAULT;
12956                 }
12957             }
12958         }
12959         return ret;
12960     }
12961 #endif
12962 
12963 #ifdef TARGET_NR_timer_settime
12964     case TARGET_NR_timer_settime:
12965     {
12966         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12967          * struct itimerspec * old_value */
12968         target_timer_t timerid = get_timer_id(arg1);
12969 
12970         if (timerid < 0) {
12971             ret = timerid;
12972         } else if (arg3 == 0) {
12973             ret = -TARGET_EINVAL;
12974         } else {
12975             timer_t htimer = g_posix_timers[timerid];
12976             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12977 
12978             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12979                 return -TARGET_EFAULT;
12980             }
12981             ret = get_errno(
12982                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12983             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12984                 return -TARGET_EFAULT;
12985             }
12986         }
12987         return ret;
12988     }
12989 #endif
12990 
12991 #ifdef TARGET_NR_timer_settime64
12992     case TARGET_NR_timer_settime64:
12993     {
12994         target_timer_t timerid = get_timer_id(arg1);
12995 
12996         if (timerid < 0) {
12997             ret = timerid;
12998         } else if (arg3 == 0) {
12999             ret = -TARGET_EINVAL;
13000         } else {
13001             timer_t htimer = g_posix_timers[timerid];
13002             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13003 
13004             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13005                 return -TARGET_EFAULT;
13006             }
13007             ret = get_errno(
13008                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13009             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13010                 return -TARGET_EFAULT;
13011             }
13012         }
13013         return ret;
13014     }
13015 #endif
13016 
13017 #ifdef TARGET_NR_timer_gettime
13018     case TARGET_NR_timer_gettime:
13019     {
13020         /* args: timer_t timerid, struct itimerspec *curr_value */
13021         target_timer_t timerid = get_timer_id(arg1);
13022 
13023         if (timerid < 0) {
13024             ret = timerid;
13025         } else if (!arg2) {
13026             ret = -TARGET_EFAULT;
13027         } else {
13028             timer_t htimer = g_posix_timers[timerid];
13029             struct itimerspec hspec;
13030             ret = get_errno(timer_gettime(htimer, &hspec));
13031 
13032             if (host_to_target_itimerspec(arg2, &hspec)) {
13033                 ret = -TARGET_EFAULT;
13034             }
13035         }
13036         return ret;
13037     }
13038 #endif
13039 
13040 #ifdef TARGET_NR_timer_gettime64
13041     case TARGET_NR_timer_gettime64:
13042     {
13043         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13044         target_timer_t timerid = get_timer_id(arg1);
13045 
13046         if (timerid < 0) {
13047             ret = timerid;
13048         } else if (!arg2) {
13049             ret = -TARGET_EFAULT;
13050         } else {
13051             timer_t htimer = g_posix_timers[timerid];
13052             struct itimerspec hspec;
13053             ret = get_errno(timer_gettime(htimer, &hspec));
13054 
13055             if (host_to_target_itimerspec64(arg2, &hspec)) {
13056                 ret = -TARGET_EFAULT;
13057             }
13058         }
13059         return ret;
13060     }
13061 #endif
13062 
13063 #ifdef TARGET_NR_timer_getoverrun
13064     case TARGET_NR_timer_getoverrun:
13065     {
13066         /* args: timer_t timerid */
13067         target_timer_t timerid = get_timer_id(arg1);
13068 
13069         if (timerid < 0) {
13070             ret = timerid;
13071         } else {
13072             timer_t htimer = g_posix_timers[timerid];
13073             ret = get_errno(timer_getoverrun(htimer));
13074         }
13075         return ret;
13076     }
13077 #endif
13078 
13079 #ifdef TARGET_NR_timer_delete
13080     case TARGET_NR_timer_delete:
13081     {
13082         /* args: timer_t timerid */
13083         target_timer_t timerid = get_timer_id(arg1);
13084 
13085         if (timerid < 0) {
13086             ret = timerid;
13087         } else {
13088             timer_t htimer = g_posix_timers[timerid];
13089             ret = get_errno(timer_delete(htimer));
13090             free_host_timer_slot(timerid);
13091         }
13092         return ret;
13093     }
13094 #endif
13095 
13096 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13097     case TARGET_NR_timerfd_create:
13098         return get_errno(timerfd_create(arg1,
13099                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13100 #endif
13101 
13102 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13103     case TARGET_NR_timerfd_gettime:
13104         {
13105             struct itimerspec its_curr;
13106 
13107             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13108 
13109             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13110                 return -TARGET_EFAULT;
13111             }
13112         }
13113         return ret;
13114 #endif
13115 
13116 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13117     case TARGET_NR_timerfd_gettime64:
13118         {
13119             struct itimerspec its_curr;
13120 
13121             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13122 
13123             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13124                 return -TARGET_EFAULT;
13125             }
13126         }
13127         return ret;
13128 #endif
13129 
13130 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13131     case TARGET_NR_timerfd_settime:
13132         {
13133             struct itimerspec its_new, its_old, *p_new;
13134 
13135             if (arg3) {
13136                 if (target_to_host_itimerspec(&its_new, arg3)) {
13137                     return -TARGET_EFAULT;
13138                 }
13139                 p_new = &its_new;
13140             } else {
13141                 p_new = NULL;
13142             }
13143 
13144             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13145 
13146             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13147                 return -TARGET_EFAULT;
13148             }
13149         }
13150         return ret;
13151 #endif
13152 
13153 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13154     case TARGET_NR_timerfd_settime64:
13155         {
13156             struct itimerspec its_new, its_old, *p_new;
13157 
13158             if (arg3) {
13159                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13160                     return -TARGET_EFAULT;
13161                 }
13162                 p_new = &its_new;
13163             } else {
13164                 p_new = NULL;
13165             }
13166 
13167             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13168 
13169             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13170                 return -TARGET_EFAULT;
13171             }
13172         }
13173         return ret;
13174 #endif
13175 
13176 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13177     case TARGET_NR_ioprio_get:
13178         return get_errno(ioprio_get(arg1, arg2));
13179 #endif
13180 
13181 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13182     case TARGET_NR_ioprio_set:
13183         return get_errno(ioprio_set(arg1, arg2, arg3));
13184 #endif
13185 
13186 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13187     case TARGET_NR_setns:
13188         return get_errno(setns(arg1, arg2));
13189 #endif
13190 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13191     case TARGET_NR_unshare:
13192         return get_errno(unshare(arg1));
13193 #endif
13194 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13195     case TARGET_NR_kcmp:
13196         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13197 #endif
13198 #ifdef TARGET_NR_swapcontext
13199     case TARGET_NR_swapcontext:
13200         /* PowerPC specific.  */
13201         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13202 #endif
13203 #ifdef TARGET_NR_memfd_create
13204     case TARGET_NR_memfd_create:
13205         p = lock_user_string(arg1);
13206         if (!p) {
13207             return -TARGET_EFAULT;
13208         }
13209         ret = get_errno(memfd_create(p, arg2));
13210         fd_trans_unregister(ret);
13211         unlock_user(p, arg1, 0);
13212         return ret;
13213 #endif
13214 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13215     case TARGET_NR_membarrier:
13216         return get_errno(membarrier(arg1, arg2));
13217 #endif
13218 
13219 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13220     case TARGET_NR_copy_file_range:
13221         {
13222             loff_t inoff, outoff;
13223             loff_t *pinoff = NULL, *poutoff = NULL;
13224 
13225             if (arg2) {
13226                 if (get_user_u64(inoff, arg2)) {
13227                     return -TARGET_EFAULT;
13228                 }
13229                 pinoff = &inoff;
13230             }
13231             if (arg4) {
13232                 if (get_user_u64(outoff, arg4)) {
13233                     return -TARGET_EFAULT;
13234                 }
13235                 poutoff = &outoff;
13236             }
13237             /* Do not sign-extend the count parameter. */
13238             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13239                                                  (abi_ulong)arg5, arg6));
13240             if (!is_error(ret) && ret > 0) {
13241                 if (arg2) {
13242                     if (put_user_u64(inoff, arg2)) {
13243                         return -TARGET_EFAULT;
13244                     }
13245                 }
13246                 if (arg4) {
13247                     if (put_user_u64(outoff, arg4)) {
13248                         return -TARGET_EFAULT;
13249                     }
13250                 }
13251             }
13252         }
13253         return ret;
13254 #endif
13255 
13256 #if defined(TARGET_NR_pivot_root)
13257     case TARGET_NR_pivot_root:
13258         {
13259             void *p2;
13260             p = lock_user_string(arg1); /* new_root */
13261             p2 = lock_user_string(arg2); /* put_old */
13262             if (!p || !p2) {
13263                 ret = -TARGET_EFAULT;
13264             } else {
13265                 ret = get_errno(pivot_root(p, p2));
13266             }
13267             unlock_user(p2, arg2, 0);
13268             unlock_user(p, arg1, 0);
13269         }
13270         return ret;
13271 #endif
13272 
13273     default:
13274         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13275         return -TARGET_ENOSYS;
13276     }
13277     return ret;
13278 }
13279 
13280 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13281                     abi_long arg2, abi_long arg3, abi_long arg4,
13282                     abi_long arg5, abi_long arg6, abi_long arg7,
13283                     abi_long arg8)
13284 {
13285     CPUState *cpu = env_cpu(cpu_env);
13286     abi_long ret;
13287 
13288 #ifdef DEBUG_ERESTARTSYS
13289     /* Debug-only code for exercising the syscall-restart code paths
13290      * in the per-architecture cpu main loops: restart every syscall
13291      * the guest makes once before letting it through.
13292      */
13293     {
13294         static bool flag;
13295         flag = !flag;
13296         if (flag) {
13297             return -QEMU_ERESTARTSYS;
13298         }
13299     }
13300 #endif
13301 
13302     record_syscall_start(cpu, num, arg1,
13303                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13304 
13305     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13306         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13307     }
13308 
13309     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13310                       arg5, arg6, arg7, arg8);
13311 
13312     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13313         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13314                           arg3, arg4, arg5, arg6);
13315     }
13316 
13317     record_syscall_return(cpu, num, ret);
13318     return ret;
13319 }
13320