xref: /openbmc/qemu/linux-user/syscall.c (revision 35a2c85f)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 
99 #ifdef HAVE_SYS_MOUNT_FSCONFIG
100 /*
101  * With glibc >= 2.36, linux/mount.h conflicts with sys/mount.h,
102  * which in turn prevents use of linux/fs.h. So we have to
103  * define the constants ourselves for now.
104  */
105 #define FS_IOC_GETFLAGS                _IOR('f', 1, long)
106 #define FS_IOC_SETFLAGS                _IOW('f', 2, long)
107 #define FS_IOC_GETVERSION              _IOR('v', 1, long)
108 #define FS_IOC_SETVERSION              _IOW('v', 2, long)
109 #define FS_IOC_FIEMAP                  _IOWR('f', 11, struct fiemap)
110 #define FS_IOC32_GETFLAGS              _IOR('f', 1, int)
111 #define FS_IOC32_SETFLAGS              _IOW('f', 2, int)
112 #define FS_IOC32_GETVERSION            _IOR('v', 1, int)
113 #define FS_IOC32_SETVERSION            _IOW('v', 2, int)
114 
115 #define BLKGETSIZE64 _IOR(0x12,114,size_t)
116 #define BLKDISCARD _IO(0x12,119)
117 #define BLKIOMIN _IO(0x12,120)
118 #define BLKIOOPT _IO(0x12,121)
119 #define BLKALIGNOFF _IO(0x12,122)
120 #define BLKPBSZGET _IO(0x12,123)
121 #define BLKDISCARDZEROES _IO(0x12,124)
122 #define BLKSECDISCARD _IO(0x12,125)
123 #define BLKROTATIONAL _IO(0x12,126)
124 #define BLKZEROOUT _IO(0x12,127)
125 
126 #define FIBMAP     _IO(0x00,1)
127 #define FIGETBSZ   _IO(0x00,2)
128 
129 struct file_clone_range {
130         __s64 src_fd;
131         __u64 src_offset;
132         __u64 src_length;
133         __u64 dest_offset;
134 };
135 
136 #define FICLONE         _IOW(0x94, 9, int)
137 #define FICLONERANGE    _IOW(0x94, 13, struct file_clone_range)
138 
139 #else
140 #include <linux/fs.h>
141 #endif
142 #include <linux/fd.h>
143 #if defined(CONFIG_FIEMAP)
144 #include <linux/fiemap.h>
145 #endif
146 #include <linux/fb.h>
147 #if defined(CONFIG_USBFS)
148 #include <linux/usbdevice_fs.h>
149 #include <linux/usb/ch9.h>
150 #endif
151 #include <linux/vt.h>
152 #include <linux/dm-ioctl.h>
153 #include <linux/reboot.h>
154 #include <linux/route.h>
155 #include <linux/filter.h>
156 #include <linux/blkpg.h>
157 #include <netpacket/packet.h>
158 #include <linux/netlink.h>
159 #include <linux/if_alg.h>
160 #include <linux/rtc.h>
161 #include <sound/asound.h>
162 #ifdef HAVE_BTRFS_H
163 #include <linux/btrfs.h>
164 #endif
165 #ifdef HAVE_DRM_H
166 #include <libdrm/drm.h>
167 #include <libdrm/i915_drm.h>
168 #endif
169 #include "linux_loop.h"
170 #include "uname.h"
171 
172 #include "qemu.h"
173 #include "user-internals.h"
174 #include "strace.h"
175 #include "signal-common.h"
176 #include "loader.h"
177 #include "user-mmap.h"
178 #include "user/safe-syscall.h"
179 #include "qemu/guest-random.h"
180 #include "qemu/selfmap.h"
181 #include "user/syscall-trace.h"
182 #include "special-errno.h"
183 #include "qapi/error.h"
184 #include "fd-trans.h"
185 #include "tcg/tcg.h"
186 
187 #ifndef CLONE_IO
188 #define CLONE_IO                0x80000000      /* Clone io context */
189 #endif
190 
191 /* We can't directly call the host clone syscall, because this will
192  * badly confuse libc (breaking mutexes, for example). So we must
193  * divide clone flags into:
194  *  * flag combinations that look like pthread_create()
195  *  * flag combinations that look like fork()
196  *  * flags we can implement within QEMU itself
197  *  * flags we can't support and will return an error for
198  */
199 /* For thread creation, all these flags must be present; for
200  * fork, none must be present.
201  */
202 #define CLONE_THREAD_FLAGS                              \
203     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
204      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
205 
206 /* These flags are ignored:
207  * CLONE_DETACHED is now ignored by the kernel;
208  * CLONE_IO is just an optimisation hint to the I/O scheduler
209  */
210 #define CLONE_IGNORED_FLAGS                     \
211     (CLONE_DETACHED | CLONE_IO)
212 
213 /* Flags for fork which we can implement within QEMU itself */
214 #define CLONE_OPTIONAL_FORK_FLAGS               \
215     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
216      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
217 
218 /* Flags for thread creation which we can implement within QEMU itself */
219 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
220     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
221      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
222 
223 #define CLONE_INVALID_FORK_FLAGS                                        \
224     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
225 
226 #define CLONE_INVALID_THREAD_FLAGS                                      \
227     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
228        CLONE_IGNORED_FLAGS))
229 
230 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
231  * have almost all been allocated. We cannot support any of
232  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
233  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
234  * The checks against the invalid thread masks above will catch these.
235  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
236  */
237 
238 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
239  * once. This exercises the codepaths for restart.
240  */
241 //#define DEBUG_ERESTARTSYS
242 
243 //#include <linux/msdos_fs.h>
244 #define VFAT_IOCTL_READDIR_BOTH \
245     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
246 #define VFAT_IOCTL_READDIR_SHORT \
247     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
248 
249 #undef _syscall0
250 #undef _syscall1
251 #undef _syscall2
252 #undef _syscall3
253 #undef _syscall4
254 #undef _syscall5
255 #undef _syscall6
256 
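/*
 * Each _syscallN(type, name, ...) macro below expands to a small static
 * wrapper that invokes the raw host syscall via syscall(2).  For example,
 * _syscall0(int, sys_gettid), combined with the __NR_sys_gettid alias
 * defined further down, expands to:
 *     static int sys_gettid(void) { return syscall(__NR_gettid); }
 */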
257 #define _syscall0(type,name)		\
258 static type name (void)			\
259 {					\
260 	return syscall(__NR_##name);	\
261 }
262 
263 #define _syscall1(type,name,type1,arg1)		\
264 static type name (type1 arg1)			\
265 {						\
266 	return syscall(__NR_##name, arg1);	\
267 }
268 
269 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
270 static type name (type1 arg1,type2 arg2)		\
271 {							\
272 	return syscall(__NR_##name, arg1, arg2);	\
273 }
274 
275 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
276 static type name (type1 arg1,type2 arg2,type3 arg3)		\
277 {								\
278 	return syscall(__NR_##name, arg1, arg2, arg3);		\
279 }
280 
281 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
282 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
283 {										\
284 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
285 }
286 
287 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
288 		  type5,arg5)							\
289 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
290 {										\
291 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
292 }
293 
294 
295 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
296 		  type5,arg5,type6,arg6)					\
297 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
298                   type6 arg6)							\
299 {										\
300 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
301 }
302 
303 
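/*
 * The wrappers generated below are named sys_xxx so that they do not clash
 * with the libc functions of the same name; these aliases map the wrapper
 * names back onto the corresponding host syscall numbers.
 */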
304 #define __NR_sys_uname __NR_uname
305 #define __NR_sys_getcwd1 __NR_getcwd
306 #define __NR_sys_getdents __NR_getdents
307 #define __NR_sys_getdents64 __NR_getdents64
308 #define __NR_sys_getpriority __NR_getpriority
309 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
310 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
311 #define __NR_sys_syslog __NR_syslog
312 #if defined(__NR_futex)
313 # define __NR_sys_futex __NR_futex
314 #endif
315 #if defined(__NR_futex_time64)
316 # define __NR_sys_futex_time64 __NR_futex_time64
317 #endif
318 #define __NR_sys_statx __NR_statx
319 
320 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
321 #define __NR__llseek __NR_lseek
322 #endif
323 
324 /* Newer kernel ports have llseek() instead of _llseek() */
325 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
326 #define TARGET_NR__llseek TARGET_NR_llseek
327 #endif
328 
329 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
330 #ifndef TARGET_O_NONBLOCK_MASK
331 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
332 #endif
333 
334 #define __NR_sys_gettid __NR_gettid
335 _syscall0(int, sys_gettid)
336 
337 /* For the 64-bit guest on 32-bit host case we must emulate
338  * getdents using getdents64, because otherwise the host
339  * might hand us back more dirent records than we can fit
340  * into the guest buffer after structure format conversion.
341  * Otherwise we implement the guest getdents on top of the host getdents, if the host has it.
342  */
343 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
344 #define EMULATE_GETDENTS_WITH_GETDENTS
345 #endif
346 
347 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
348 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
349 #endif
350 #if (defined(TARGET_NR_getdents) && \
351       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
352     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
353 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
354 #endif
355 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
356 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
357           loff_t *, res, uint, wh);
358 #endif
359 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
360 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
361           siginfo_t *, uinfo)
362 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
363 #ifdef __NR_exit_group
364 _syscall1(int,exit_group,int,error_code)
365 #endif
366 #if defined(__NR_futex)
367 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
368           const struct timespec *,timeout,int *,uaddr2,int,val3)
369 #endif
370 #if defined(__NR_futex_time64)
371 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
372           const struct timespec *,timeout,int *,uaddr2,int,val3)
373 #endif
374 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
375 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
376 #endif
377 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
378 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
379                              unsigned int, flags);
380 #endif
381 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
382 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
383 #endif
384 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
385 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
386           unsigned long *, user_mask_ptr);
387 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
388 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
389           unsigned long *, user_mask_ptr);
390 /* sched_attr is not defined in glibc */
391 struct sched_attr {
392     uint32_t size;
393     uint32_t sched_policy;
394     uint64_t sched_flags;
395     int32_t sched_nice;
396     uint32_t sched_priority;
397     uint64_t sched_runtime;
398     uint64_t sched_deadline;
399     uint64_t sched_period;
400     uint32_t sched_util_min;
401     uint32_t sched_util_max;
402 };
403 #define __NR_sys_sched_getattr __NR_sched_getattr
404 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
405           unsigned int, size, unsigned int, flags);
406 #define __NR_sys_sched_setattr __NR_sched_setattr
407 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
408           unsigned int, flags);
409 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
410 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
411 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
412 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
413           const struct sched_param *, param);
414 #define __NR_sys_sched_getparam __NR_sched_getparam
415 _syscall2(int, sys_sched_getparam, pid_t, pid,
416           struct sched_param *, param);
417 #define __NR_sys_sched_setparam __NR_sched_setparam
418 _syscall2(int, sys_sched_setparam, pid_t, pid,
419           const struct sched_param *, param);
420 #define __NR_sys_getcpu __NR_getcpu
421 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
422 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
423           void *, arg);
424 _syscall2(int, capget, struct __user_cap_header_struct *, header,
425           struct __user_cap_data_struct *, data);
426 _syscall2(int, capset, struct __user_cap_header_struct *, header,
427           struct __user_cap_data_struct *, data);
428 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
429 _syscall2(int, ioprio_get, int, which, int, who)
430 #endif
431 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
432 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
433 #endif
434 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
435 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
436 #endif
437 
438 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
439 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
440           unsigned long, idx1, unsigned long, idx2)
441 #endif
442 
443 /*
444  * It is assumed that struct statx is architecture independent.
445  */
446 #if defined(TARGET_NR_statx) && defined(__NR_statx)
447 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
448           unsigned int, mask, struct target_statx *, statxbuf)
449 #endif
450 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
451 _syscall2(int, membarrier, int, cmd, int, flags)
452 #endif
453 
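/*
 * Translation table for the open()/fcntl() file status flags.  Each row is
 * { target mask, target bits, host mask, host bits }: a flag whose masked
 * guest value matches the target bits is translated to the host bits (and
 * vice versa) by the bitmask translation helpers such as
 * target_to_host_bitmask().
 */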
454 static const bitmask_transtbl fcntl_flags_tbl[] = {
455   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
456   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
457   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
458   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
459   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
460   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
461   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
462   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
463   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
464   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
465   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
466   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
467   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
468 #if defined(O_DIRECT)
469   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
470 #endif
471 #if defined(O_NOATIME)
472   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
473 #endif
474 #if defined(O_CLOEXEC)
475   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
476 #endif
477 #if defined(O_PATH)
478   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
479 #endif
480 #if defined(O_TMPFILE)
481   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
482 #endif
483   /* Don't terminate the list prematurely on 64-bit host+guest.  */
484 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
485   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
486 #endif
487   { 0, 0, 0, 0 }
488 };
489 
490 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
491 
492 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
493 #if defined(__NR_utimensat)
494 #define __NR_sys_utimensat __NR_utimensat
495 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
496           const struct timespec *,tsp,int,flags)
497 #else
498 static int sys_utimensat(int dirfd, const char *pathname,
499                          const struct timespec times[2], int flags)
500 {
501     errno = ENOSYS;
502     return -1;
503 }
504 #endif
505 #endif /* TARGET_NR_utimensat */
506 
507 #ifdef TARGET_NR_renameat2
508 #if defined(__NR_renameat2)
509 #define __NR_sys_renameat2 __NR_renameat2
510 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
511           const char *, new, unsigned int, flags)
512 #else
513 static int sys_renameat2(int oldfd, const char *old,
514                          int newfd, const char *new, int flags)
515 {
516     if (flags == 0) {
517         return renameat(oldfd, old, newfd, new);
518     }
519     errno = ENOSYS;
520     return -1;
521 }
522 #endif
523 #endif /* TARGET_NR_renameat2 */
524 
525 #ifdef CONFIG_INOTIFY
526 #include <sys/inotify.h>
527 #else
528 /* Userspace can usually survive at runtime without inotify support */
529 #undef TARGET_NR_inotify_init
530 #undef TARGET_NR_inotify_init1
531 #undef TARGET_NR_inotify_add_watch
532 #undef TARGET_NR_inotify_rm_watch
533 #endif /* CONFIG_INOTIFY  */
534 
535 #if defined(TARGET_NR_prlimit64)
536 #ifndef __NR_prlimit64
537 # define __NR_prlimit64 -1
538 #endif
539 #define __NR_sys_prlimit64 __NR_prlimit64
540 /* The glibc rlimit structure may not match the one used by the underlying syscall */
541 struct host_rlimit64 {
542     uint64_t rlim_cur;
543     uint64_t rlim_max;
544 };
545 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
546           const struct host_rlimit64 *, new_limit,
547           struct host_rlimit64 *, old_limit)
548 #endif
549 
550 
551 #if defined(TARGET_NR_timer_create)
552 /* Maximum of 32 active POSIX timers allowed at any one time. */
553 #define GUEST_TIMER_MAX 32
554 static timer_t g_posix_timers[GUEST_TIMER_MAX];
555 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
556 
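/* Claim a free slot with an atomic exchange so that two guest threads
 * cannot be handed the same host timer; free_host_timer_slot() below
 * releases the slot again with a store-release.
 */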
557 static inline int next_free_host_timer(void)
558 {
559     int k;
560     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
561         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
562             return k;
563         }
564     }
565     return -1;
566 }
567 
568 static inline void free_host_timer_slot(int id)
569 {
570     qatomic_store_release(g_posix_timer_allocated + id, 0);
571 }
572 #endif
573 
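/*
 * Errno translation: the E(X) table generated from errnos.c.inc maps each
 * known errno between host and target numbering; values with no mapping are
 * passed through unchanged.  get_errno() below folds the usual host
 * "-1 plus errno" return convention into QEMU's convention of returning
 * negative target errnos.
 */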
574 static inline int host_to_target_errno(int host_errno)
575 {
576     switch (host_errno) {
577 #define E(X)  case X: return TARGET_##X;
578 #include "errnos.c.inc"
579 #undef E
580     default:
581         return host_errno;
582     }
583 }
584 
585 static inline int target_to_host_errno(int target_errno)
586 {
587     switch (target_errno) {
588 #define E(X)  case TARGET_##X: return X;
589 #include "errnos.c.inc"
590 #undef E
591     default:
592         return target_errno;
593     }
594 }
595 
596 abi_long get_errno(abi_long ret)
597 {
598     if (ret == -1)
599         return -host_to_target_errno(errno);
600     else
601         return ret;
602 }
603 
604 const char *target_strerror(int err)
605 {
606     if (err == QEMU_ERESTARTSYS) {
607         return "To be restarted";
608     }
609     if (err == QEMU_ESIGRETURN) {
610         return "Successful exit from sigreturn";
611     }
612 
613     return strerror(target_to_host_errno(err));
614 }
615 
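/*
 * Analogous to the kernel's check_zeroed_user(): when the guest passes a
 * structure larger than the size QEMU knows about (usize > ksize), verify
 * that the extra tail bytes are all zero.  Returns 1 on success, 0 if a
 * non-zero byte is found, and -TARGET_EFAULT if the guest memory cannot
 * be read.
 */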
616 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
617 {
618     int i;
619     uint8_t b;
620     if (usize <= ksize) {
621         return 1;
622     }
623     for (i = ksize; i < usize; i++) {
624         if (get_user_u8(b, addr + i)) {
625             return -TARGET_EFAULT;
626         }
627         if (b != 0) {
628             return 0;
629         }
630     }
631     return 1;
632 }
633 
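/*
 * The safe_syscallN() macros generate safe_xxx() wrappers that enter the
 * host kernel via safe_syscall() (see user/safe-syscall.h) rather than
 * syscall(2).  This closes the race between checking for pending guest
 * signals and blocking in the host syscall, so an interrupted call reports
 * QEMU_ERESTARTSYS and can be restarted instead of losing the signal.
 */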
634 #define safe_syscall0(type, name) \
635 static type safe_##name(void) \
636 { \
637     return safe_syscall(__NR_##name); \
638 }
639 
640 #define safe_syscall1(type, name, type1, arg1) \
641 static type safe_##name(type1 arg1) \
642 { \
643     return safe_syscall(__NR_##name, arg1); \
644 }
645 
646 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
647 static type safe_##name(type1 arg1, type2 arg2) \
648 { \
649     return safe_syscall(__NR_##name, arg1, arg2); \
650 }
651 
652 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
653 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
654 { \
655     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
656 }
657 
658 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
659     type4, arg4) \
660 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
661 { \
662     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
663 }
664 
665 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
666     type4, arg4, type5, arg5) \
667 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
668     type5 arg5) \
669 { \
670     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
671 }
672 
673 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
674     type4, arg4, type5, arg5, type6, arg6) \
675 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
676     type5 arg5, type6 arg6) \
677 { \
678     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
679 }
680 
681 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
682 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
683 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
684               int, flags, mode_t, mode)
685 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
686 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
687               struct rusage *, rusage)
688 #endif
689 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
690               int, options, struct rusage *, rusage)
691 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
692 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
693     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
694 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
695               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
696 #endif
697 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
698 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
699               struct timespec *, tsp, const sigset_t *, sigmask,
700               size_t, sigsetsize)
701 #endif
702 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
703               int, maxevents, int, timeout, const sigset_t *, sigmask,
704               size_t, sigsetsize)
705 #if defined(__NR_futex)
706 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
707               const struct timespec *,timeout,int *,uaddr2,int,val3)
708 #endif
709 #if defined(__NR_futex_time64)
710 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
711               const struct timespec *,timeout,int *,uaddr2,int,val3)
712 #endif
713 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
714 safe_syscall2(int, kill, pid_t, pid, int, sig)
715 safe_syscall2(int, tkill, int, tid, int, sig)
716 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
717 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
718 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
719 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
720               unsigned long, pos_l, unsigned long, pos_h)
721 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
722               unsigned long, pos_l, unsigned long, pos_h)
723 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
724               socklen_t, addrlen)
725 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
726               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
727 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
728               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
729 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
730 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
731 safe_syscall2(int, flock, int, fd, int, operation)
732 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
733 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
734               const struct timespec *, uts, size_t, sigsetsize)
735 #endif
736 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
737               int, flags)
738 #if defined(TARGET_NR_nanosleep)
739 safe_syscall2(int, nanosleep, const struct timespec *, req,
740               struct timespec *, rem)
741 #endif
742 #if defined(TARGET_NR_clock_nanosleep) || \
743     defined(TARGET_NR_clock_nanosleep_time64)
744 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
745               const struct timespec *, req, struct timespec *, rem)
746 #endif
747 #ifdef __NR_ipc
748 #ifdef __s390x__
749 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
750               void *, ptr)
751 #else
752 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
753               void *, ptr, long, fifth)
754 #endif
755 #endif
756 #ifdef __NR_msgsnd
757 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
758               int, flags)
759 #endif
760 #ifdef __NR_msgrcv
761 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
762               long, msgtype, int, flags)
763 #endif
764 #ifdef __NR_semtimedop
765 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
766               unsigned, nsops, const struct timespec *, timeout)
767 #endif
768 #if defined(TARGET_NR_mq_timedsend) || \
769     defined(TARGET_NR_mq_timedsend_time64)
770 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
771               size_t, len, unsigned, prio, const struct timespec *, timeout)
772 #endif
773 #if defined(TARGET_NR_mq_timedreceive) || \
774     defined(TARGET_NR_mq_timedreceive_time64)
775 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
776               size_t, len, unsigned *, prio, const struct timespec *, timeout)
777 #endif
778 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
779 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
780               int, outfd, loff_t *, poutoff, size_t, length,
781               unsigned int, flags)
782 #endif
783 
784 /* We do ioctl like this rather than via safe_syscall3 to preserve the
785  * "third argument might be integer or pointer or not present" behaviour of
786  * the libc function.
787  */
788 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
789 /* Similarly for fcntl. Note that callers must always:
790  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
791  *  use the flock64 struct rather than unsuffixed flock
792  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
793  */
794 #ifdef __NR_fcntl64
795 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
796 #else
797 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
798 #endif
799 
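/* Translate a host socket type (low bits) plus the SOCK_CLOEXEC and
 * SOCK_NONBLOCK creation flags into the guest's encoding.
 */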
800 static inline int host_to_target_sock_type(int host_type)
801 {
802     int target_type;
803 
804     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
805     case SOCK_DGRAM:
806         target_type = TARGET_SOCK_DGRAM;
807         break;
808     case SOCK_STREAM:
809         target_type = TARGET_SOCK_STREAM;
810         break;
811     default:
812         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
813         break;
814     }
815 
816 #if defined(SOCK_CLOEXEC)
817     if (host_type & SOCK_CLOEXEC) {
818         target_type |= TARGET_SOCK_CLOEXEC;
819     }
820 #endif
821 
822 #if defined(SOCK_NONBLOCK)
823     if (host_type & SOCK_NONBLOCK) {
824         target_type |= TARGET_SOCK_NONBLOCK;
825     }
826 #endif
827 
828     return target_type;
829 }
830 
831 static abi_ulong target_brk;
832 static abi_ulong target_original_brk;
833 static abi_ulong brk_page;
834 
835 void target_set_brk(abi_ulong new_brk)
836 {
837     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
838     brk_page = HOST_PAGE_ALIGN(target_brk);
839 }
840 
841 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
842 #define DEBUGF_BRK(message, args...)
843 
844 /* do_brk() must return target values and target errnos. */
845 abi_long do_brk(abi_ulong new_brk)
846 {
847     abi_long mapped_addr;
848     abi_ulong new_alloc_size;
849 
850     /* brk pointers are always untagged */
851 
852     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
853 
854     if (!new_brk) {
855         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
856         return target_brk;
857     }
858     if (new_brk < target_original_brk) {
859         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
860                    target_brk);
861         return target_brk;
862     }
863 
864     /* If the new brk is less than the highest page reserved to the
865      * target heap allocation, set it and we're almost done...  */
866     if (new_brk <= brk_page) {
867         /* Heap contents are initialized to zero, as for anonymous
868          * mapped pages.  */
869         if (new_brk > target_brk) {
870             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
871         }
872         target_brk = new_brk;
873         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
874         return target_brk;
875     }
876 
877     /* We need to allocate more memory after the brk... Note that
878      * we don't use MAP_FIXED because that will map over the top of
879      * any existing mapping (like the one with the host libc or qemu
880      * itself); instead we treat "mapped but at wrong address" as
881      * a failure and unmap again.
882      */
883     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
884     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
885                                         PROT_READ|PROT_WRITE,
886                                         MAP_ANON|MAP_PRIVATE, 0, 0));
887 
888     if (mapped_addr == brk_page) {
889         /* Heap contents are initialized to zero, as for anonymous
890          * mapped pages.  Technically the new pages are already
891          * initialized to zero since they *are* anonymous mapped
892          * pages, however we have to take care with the contents that
893          * come from the remaining part of the previous page: it may
894      * contain garbage data from previous heap usage (the heap was
895      * grown and then shrunk).  */
896         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
897 
898         target_brk = new_brk;
899         brk_page = HOST_PAGE_ALIGN(target_brk);
900         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
901             target_brk);
902         return target_brk;
903     } else if (mapped_addr != -1) {
904         /* Mapped but at wrong address, meaning there wasn't actually
905          * enough space for this brk.
906          */
907         target_munmap(mapped_addr, new_alloc_size);
908         mapped_addr = -1;
909         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
910     }
911     else {
912         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
913     }
914 
915 #if defined(TARGET_ALPHA)
916     /* We (partially) emulate OSF/1 on Alpha, which requires we
917        return a proper errno, not an unchanged brk value.  */
918     return -TARGET_ENOMEM;
919 #endif
920     /* For everything else, return the previous break. */
921     return target_brk;
922 }
923 
924 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
925     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
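/*
 * A guest fd_set is an array of abi_ulong bit words in guest byte order,
 * which may differ from the host fd_set in both word size and endianness,
 * so the sets are converted bit by bit in each direction.
 */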
926 static inline abi_long copy_from_user_fdset(fd_set *fds,
927                                             abi_ulong target_fds_addr,
928                                             int n)
929 {
930     int i, nw, j, k;
931     abi_ulong b, *target_fds;
932 
933     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
934     if (!(target_fds = lock_user(VERIFY_READ,
935                                  target_fds_addr,
936                                  sizeof(abi_ulong) * nw,
937                                  1)))
938         return -TARGET_EFAULT;
939 
940     FD_ZERO(fds);
941     k = 0;
942     for (i = 0; i < nw; i++) {
943         /* grab the abi_ulong */
944         __get_user(b, &target_fds[i]);
945         for (j = 0; j < TARGET_ABI_BITS; j++) {
946             /* check the bit inside the abi_ulong */
947             if ((b >> j) & 1)
948                 FD_SET(k, fds);
949             k++;
950         }
951     }
952 
953     unlock_user(target_fds, target_fds_addr, 0);
954 
955     return 0;
956 }
957 
958 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
959                                                  abi_ulong target_fds_addr,
960                                                  int n)
961 {
962     if (target_fds_addr) {
963         if (copy_from_user_fdset(fds, target_fds_addr, n))
964             return -TARGET_EFAULT;
965         *fds_ptr = fds;
966     } else {
967         *fds_ptr = NULL;
968     }
969     return 0;
970 }
971 
972 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
973                                           const fd_set *fds,
974                                           int n)
975 {
976     int i, nw, j, k;
977     abi_long v;
978     abi_ulong *target_fds;
979 
980     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
981     if (!(target_fds = lock_user(VERIFY_WRITE,
982                                  target_fds_addr,
983                                  sizeof(abi_ulong) * nw,
984                                  0)))
985         return -TARGET_EFAULT;
986 
987     k = 0;
988     for (i = 0; i < nw; i++) {
989         v = 0;
990         for (j = 0; j < TARGET_ABI_BITS; j++) {
991             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
992             k++;
993         }
994         __put_user(v, &target_fds[i]);
995     }
996 
997     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
998 
999     return 0;
1000 }
1001 #endif
1002 
1003 #if defined(__alpha__)
1004 #define HOST_HZ 1024
1005 #else
1006 #define HOST_HZ 100
1007 #endif
1008 
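/*
 * clock_t values (e.g. from times()) are reported in units of the kernel
 * tick; HOST_HZ above reflects the host's USER_HZ (1024 on Alpha, 100
 * elsewhere), and the value is rescaled to the guest's TARGET_HZ.
 */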
1009 static inline abi_long host_to_target_clock_t(long ticks)
1010 {
1011 #if HOST_HZ == TARGET_HZ
1012     return ticks;
1013 #else
1014     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1015 #endif
1016 }
1017 
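/* Copy a host struct rusage out to the guest, converting each field to
 * guest byte order.
 */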
1018 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1019                                              const struct rusage *rusage)
1020 {
1021     struct target_rusage *target_rusage;
1022 
1023     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1024         return -TARGET_EFAULT;
1025     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1026     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1027     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1028     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1029     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1030     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1031     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1032     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1033     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1034     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1035     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1036     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1037     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1038     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1039     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1040     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1041     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1042     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1043     unlock_user_struct(target_rusage, target_addr, 1);
1044 
1045     return 0;
1046 }
1047 
1048 #ifdef TARGET_NR_setrlimit
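/* Convert a guest rlimit value to the host rlim_t, mapping
 * TARGET_RLIM_INFINITY (and any value that does not fit in rlim_t) to
 * RLIM_INFINITY.
 */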
1049 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1050 {
1051     abi_ulong target_rlim_swap;
1052     rlim_t result;
1053 
1054     target_rlim_swap = tswapal(target_rlim);
1055     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1056         return RLIM_INFINITY;
1057 
1058     result = target_rlim_swap;
1059     if (target_rlim_swap != (rlim_t)result)
1060         return RLIM_INFINITY;
1061 
1062     return result;
1063 }
1064 #endif
1065 
1066 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1067 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1068 {
1069     abi_ulong target_rlim_swap;
1070     abi_ulong result;
1071 
1072     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1073         target_rlim_swap = TARGET_RLIM_INFINITY;
1074     else
1075         target_rlim_swap = rlim;
1076     result = tswapal(target_rlim_swap);
1077 
1078     return result;
1079 }
1080 #endif
1081 
1082 static inline int target_to_host_resource(int code)
1083 {
1084     switch (code) {
1085     case TARGET_RLIMIT_AS:
1086         return RLIMIT_AS;
1087     case TARGET_RLIMIT_CORE:
1088         return RLIMIT_CORE;
1089     case TARGET_RLIMIT_CPU:
1090         return RLIMIT_CPU;
1091     case TARGET_RLIMIT_DATA:
1092         return RLIMIT_DATA;
1093     case TARGET_RLIMIT_FSIZE:
1094         return RLIMIT_FSIZE;
1095     case TARGET_RLIMIT_LOCKS:
1096         return RLIMIT_LOCKS;
1097     case TARGET_RLIMIT_MEMLOCK:
1098         return RLIMIT_MEMLOCK;
1099     case TARGET_RLIMIT_MSGQUEUE:
1100         return RLIMIT_MSGQUEUE;
1101     case TARGET_RLIMIT_NICE:
1102         return RLIMIT_NICE;
1103     case TARGET_RLIMIT_NOFILE:
1104         return RLIMIT_NOFILE;
1105     case TARGET_RLIMIT_NPROC:
1106         return RLIMIT_NPROC;
1107     case TARGET_RLIMIT_RSS:
1108         return RLIMIT_RSS;
1109     case TARGET_RLIMIT_RTPRIO:
1110         return RLIMIT_RTPRIO;
1111 #ifdef RLIMIT_RTTIME
1112     case TARGET_RLIMIT_RTTIME:
1113         return RLIMIT_RTTIME;
1114 #endif
1115     case TARGET_RLIMIT_SIGPENDING:
1116         return RLIMIT_SIGPENDING;
1117     case TARGET_RLIMIT_STACK:
1118         return RLIMIT_STACK;
1119     default:
1120         return code;
1121     }
1122 }
1123 
1124 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1125                                               abi_ulong target_tv_addr)
1126 {
1127     struct target_timeval *target_tv;
1128 
1129     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1130         return -TARGET_EFAULT;
1131     }
1132 
1133     __get_user(tv->tv_sec, &target_tv->tv_sec);
1134     __get_user(tv->tv_usec, &target_tv->tv_usec);
1135 
1136     unlock_user_struct(target_tv, target_tv_addr, 0);
1137 
1138     return 0;
1139 }
1140 
1141 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1142                                             const struct timeval *tv)
1143 {
1144     struct target_timeval *target_tv;
1145 
1146     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1147         return -TARGET_EFAULT;
1148     }
1149 
1150     __put_user(tv->tv_sec, &target_tv->tv_sec);
1151     __put_user(tv->tv_usec, &target_tv->tv_usec);
1152 
1153     unlock_user_struct(target_tv, target_tv_addr, 1);
1154 
1155     return 0;
1156 }
1157 
1158 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1159 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1160                                                 abi_ulong target_tv_addr)
1161 {
1162     struct target__kernel_sock_timeval *target_tv;
1163 
1164     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1165         return -TARGET_EFAULT;
1166     }
1167 
1168     __get_user(tv->tv_sec, &target_tv->tv_sec);
1169     __get_user(tv->tv_usec, &target_tv->tv_usec);
1170 
1171     unlock_user_struct(target_tv, target_tv_addr, 0);
1172 
1173     return 0;
1174 }
1175 #endif
1176 
1177 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1178                                               const struct timeval *tv)
1179 {
1180     struct target__kernel_sock_timeval *target_tv;
1181 
1182     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1183         return -TARGET_EFAULT;
1184     }
1185 
1186     __put_user(tv->tv_sec, &target_tv->tv_sec);
1187     __put_user(tv->tv_usec, &target_tv->tv_usec);
1188 
1189     unlock_user_struct(target_tv, target_tv_addr, 1);
1190 
1191     return 0;
1192 }
1193 
1194 #if defined(TARGET_NR_futex) || \
1195     defined(TARGET_NR_rt_sigtimedwait) || \
1196     defined(TARGET_NR_pselect6) || \
1197     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1198     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1199     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1200     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1201     defined(TARGET_NR_timer_settime) || \
1202     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1203 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1204                                                abi_ulong target_addr)
1205 {
1206     struct target_timespec *target_ts;
1207 
1208     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1209         return -TARGET_EFAULT;
1210     }
1211     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1212     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1213     unlock_user_struct(target_ts, target_addr, 0);
1214     return 0;
1215 }
1216 #endif
1217 
1218 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1219     defined(TARGET_NR_timer_settime64) || \
1220     defined(TARGET_NR_mq_timedsend_time64) || \
1221     defined(TARGET_NR_mq_timedreceive_time64) || \
1222     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1223     defined(TARGET_NR_clock_nanosleep_time64) || \
1224     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1225     defined(TARGET_NR_utimensat) || \
1226     defined(TARGET_NR_utimensat_time64) || \
1227     defined(TARGET_NR_semtimedop_time64) || \
1228     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1229 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1230                                                  abi_ulong target_addr)
1231 {
1232     struct target__kernel_timespec *target_ts;
1233 
1234     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1235         return -TARGET_EFAULT;
1236     }
1237     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1238     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1239     /* in 32bit mode, this drops the padding */
1240     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1241     unlock_user_struct(target_ts, target_addr, 0);
1242     return 0;
1243 }
1244 #endif
1245 
1246 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1247                                                struct timespec *host_ts)
1248 {
1249     struct target_timespec *target_ts;
1250 
1251     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1252         return -TARGET_EFAULT;
1253     }
1254     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1255     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1256     unlock_user_struct(target_ts, target_addr, 1);
1257     return 0;
1258 }
1259 
1260 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1261                                                  struct timespec *host_ts)
1262 {
1263     struct target__kernel_timespec *target_ts;
1264 
1265     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1266         return -TARGET_EFAULT;
1267     }
1268     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1269     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1270     unlock_user_struct(target_ts, target_addr, 1);
1271     return 0;
1272 }
1273 
1274 #if defined(TARGET_NR_gettimeofday)
1275 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1276                                              struct timezone *tz)
1277 {
1278     struct target_timezone *target_tz;
1279 
1280     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1281         return -TARGET_EFAULT;
1282     }
1283 
1284     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1285     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1286 
1287     unlock_user_struct(target_tz, target_tz_addr, 1);
1288 
1289     return 0;
1290 }
1291 #endif
1292 
1293 #if defined(TARGET_NR_settimeofday)
1294 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1295                                                abi_ulong target_tz_addr)
1296 {
1297     struct target_timezone *target_tz;
1298 
1299     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1300         return -TARGET_EFAULT;
1301     }
1302 
1303     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1304     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1305 
1306     unlock_user_struct(target_tz, target_tz_addr, 0);
1307 
1308     return 0;
1309 }
1310 #endif
1311 
1312 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1313 #include <mqueue.h>
1314 
1315 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1316                                               abi_ulong target_mq_attr_addr)
1317 {
1318     struct target_mq_attr *target_mq_attr;
1319 
1320     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1321                           target_mq_attr_addr, 1))
1322         return -TARGET_EFAULT;
1323 
1324     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1325     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1326     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1327     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1328 
1329     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1330 
1331     return 0;
1332 }
1333 
1334 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1335                                             const struct mq_attr *attr)
1336 {
1337     struct target_mq_attr *target_mq_attr;
1338 
1339     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1340                           target_mq_attr_addr, 0))
1341         return -TARGET_EFAULT;
1342 
1343     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1344     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1345     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1346     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1347 
1348     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1349 
1350     return 0;
1351 }
1352 #endif
1353 
1354 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1355 /* do_select() must return target values and target errnos. */
1356 static abi_long do_select(int n,
1357                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1358                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1359 {
1360     fd_set rfds, wfds, efds;
1361     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1362     struct timeval tv;
1363     struct timespec ts, *ts_ptr;
1364     abi_long ret;
1365 
1366     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1367     if (ret) {
1368         return ret;
1369     }
1370     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1371     if (ret) {
1372         return ret;
1373     }
1374     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1375     if (ret) {
1376         return ret;
1377     }
1378 
1379     if (target_tv_addr) {
1380         if (copy_from_user_timeval(&tv, target_tv_addr))
1381             return -TARGET_EFAULT;
1382         ts.tv_sec = tv.tv_sec;
1383         ts.tv_nsec = tv.tv_usec * 1000;
1384         ts_ptr = &ts;
1385     } else {
1386         ts_ptr = NULL;
1387     }
1388 
1389     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1390                                   ts_ptr, NULL));
1391 
1392     if (!is_error(ret)) {
1393         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1394             return -TARGET_EFAULT;
1395         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1396             return -TARGET_EFAULT;
1397         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1398             return -TARGET_EFAULT;
1399 
1400         if (target_tv_addr) {
1401             tv.tv_sec = ts.tv_sec;
1402             tv.tv_usec = ts.tv_nsec / 1000;
1403             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1404                 return -TARGET_EFAULT;
1405             }
1406         }
1407     }
1408 
1409     return ret;
1410 }
1411 
1412 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1413 static abi_long do_old_select(abi_ulong arg1)
1414 {
1415     struct target_sel_arg_struct *sel;
1416     abi_ulong inp, outp, exp, tvp;
1417     long nsel;
1418 
1419     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1420         return -TARGET_EFAULT;
1421     }
1422 
1423     nsel = tswapal(sel->n);
1424     inp = tswapal(sel->inp);
1425     outp = tswapal(sel->outp);
1426     exp = tswapal(sel->exp);
1427     tvp = tswapal(sel->tvp);
1428 
1429     unlock_user_struct(sel, arg1, 0);
1430 
1431     return do_select(nsel, inp, outp, exp, tvp);
1432 }
1433 #endif
1434 #endif
1435 
1436 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1437 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1438                             abi_long arg4, abi_long arg5, abi_long arg6,
1439                             bool time64)
1440 {
1441     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1442     fd_set rfds, wfds, efds;
1443     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1444     struct timespec ts, *ts_ptr;
1445     abi_long ret;
1446 
1447     /*
1448      * The 6th arg is actually two args smashed together,
1449      * so we cannot use the C library.
1450      */
1451     struct {
1452         sigset_t *set;
1453         size_t size;
1454     } sig, *sig_ptr;
1455 
1456     abi_ulong arg_sigset, arg_sigsize, *arg7;
1457 
1458     n = arg1;
1459     rfd_addr = arg2;
1460     wfd_addr = arg3;
1461     efd_addr = arg4;
1462     ts_addr = arg5;
1463 
1464     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1465     if (ret) {
1466         return ret;
1467     }
1468     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1469     if (ret) {
1470         return ret;
1471     }
1472     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1473     if (ret) {
1474         return ret;
1475     }
1476 
1477     /*
1478      * This takes a timespec, and not a timeval, so we cannot
1479      * use the do_select() helper ...
1480      */
1481     if (ts_addr) {
1482         if (time64) {
1483             if (target_to_host_timespec64(&ts, ts_addr)) {
1484                 return -TARGET_EFAULT;
1485             }
1486         } else {
1487             if (target_to_host_timespec(&ts, ts_addr)) {
1488                 return -TARGET_EFAULT;
1489             }
1490         }
1491         ts_ptr = &ts;
1492     } else {
1493         ts_ptr = NULL;
1494     }
1495 
1496     /* Extract the two packed args for the sigset */
1497     sig_ptr = NULL;
1498     if (arg6) {
1499         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1500         if (!arg7) {
1501             return -TARGET_EFAULT;
1502         }
1503         arg_sigset = tswapal(arg7[0]);
1504         arg_sigsize = tswapal(arg7[1]);
1505         unlock_user(arg7, arg6, 0);
1506 
1507         if (arg_sigset) {
1508             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1509             if (ret != 0) {
1510                 return ret;
1511             }
1512             sig_ptr = &sig;
1513             sig.size = SIGSET_T_SIZE;
1514         }
1515     }
1516 
1517     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1518                                   ts_ptr, sig_ptr));
1519 
1520     if (sig_ptr) {
1521         finish_sigsuspend_mask(ret);
1522     }
1523 
1524     if (!is_error(ret)) {
1525         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1526             return -TARGET_EFAULT;
1527         }
1528         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1529             return -TARGET_EFAULT;
1530         }
1531         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1532             return -TARGET_EFAULT;
1533         }
1534         if (time64) {
1535             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1536                 return -TARGET_EFAULT;
1537             }
1538         } else {
1539             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1540                 return -TARGET_EFAULT;
1541             }
1542         }
1543     }
1544     return ret;
1545 }
1546 #endif
1547 
1548 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1549     defined(TARGET_NR_ppoll_time64)
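/*
 * Handle poll(), ppoll() and ppoll_time64() via the host ppoll syscall.
 * For plain poll() the millisecond timeout is converted to a timespec;
 * for ppoll() the timeout and signal mask are translated from the guest.
 */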
1550 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1551                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1552 {
1553     struct target_pollfd *target_pfd;
1554     unsigned int nfds = arg2;
1555     struct pollfd *pfd;
1556     unsigned int i;
1557     abi_long ret;
1558 
1559     pfd = NULL;
1560     target_pfd = NULL;
1561     if (nfds) {
1562         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1563             return -TARGET_EINVAL;
1564         }
1565         target_pfd = lock_user(VERIFY_WRITE, arg1,
1566                                sizeof(struct target_pollfd) * nfds, 1);
1567         if (!target_pfd) {
1568             return -TARGET_EFAULT;
1569         }
1570 
1571         pfd = alloca(sizeof(struct pollfd) * nfds);
1572         for (i = 0; i < nfds; i++) {
1573             pfd[i].fd = tswap32(target_pfd[i].fd);
1574             pfd[i].events = tswap16(target_pfd[i].events);
1575         }
1576     }
1577     if (ppoll) {
1578         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1579         sigset_t *set = NULL;
1580 
1581         if (arg3) {
1582             if (time64) {
1583                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1584                     unlock_user(target_pfd, arg1, 0);
1585                     return -TARGET_EFAULT;
1586                 }
1587             } else {
1588                 if (target_to_host_timespec(timeout_ts, arg3)) {
1589                     unlock_user(target_pfd, arg1, 0);
1590                     return -TARGET_EFAULT;
1591                 }
1592             }
1593         } else {
1594             timeout_ts = NULL;
1595         }
1596 
1597         if (arg4) {
1598             ret = process_sigsuspend_mask(&set, arg4, arg5);
1599             if (ret != 0) {
1600                 unlock_user(target_pfd, arg1, 0);
1601                 return ret;
1602             }
1603         }
1604 
1605         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1606                                    set, SIGSET_T_SIZE));
1607 
1608         if (set) {
1609             finish_sigsuspend_mask(ret);
1610         }
1611         if (!is_error(ret) && arg3) {
1612             if (time64) {
1613                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1614                     return -TARGET_EFAULT;
1615                 }
1616             } else {
1617                 if (host_to_target_timespec(arg3, timeout_ts)) {
1618                     return -TARGET_EFAULT;
1619                 }
1620             }
1621         }
1622     } else {
1623         struct timespec ts, *pts;
1624 
1625         if (arg3 >= 0) {
1626             /* Convert ms to secs, ns */
1627             ts.tv_sec = arg3 / 1000;
1628             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1629             pts = &ts;
1630         } else {
1631             /* A negative poll() timeout means "infinite" */
1632             pts = NULL;
1633         }
1634         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1635     }
1636 
1637     if (!is_error(ret)) {
1638         for (i = 0; i < nfds; i++) {
1639             target_pfd[i].revents = tswap16(pfd[i].revents);
1640         }
1641     }
1642     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1643     return ret;
1644 }
1645 #endif
1646 
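/*
 * Emulate pipe()/pipe2() on top of the host pipe2(); on success the two
 * host descriptors are either written back to guest memory as two abi_ints
 * or, for targets with special pipe conventions, returned in registers.
 */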
1647 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1648                         int flags, int is_pipe2)
1649 {
1650     int host_pipe[2];
1651     abi_long ret;
1652     ret = pipe2(host_pipe, flags);
1653 
1654     if (is_error(ret))
1655         return get_errno(ret);
1656 
1657     /* Several targets have special calling conventions for the original
1658        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1659     if (!is_pipe2) {
1660 #if defined(TARGET_ALPHA)
1661         cpu_env->ir[IR_A4] = host_pipe[1];
1662         return host_pipe[0];
1663 #elif defined(TARGET_MIPS)
1664         cpu_env->active_tc.gpr[3] = host_pipe[1];
1665         return host_pipe[0];
1666 #elif defined(TARGET_SH4)
1667         cpu_env->gregs[1] = host_pipe[1];
1668         return host_pipe[0];
1669 #elif defined(TARGET_SPARC)
1670         cpu_env->regwptr[1] = host_pipe[1];
1671         return host_pipe[0];
1672 #endif
1673     }
1674 
1675     if (put_user_s32(host_pipe[0], pipedes)
1676         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1677         return -TARGET_EFAULT;
1678     return get_errno(ret);
1679 }
1680 
1681 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1682                                               abi_ulong target_addr,
1683                                               socklen_t len)
1684 {
1685     struct target_ip_mreqn *target_smreqn;
1686 
1687     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1688     if (!target_smreqn)
1689         return -TARGET_EFAULT;
1690     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1691     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1692     if (len == sizeof(struct target_ip_mreqn))
1693         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1694     unlock_user(target_smreqn, target_addr, 0);
1695 
1696     return 0;
1697 }
1698 
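/*
 * Copy a sockaddr from guest memory into a host sockaddr, fixing the byte
 * order of the address family and of the AF_NETLINK/AF_PACKET specific
 * fields, and extending a short AF_UNIX length so that sun_path keeps its
 * terminating NUL where possible.
 */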
1699 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1700                                                abi_ulong target_addr,
1701                                                socklen_t len)
1702 {
1703     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1704     sa_family_t sa_family;
1705     struct target_sockaddr *target_saddr;
1706 
1707     if (fd_trans_target_to_host_addr(fd)) {
1708         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1709     }
1710 
1711     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1712     if (!target_saddr)
1713         return -TARGET_EFAULT;
1714 
1715     sa_family = tswap16(target_saddr->sa_family);
1716 
1717     /* Oops. The caller might send an incomplete sun_path; sun_path
1718      * must be terminated by \0 (see the manual page), but
1719      * unfortunately it is quite common to specify sockaddr_un
1720      * length as "strlen(x->sun_path)" while it should be
1721      * "strlen(...) + 1". We'll fix that here if needed.
1722      * The Linux kernel applies a similar fixup.
1723      */
1724 
1725     if (sa_family == AF_UNIX) {
1726         if (len < unix_maxlen && len > 0) {
1727             char *cp = (char *)target_saddr;
1728 
1729             if (cp[len - 1] && !cp[len])
1730                 len++;
1731         }
1732         if (len > unix_maxlen)
1733             len = unix_maxlen;
1734     }
1735 
1736     memcpy(addr, target_saddr, len);
1737     addr->sa_family = sa_family;
1738     if (sa_family == AF_NETLINK) {
1739         struct sockaddr_nl *nladdr;
1740 
1741         nladdr = (struct sockaddr_nl *)addr;
1742         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1743         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1744     } else if (sa_family == AF_PACKET) {
1745         struct target_sockaddr_ll *lladdr;
1746 
1747         lladdr = (struct target_sockaddr_ll *)addr;
1748         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1749         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1750     }
1751     unlock_user(target_saddr, target_addr, 0);
1752 
1753     return 0;
1754 }
1755 
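/*
 * Copy a host sockaddr back into guest memory, converting the address
 * family and the AF_NETLINK/AF_PACKET/AF_INET6 specific fields to guest
 * byte order.
 */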
1756 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1757                                                struct sockaddr *addr,
1758                                                socklen_t len)
1759 {
1760     struct target_sockaddr *target_saddr;
1761 
1762     if (len == 0) {
1763         return 0;
1764     }
1765     assert(addr);
1766 
1767     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1768     if (!target_saddr)
1769         return -TARGET_EFAULT;
1770     memcpy(target_saddr, addr, len);
1771     if (len >= offsetof(struct target_sockaddr, sa_family) +
1772         sizeof(target_saddr->sa_family)) {
1773         target_saddr->sa_family = tswap16(addr->sa_family);
1774     }
1775     if (addr->sa_family == AF_NETLINK &&
1776         len >= sizeof(struct target_sockaddr_nl)) {
1777         struct target_sockaddr_nl *target_nl =
1778                (struct target_sockaddr_nl *)target_saddr;
1779         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1780         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1781     } else if (addr->sa_family == AF_PACKET) {
1782         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1783         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1784         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1785     } else if (addr->sa_family == AF_INET6 &&
1786                len >= sizeof(struct target_sockaddr_in6)) {
1787         struct target_sockaddr_in6 *target_in6 =
1788                (struct target_sockaddr_in6 *)target_saddr;
1789         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1790     }
1791     unlock_user(target_saddr, target_addr, len);
1792 
1793     return 0;
1794 }
1795 
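/*
 * Convert ancillary data (control messages) from the guest msghdr layout
 * to the host layout, currently handling SCM_RIGHTS and SCM_CREDENTIALS;
 * anything else is copied through verbatim with a LOG_UNIMP warning.
 */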
1796 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1797                                            struct target_msghdr *target_msgh)
1798 {
1799     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1800     abi_long msg_controllen;
1801     abi_ulong target_cmsg_addr;
1802     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1803     socklen_t space = 0;
1804 
1805     msg_controllen = tswapal(target_msgh->msg_controllen);
1806     if (msg_controllen < sizeof (struct target_cmsghdr))
1807         goto the_end;
1808     target_cmsg_addr = tswapal(target_msgh->msg_control);
1809     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1810     target_cmsg_start = target_cmsg;
1811     if (!target_cmsg)
1812         return -TARGET_EFAULT;
1813 
1814     while (cmsg && target_cmsg) {
1815         void *data = CMSG_DATA(cmsg);
1816         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1817 
1818         int len = tswapal(target_cmsg->cmsg_len)
1819             - sizeof(struct target_cmsghdr);
1820 
1821         space += CMSG_SPACE(len);
1822         if (space > msgh->msg_controllen) {
1823             space -= CMSG_SPACE(len);
1824             /* This is a QEMU bug, since we allocated the payload
1825              * area ourselves (unlike overflow in host-to-target
1826              * conversion, which is just the guest giving us a buffer
1827              * that's too small). It can't happen for the payload types
1828              * we currently support; if it becomes an issue in future
1829              * we would need to improve our allocation strategy to
1830              * something more intelligent than "twice the size of the
1831              * target buffer we're reading from".
1832              */
1833             qemu_log_mask(LOG_UNIMP,
1834                           ("Unsupported ancillary data %d/%d: "
1835                            "unhandled msg size\n"),
1836                           tswap32(target_cmsg->cmsg_level),
1837                           tswap32(target_cmsg->cmsg_type));
1838             break;
1839         }
1840 
1841         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1842             cmsg->cmsg_level = SOL_SOCKET;
1843         } else {
1844             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1845         }
1846         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1847         cmsg->cmsg_len = CMSG_LEN(len);
1848 
1849         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1850             int *fd = (int *)data;
1851             int *target_fd = (int *)target_data;
1852             int i, numfds = len / sizeof(int);
1853 
1854             for (i = 0; i < numfds; i++) {
1855                 __get_user(fd[i], target_fd + i);
1856             }
1857         } else if (cmsg->cmsg_level == SOL_SOCKET
1858                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1859             struct ucred *cred = (struct ucred *)data;
1860             struct target_ucred *target_cred =
1861                 (struct target_ucred *)target_data;
1862 
1863             __get_user(cred->pid, &target_cred->pid);
1864             __get_user(cred->uid, &target_cred->uid);
1865             __get_user(cred->gid, &target_cred->gid);
1866         } else {
1867             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1868                           cmsg->cmsg_level, cmsg->cmsg_type);
1869             memcpy(data, target_data, len);
1870         }
1871 
1872         cmsg = CMSG_NXTHDR(msgh, cmsg);
1873         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1874                                          target_cmsg_start);
1875     }
1876     unlock_user(target_cmsg, target_cmsg_addr, 0);
1877  the_end:
1878     msgh->msg_controllen = space;
1879     return 0;
1880 }
1881 
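/*
 * Convert ancillary data from the host msghdr back into the guest's
 * control buffer, truncating (and setting MSG_CTRUNC) when the guest
 * buffer is too small, and resizing payloads such as SO_TIMESTAMP whose
 * target representation differs from the host one.
 */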
1882 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1883                                            struct msghdr *msgh)
1884 {
1885     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1886     abi_long msg_controllen;
1887     abi_ulong target_cmsg_addr;
1888     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1889     socklen_t space = 0;
1890 
1891     msg_controllen = tswapal(target_msgh->msg_controllen);
1892     if (msg_controllen < sizeof (struct target_cmsghdr))
1893         goto the_end;
1894     target_cmsg_addr = tswapal(target_msgh->msg_control);
1895     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1896     target_cmsg_start = target_cmsg;
1897     if (!target_cmsg)
1898         return -TARGET_EFAULT;
1899 
1900     while (cmsg && target_cmsg) {
1901         void *data = CMSG_DATA(cmsg);
1902         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1903 
1904         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1905         int tgt_len, tgt_space;
1906 
1907         /* We never copy a half-header but may copy half-data;
1908          * this is Linux's behaviour in put_cmsg(). Note that
1909          * truncation here is a guest problem (which we report
1910          * to the guest via the CTRUNC bit), unlike truncation
1911          * in target_to_host_cmsg, which is a QEMU bug.
1912          */
1913         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1914             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1915             break;
1916         }
1917 
1918         if (cmsg->cmsg_level == SOL_SOCKET) {
1919             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1920         } else {
1921             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1922         }
1923         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1924 
1925         /* Payload types which need a different size of payload on
1926          * the target must adjust tgt_len here.
1927          */
1928         tgt_len = len;
1929         switch (cmsg->cmsg_level) {
1930         case SOL_SOCKET:
1931             switch (cmsg->cmsg_type) {
1932             case SO_TIMESTAMP:
1933                 tgt_len = sizeof(struct target_timeval);
1934                 break;
1935             default:
1936                 break;
1937             }
1938             break;
1939         default:
1940             break;
1941         }
1942 
1943         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1944             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1945             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1946         }
1947 
1948         /* We must now copy-and-convert len bytes of payload
1949          * into tgt_len bytes of destination space. Bear in mind
1950          * that in both source and destination we may be dealing
1951          * with a truncated value!
1952          */
1953         switch (cmsg->cmsg_level) {
1954         case SOL_SOCKET:
1955             switch (cmsg->cmsg_type) {
1956             case SCM_RIGHTS:
1957             {
1958                 int *fd = (int *)data;
1959                 int *target_fd = (int *)target_data;
1960                 int i, numfds = tgt_len / sizeof(int);
1961 
1962                 for (i = 0; i < numfds; i++) {
1963                     __put_user(fd[i], target_fd + i);
1964                 }
1965                 break;
1966             }
1967             case SO_TIMESTAMP:
1968             {
1969                 struct timeval *tv = (struct timeval *)data;
1970                 struct target_timeval *target_tv =
1971                     (struct target_timeval *)target_data;
1972 
1973                 if (len != sizeof(struct timeval) ||
1974                     tgt_len != sizeof(struct target_timeval)) {
1975                     goto unimplemented;
1976                 }
1977 
1978                 /* copy struct timeval to target */
1979                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1980                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1981                 break;
1982             }
1983             case SCM_CREDENTIALS:
1984             {
1985                 struct ucred *cred = (struct ucred *)data;
1986                 struct target_ucred *target_cred =
1987                     (struct target_ucred *)target_data;
1988 
1989                 __put_user(cred->pid, &target_cred->pid);
1990                 __put_user(cred->uid, &target_cred->uid);
1991                 __put_user(cred->gid, &target_cred->gid);
1992                 break;
1993             }
1994             default:
1995                 goto unimplemented;
1996             }
1997             break;
1998 
1999         case SOL_IP:
2000             switch (cmsg->cmsg_type) {
2001             case IP_TTL:
2002             {
2003                 uint32_t *v = (uint32_t *)data;
2004                 uint32_t *t_int = (uint32_t *)target_data;
2005 
2006                 if (len != sizeof(uint32_t) ||
2007                     tgt_len != sizeof(uint32_t)) {
2008                     goto unimplemented;
2009                 }
2010                 __put_user(*v, t_int);
2011                 break;
2012             }
2013             case IP_RECVERR:
2014             {
2015                 struct errhdr_t {
2016                    struct sock_extended_err ee;
2017                    struct sockaddr_in offender;
2018                 };
2019                 struct errhdr_t *errh = (struct errhdr_t *)data;
2020                 struct errhdr_t *target_errh =
2021                     (struct errhdr_t *)target_data;
2022 
2023                 if (len != sizeof(struct errhdr_t) ||
2024                     tgt_len != sizeof(struct errhdr_t)) {
2025                     goto unimplemented;
2026                 }
2027                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2028                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2029                 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2030                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2031                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2032                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2033                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2034                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2035                     (void *) &errh->offender, sizeof(errh->offender));
2036                 break;
2037             }
2038             default:
2039                 goto unimplemented;
2040             }
2041             break;
2042 
2043         case SOL_IPV6:
2044             switch (cmsg->cmsg_type) {
2045             case IPV6_HOPLIMIT:
2046             {
2047                 uint32_t *v = (uint32_t *)data;
2048                 uint32_t *t_int = (uint32_t *)target_data;
2049 
2050                 if (len != sizeof(uint32_t) ||
2051                     tgt_len != sizeof(uint32_t)) {
2052                     goto unimplemented;
2053                 }
2054                 __put_user(*v, t_int);
2055                 break;
2056             }
2057             case IPV6_RECVERR:
2058             {
2059                 struct errhdr6_t {
2060                    struct sock_extended_err ee;
2061                    struct sockaddr_in6 offender;
2062                 };
2063                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2064                 struct errhdr6_t *target_errh =
2065                     (struct errhdr6_t *)target_data;
2066 
2067                 if (len != sizeof(struct errhdr6_t) ||
2068                     tgt_len != sizeof(struct errhdr6_t)) {
2069                     goto unimplemented;
2070                 }
2071                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2072                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2073                 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2074                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2075                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2076                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2077                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2078                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2079                     (void *) &errh->offender, sizeof(errh->offender));
2080                 break;
2081             }
2082             default:
2083                 goto unimplemented;
2084             }
2085             break;
2086 
2087         default:
2088         unimplemented:
2089             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2090                           cmsg->cmsg_level, cmsg->cmsg_type);
2091             memcpy(target_data, data, MIN(len, tgt_len));
2092             if (tgt_len > len) {
2093                 memset(target_data + len, 0, tgt_len - len);
2094             }
2095         }
2096 
2097         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2098         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2099         if (msg_controllen < tgt_space) {
2100             tgt_space = msg_controllen;
2101         }
2102         msg_controllen -= tgt_space;
2103         space += tgt_space;
2104         cmsg = CMSG_NXTHDR(msgh, cmsg);
2105         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2106                                          target_cmsg_start);
2107     }
2108     unlock_user(target_cmsg, target_cmsg_addr, space);
2109  the_end:
2110     target_msgh->msg_controllen = tswapal(space);
2111     return 0;
2112 }
2113 
2114 /* do_setsockopt() Must return target values and target errnos. */
2115 static abi_long do_setsockopt(int sockfd, int level, int optname,
2116                               abi_ulong optval_addr, socklen_t optlen)
2117 {
2118     abi_long ret;
2119     int val;
2120     struct ip_mreqn *ip_mreq;
2121     struct ip_mreq_source *ip_mreq_source;
2122 
2123     switch(level) {
2124     case SOL_TCP:
2125     case SOL_UDP:
2126         /* TCP and UDP options all take an 'int' value.  */
2127         if (optlen < sizeof(uint32_t))
2128             return -TARGET_EINVAL;
2129 
2130         if (get_user_u32(val, optval_addr))
2131             return -TARGET_EFAULT;
2132         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2133         break;
2134     case SOL_IP:
2135         switch(optname) {
2136         case IP_TOS:
2137         case IP_TTL:
2138         case IP_HDRINCL:
2139         case IP_ROUTER_ALERT:
2140         case IP_RECVOPTS:
2141         case IP_RETOPTS:
2142         case IP_PKTINFO:
2143         case IP_MTU_DISCOVER:
2144         case IP_RECVERR:
2145         case IP_RECVTTL:
2146         case IP_RECVTOS:
2147 #ifdef IP_FREEBIND
2148         case IP_FREEBIND:
2149 #endif
2150         case IP_MULTICAST_TTL:
2151         case IP_MULTICAST_LOOP:
2152             val = 0;
2153             if (optlen >= sizeof(uint32_t)) {
2154                 if (get_user_u32(val, optval_addr))
2155                     return -TARGET_EFAULT;
2156             } else if (optlen >= 1) {
2157                 if (get_user_u8(val, optval_addr))
2158                     return -TARGET_EFAULT;
2159             }
2160             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2161             break;
2162         case IP_ADD_MEMBERSHIP:
2163         case IP_DROP_MEMBERSHIP:
2164             if (optlen < sizeof (struct target_ip_mreq) ||
2165                 optlen > sizeof (struct target_ip_mreqn))
2166                 return -TARGET_EINVAL;
2167 
2168             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2169             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2170             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2171             break;
2172 
2173         case IP_BLOCK_SOURCE:
2174         case IP_UNBLOCK_SOURCE:
2175         case IP_ADD_SOURCE_MEMBERSHIP:
2176         case IP_DROP_SOURCE_MEMBERSHIP:
2177             if (optlen != sizeof (struct target_ip_mreq_source))
2178                 return -TARGET_EINVAL;
2179 
2180             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2181             if (!ip_mreq_source) {
2182                 return -TARGET_EFAULT;
2183             }
2184             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2185             unlock_user(ip_mreq_source, optval_addr, 0);
2186             break;
2187 
2188         default:
2189             goto unimplemented;
2190         }
2191         break;
2192     case SOL_IPV6:
2193         switch (optname) {
2194         case IPV6_MTU_DISCOVER:
2195         case IPV6_MTU:
2196         case IPV6_V6ONLY:
2197         case IPV6_RECVPKTINFO:
2198         case IPV6_UNICAST_HOPS:
2199         case IPV6_MULTICAST_HOPS:
2200         case IPV6_MULTICAST_LOOP:
2201         case IPV6_RECVERR:
2202         case IPV6_RECVHOPLIMIT:
2203         case IPV6_2292HOPLIMIT:
2204         case IPV6_CHECKSUM:
2205         case IPV6_ADDRFORM:
2206         case IPV6_2292PKTINFO:
2207         case IPV6_RECVTCLASS:
2208         case IPV6_RECVRTHDR:
2209         case IPV6_2292RTHDR:
2210         case IPV6_RECVHOPOPTS:
2211         case IPV6_2292HOPOPTS:
2212         case IPV6_RECVDSTOPTS:
2213         case IPV6_2292DSTOPTS:
2214         case IPV6_TCLASS:
2215         case IPV6_ADDR_PREFERENCES:
2216 #ifdef IPV6_RECVPATHMTU
2217         case IPV6_RECVPATHMTU:
2218 #endif
2219 #ifdef IPV6_TRANSPARENT
2220         case IPV6_TRANSPARENT:
2221 #endif
2222 #ifdef IPV6_FREEBIND
2223         case IPV6_FREEBIND:
2224 #endif
2225 #ifdef IPV6_RECVORIGDSTADDR
2226         case IPV6_RECVORIGDSTADDR:
2227 #endif
2228             val = 0;
2229             if (optlen < sizeof(uint32_t)) {
2230                 return -TARGET_EINVAL;
2231             }
2232             if (get_user_u32(val, optval_addr)) {
2233                 return -TARGET_EFAULT;
2234             }
2235             ret = get_errno(setsockopt(sockfd, level, optname,
2236                                        &val, sizeof(val)));
2237             break;
2238         case IPV6_PKTINFO:
2239         {
2240             struct in6_pktinfo pki;
2241 
2242             if (optlen < sizeof(pki)) {
2243                 return -TARGET_EINVAL;
2244             }
2245 
2246             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2247                 return -TARGET_EFAULT;
2248             }
2249 
2250             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2251 
2252             ret = get_errno(setsockopt(sockfd, level, optname,
2253                                        &pki, sizeof(pki)));
2254             break;
2255         }
2256         case IPV6_ADD_MEMBERSHIP:
2257         case IPV6_DROP_MEMBERSHIP:
2258         {
2259             struct ipv6_mreq ipv6mreq;
2260 
2261             if (optlen < sizeof(ipv6mreq)) {
2262                 return -TARGET_EINVAL;
2263             }
2264 
2265             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2266                 return -TARGET_EFAULT;
2267             }
2268 
2269             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2270 
2271             ret = get_errno(setsockopt(sockfd, level, optname,
2272                                        &ipv6mreq, sizeof(ipv6mreq)));
2273             break;
2274         }
2275         default:
2276             goto unimplemented;
2277         }
2278         break;
2279     case SOL_ICMPV6:
2280         switch (optname) {
2281         case ICMPV6_FILTER:
2282         {
2283             struct icmp6_filter icmp6f;
2284 
2285             if (optlen > sizeof(icmp6f)) {
2286                 optlen = sizeof(icmp6f);
2287             }
2288 
2289             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2290                 return -TARGET_EFAULT;
2291             }
2292 
2293             for (val = 0; val < 8; val++) {
2294                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2295             }
2296 
2297             ret = get_errno(setsockopt(sockfd, level, optname,
2298                                        &icmp6f, optlen));
2299             break;
2300         }
2301         default:
2302             goto unimplemented;
2303         }
2304         break;
2305     case SOL_RAW:
2306         switch (optname) {
2307         case ICMP_FILTER:
2308         case IPV6_CHECKSUM:
2309             /* These take a u32 value. */
2310             if (optlen < sizeof(uint32_t)) {
2311                 return -TARGET_EINVAL;
2312             }
2313 
2314             if (get_user_u32(val, optval_addr)) {
2315                 return -TARGET_EFAULT;
2316             }
2317             ret = get_errno(setsockopt(sockfd, level, optname,
2318                                        &val, sizeof(val)));
2319             break;
2320 
2321         default:
2322             goto unimplemented;
2323         }
2324         break;
2325 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2326     case SOL_ALG:
2327         switch (optname) {
2328         case ALG_SET_KEY:
2329         {
2330             char *alg_key = g_malloc(optlen);
2331 
2332             if (!alg_key) {
2333                 return -TARGET_ENOMEM;
2334             }
2335             if (copy_from_user(alg_key, optval_addr, optlen)) {
2336                 g_free(alg_key);
2337                 return -TARGET_EFAULT;
2338             }
2339             ret = get_errno(setsockopt(sockfd, level, optname,
2340                                        alg_key, optlen));
2341             g_free(alg_key);
2342             break;
2343         }
2344         case ALG_SET_AEAD_AUTHSIZE:
2345         {
2346             ret = get_errno(setsockopt(sockfd, level, optname,
2347                                        NULL, optlen));
2348             break;
2349         }
2350         default:
2351             goto unimplemented;
2352         }
2353         break;
2354 #endif
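    /*
     * The guest's SOL_SOCKET level and SO_* option numbers may differ from
     * the host's (hence the TARGET_ prefixes), so each option is remapped
     * individually before calling the host setsockopt().
     */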
2355     case TARGET_SOL_SOCKET:
2356         switch (optname) {
2357         case TARGET_SO_RCVTIMEO:
2358         {
2359                 struct timeval tv;
2360 
2361                 optname = SO_RCVTIMEO;
2362 
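                /*
                 * TARGET_SO_SNDTIMEO re-enters here with optname already
                 * set to SO_SNDTIMEO; both timeouts share the same
                 * target_timeval -> timeval conversion.
                 */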
2363 set_timeout:
2364                 if (optlen != sizeof(struct target_timeval)) {
2365                     return -TARGET_EINVAL;
2366                 }
2367 
2368                 if (copy_from_user_timeval(&tv, optval_addr)) {
2369                     return -TARGET_EFAULT;
2370                 }
2371 
2372                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2373                                 &tv, sizeof(tv)));
2374                 return ret;
2375         }
2376         case TARGET_SO_SNDTIMEO:
2377                 optname = SO_SNDTIMEO;
2378                 goto set_timeout;
2379         case TARGET_SO_ATTACH_FILTER:
2380         {
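                /*
                 * Rebuild the classic BPF program in host format: the
                 * guest's sock_fprog holds a guest pointer and a length,
                 * and each sock_filter entry has its multi-byte fields
                 * (code, k) byte-swapped before SO_ATTACH_FILTER is issued.
                 */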
2381                 struct target_sock_fprog *tfprog;
2382                 struct target_sock_filter *tfilter;
2383                 struct sock_fprog fprog;
2384                 struct sock_filter *filter;
2385                 int i;
2386 
2387                 if (optlen != sizeof(*tfprog)) {
2388                     return -TARGET_EINVAL;
2389                 }
2390                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2391                     return -TARGET_EFAULT;
2392                 }
2393                 if (!lock_user_struct(VERIFY_READ, tfilter,
2394                                       tswapal(tfprog->filter), 0)) {
2395                     unlock_user_struct(tfprog, optval_addr, 1);
2396                     return -TARGET_EFAULT;
2397                 }
2398 
2399                 fprog.len = tswap16(tfprog->len);
2400                 filter = g_try_new(struct sock_filter, fprog.len);
2401                 if (filter == NULL) {
2402                     unlock_user_struct(tfilter, tfprog->filter, 1);
2403                     unlock_user_struct(tfprog, optval_addr, 1);
2404                     return -TARGET_ENOMEM;
2405                 }
2406                 for (i = 0; i < fprog.len; i++) {
2407                     filter[i].code = tswap16(tfilter[i].code);
2408                     filter[i].jt = tfilter[i].jt;
2409                     filter[i].jf = tfilter[i].jf;
2410                     filter[i].k = tswap32(tfilter[i].k);
2411                 }
2412                 fprog.filter = filter;
2413 
2414                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2415                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2416                 g_free(filter);
2417 
2418                 unlock_user_struct(tfilter, tfprog->filter, 1);
2419                 unlock_user_struct(tfprog, optval_addr, 1);
2420                 return ret;
2421         }
2422         case TARGET_SO_BINDTODEVICE:
2423         {
2424                 char *dev_ifname, *addr_ifname;
2425 
2426                 if (optlen > IFNAMSIZ - 1) {
2427                     optlen = IFNAMSIZ - 1;
2428                 }
2429                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2430                 if (!dev_ifname) {
2431                     return -TARGET_EFAULT;
2432                 }
2433                 optname = SO_BINDTODEVICE;
2434                 addr_ifname = alloca(IFNAMSIZ);
2435                 memcpy(addr_ifname, dev_ifname, optlen);
2436                 addr_ifname[optlen] = 0;
2437                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2438                                            addr_ifname, optlen));
2439                 unlock_user(dev_ifname, optval_addr, 0);
2440                 return ret;
2441         }
2442         case TARGET_SO_LINGER:
2443         {
2444                 struct linger lg;
2445                 struct target_linger *tlg;
2446 
2447                 if (optlen != sizeof(struct target_linger)) {
2448                     return -TARGET_EINVAL;
2449                 }
2450                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2451                     return -TARGET_EFAULT;
2452                 }
2453                 __get_user(lg.l_onoff, &tlg->l_onoff);
2454                 __get_user(lg.l_linger, &tlg->l_linger);
2455                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2456                                 &lg, sizeof(lg)));
2457                 unlock_user_struct(tlg, optval_addr, 0);
2458                 return ret;
2459         }
2460         /* Options with 'int' argument.  */
2461         case TARGET_SO_DEBUG:
2462             optname = SO_DEBUG;
2463             break;
2464         case TARGET_SO_REUSEADDR:
2465             optname = SO_REUSEADDR;
2466             break;
2467 #ifdef SO_REUSEPORT
2468         case TARGET_SO_REUSEPORT:
2469             optname = SO_REUSEPORT;
2470             break;
2471 #endif
2472         case TARGET_SO_TYPE:
2473             optname = SO_TYPE;
2474             break;
2475         case TARGET_SO_ERROR:
2476             optname = SO_ERROR;
2477             break;
2478         case TARGET_SO_DONTROUTE:
2479             optname = SO_DONTROUTE;
2480             break;
2481         case TARGET_SO_BROADCAST:
2482             optname = SO_BROADCAST;
2483             break;
2484         case TARGET_SO_SNDBUF:
2485             optname = SO_SNDBUF;
2486             break;
2487         case TARGET_SO_SNDBUFFORCE:
2488             optname = SO_SNDBUFFORCE;
2489             break;
2490         case TARGET_SO_RCVBUF:
2491             optname = SO_RCVBUF;
2492             break;
2493         case TARGET_SO_RCVBUFFORCE:
2494             optname = SO_RCVBUFFORCE;
2495             break;
2496         case TARGET_SO_KEEPALIVE:
2497             optname = SO_KEEPALIVE;
2498             break;
2499         case TARGET_SO_OOBINLINE:
2500             optname = SO_OOBINLINE;
2501             break;
2502         case TARGET_SO_NO_CHECK:
2503             optname = SO_NO_CHECK;
2504             break;
2505         case TARGET_SO_PRIORITY:
2506             optname = SO_PRIORITY;
2507             break;
2508 #ifdef SO_BSDCOMPAT
2509         case TARGET_SO_BSDCOMPAT:
2510             optname = SO_BSDCOMPAT;
2511             break;
2512 #endif
2513         case TARGET_SO_PASSCRED:
2514             optname = SO_PASSCRED;
2515             break;
2516         case TARGET_SO_PASSSEC:
2517             optname = SO_PASSSEC;
2518             break;
2519         case TARGET_SO_TIMESTAMP:
2520             optname = SO_TIMESTAMP;
2521             break;
2522         case TARGET_SO_RCVLOWAT:
2523             optname = SO_RCVLOWAT;
2524             break;
2525         default:
2526             goto unimplemented;
2527         }
2528         if (optlen < sizeof(uint32_t))
2529             return -TARGET_EINVAL;
2530 
2531         if (get_user_u32(val, optval_addr))
2532             return -TARGET_EFAULT;
2533         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2534         break;
2535 #ifdef SOL_NETLINK
2536     case SOL_NETLINK:
2537         switch (optname) {
2538         case NETLINK_PKTINFO:
2539         case NETLINK_ADD_MEMBERSHIP:
2540         case NETLINK_DROP_MEMBERSHIP:
2541         case NETLINK_BROADCAST_ERROR:
2542         case NETLINK_NO_ENOBUFS:
2543 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2544         case NETLINK_LISTEN_ALL_NSID:
2545         case NETLINK_CAP_ACK:
2546 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2547 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2548         case NETLINK_EXT_ACK:
2549 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2550 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2551         case NETLINK_GET_STRICT_CHK:
2552 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2553             break;
2554         default:
2555             goto unimplemented;
2556         }
2557         val = 0;
2558         if (optlen < sizeof(uint32_t)) {
2559             return -TARGET_EINVAL;
2560         }
2561         if (get_user_u32(val, optval_addr)) {
2562             return -TARGET_EFAULT;
2563         }
2564         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2565                                    sizeof(val)));
2566         break;
2567 #endif /* SOL_NETLINK */
2568     default:
2569     unimplemented:
2570         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2571                       level, optname);
2572         ret = -TARGET_ENOPROTOOPT;
2573     }
2574     return ret;
2575 }
2576 
2577 /* do_getsockopt() Must return target values and target errnos. */
2578 static abi_long do_getsockopt(int sockfd, int level, int optname,
2579                               abi_ulong optval_addr, abi_ulong optlen)
2580 {
2581     abi_long ret;
2582     int len, val;
2583     socklen_t lv;
2584 
2585     switch(level) {
2586     case TARGET_SOL_SOCKET:
2587         level = SOL_SOCKET;
2588         switch (optname) {
2589         /* These don't just return a single integer */
2590         case TARGET_SO_PEERNAME:
2591             goto unimplemented;
2592         case TARGET_SO_RCVTIMEO: {
2593             struct timeval tv;
2594             socklen_t tvlen;
2595 
2596             optname = SO_RCVTIMEO;
2597 
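            /*
             * TARGET_SO_SNDTIMEO jumps here with optname set to
             * SO_SNDTIMEO; the timeout is fetched from the host and
             * converted back to a target_timeval for the guest.
             */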
2598 get_timeout:
2599             if (get_user_u32(len, optlen)) {
2600                 return -TARGET_EFAULT;
2601             }
2602             if (len < 0) {
2603                 return -TARGET_EINVAL;
2604             }
2605 
2606             tvlen = sizeof(tv);
2607             ret = get_errno(getsockopt(sockfd, level, optname,
2608                                        &tv, &tvlen));
2609             if (ret < 0) {
2610                 return ret;
2611             }
2612             if (len > sizeof(struct target_timeval)) {
2613                 len = sizeof(struct target_timeval);
2614             }
2615             if (copy_to_user_timeval(optval_addr, &tv)) {
2616                 return -TARGET_EFAULT;
2617             }
2618             if (put_user_u32(len, optlen)) {
2619                 return -TARGET_EFAULT;
2620             }
2621             break;
2622         }
2623         case TARGET_SO_SNDTIMEO:
2624             optname = SO_SNDTIMEO;
2625             goto get_timeout;
2626         case TARGET_SO_PEERCRED: {
2627             struct ucred cr;
2628             socklen_t crlen;
2629             struct target_ucred *tcr;
2630 
2631             if (get_user_u32(len, optlen)) {
2632                 return -TARGET_EFAULT;
2633             }
2634             if (len < 0) {
2635                 return -TARGET_EINVAL;
2636             }
2637 
2638             crlen = sizeof(cr);
2639             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2640                                        &cr, &crlen));
2641             if (ret < 0) {
2642                 return ret;
2643             }
2644             if (len > crlen) {
2645                 len = crlen;
2646             }
2647             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2648                 return -TARGET_EFAULT;
2649             }
2650             __put_user(cr.pid, &tcr->pid);
2651             __put_user(cr.uid, &tcr->uid);
2652             __put_user(cr.gid, &tcr->gid);
2653             unlock_user_struct(tcr, optval_addr, 1);
2654             if (put_user_u32(len, optlen)) {
2655                 return -TARGET_EFAULT;
2656             }
2657             break;
2658         }
2659         case TARGET_SO_PEERSEC: {
2660             char *name;
2661 
2662             if (get_user_u32(len, optlen)) {
2663                 return -TARGET_EFAULT;
2664             }
2665             if (len < 0) {
2666                 return -TARGET_EINVAL;
2667             }
2668             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2669             if (!name) {
2670                 return -TARGET_EFAULT;
2671             }
2672             lv = len;
2673             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2674                                        name, &lv));
2675             if (put_user_u32(lv, optlen)) {
2676                 ret = -TARGET_EFAULT;
2677             }
2678             unlock_user(name, optval_addr, lv);
2679             break;
2680         }
2681         case TARGET_SO_LINGER:
2682         {
2683             struct linger lg;
2684             socklen_t lglen;
2685             struct target_linger *tlg;
2686 
2687             if (get_user_u32(len, optlen)) {
2688                 return -TARGET_EFAULT;
2689             }
2690             if (len < 0) {
2691                 return -TARGET_EINVAL;
2692             }
2693 
2694             lglen = sizeof(lg);
2695             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2696                                        &lg, &lglen));
2697             if (ret < 0) {
2698                 return ret;
2699             }
2700             if (len > lglen) {
2701                 len = lglen;
2702             }
2703             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2704                 return -TARGET_EFAULT;
2705             }
2706             __put_user(lg.l_onoff, &tlg->l_onoff);
2707             __put_user(lg.l_linger, &tlg->l_linger);
2708             unlock_user_struct(tlg, optval_addr, 1);
2709             if (put_user_u32(len, optlen)) {
2710                 return -TARGET_EFAULT;
2711             }
2712             break;
2713         }
2714         /* Options with 'int' argument.  */
2715         case TARGET_SO_DEBUG:
2716             optname = SO_DEBUG;
2717             goto int_case;
2718         case TARGET_SO_REUSEADDR:
2719             optname = SO_REUSEADDR;
2720             goto int_case;
2721 #ifdef SO_REUSEPORT
2722         case TARGET_SO_REUSEPORT:
2723             optname = SO_REUSEPORT;
2724             goto int_case;
2725 #endif
2726         case TARGET_SO_TYPE:
2727             optname = SO_TYPE;
2728             goto int_case;
2729         case TARGET_SO_ERROR:
2730             optname = SO_ERROR;
2731             goto int_case;
2732         case TARGET_SO_DONTROUTE:
2733             optname = SO_DONTROUTE;
2734             goto int_case;
2735         case TARGET_SO_BROADCAST:
2736             optname = SO_BROADCAST;
2737             goto int_case;
2738         case TARGET_SO_SNDBUF:
2739             optname = SO_SNDBUF;
2740             goto int_case;
2741         case TARGET_SO_RCVBUF:
2742             optname = SO_RCVBUF;
2743             goto int_case;
2744         case TARGET_SO_KEEPALIVE:
2745             optname = SO_KEEPALIVE;
2746             goto int_case;
2747         case TARGET_SO_OOBINLINE:
2748             optname = SO_OOBINLINE;
2749             goto int_case;
2750         case TARGET_SO_NO_CHECK:
2751             optname = SO_NO_CHECK;
2752             goto int_case;
2753         case TARGET_SO_PRIORITY:
2754             optname = SO_PRIORITY;
2755             goto int_case;
2756 #ifdef SO_BSDCOMPAT
2757         case TARGET_SO_BSDCOMPAT:
2758             optname = SO_BSDCOMPAT;
2759             goto int_case;
2760 #endif
2761         case TARGET_SO_PASSCRED:
2762             optname = SO_PASSCRED;
2763             goto int_case;
2764         case TARGET_SO_TIMESTAMP:
2765             optname = SO_TIMESTAMP;
2766             goto int_case;
2767         case TARGET_SO_RCVLOWAT:
2768             optname = SO_RCVLOWAT;
2769             goto int_case;
2770         case TARGET_SO_ACCEPTCONN:
2771             optname = SO_ACCEPTCONN;
2772             goto int_case;
2773         case TARGET_SO_PROTOCOL:
2774             optname = SO_PROTOCOL;
2775             goto int_case;
2776         case TARGET_SO_DOMAIN:
2777             optname = SO_DOMAIN;
2778             goto int_case;
2779         default:
2780             goto int_case;
2781         }
2782         break;
2783     case SOL_TCP:
2784     case SOL_UDP:
2785         /* TCP and UDP options all take an 'int' value.  */
2786     int_case:
2787         if (get_user_u32(len, optlen))
2788             return -TARGET_EFAULT;
2789         if (len < 0)
2790             return -TARGET_EINVAL;
2791         lv = sizeof(lv);
2792         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2793         if (ret < 0)
2794             return ret;
2795         if (optname == SO_TYPE) {
2796             val = host_to_target_sock_type(val);
2797         }
2798         if (len > lv)
2799             len = lv;
2800         if (len == 4) {
2801             if (put_user_u32(val, optval_addr))
2802                 return -TARGET_EFAULT;
2803         } else {
2804             if (put_user_u8(val, optval_addr))
2805                 return -TARGET_EFAULT;
2806         }
2807         if (put_user_u32(len, optlen))
2808             return -TARGET_EFAULT;
2809         break;
2810     case SOL_IP:
2811         switch(optname) {
2812         case IP_TOS:
2813         case IP_TTL:
2814         case IP_HDRINCL:
2815         case IP_ROUTER_ALERT:
2816         case IP_RECVOPTS:
2817         case IP_RETOPTS:
2818         case IP_PKTINFO:
2819         case IP_MTU_DISCOVER:
2820         case IP_RECVERR:
2821         case IP_RECVTOS:
2822 #ifdef IP_FREEBIND
2823         case IP_FREEBIND:
2824 #endif
2825         case IP_MULTICAST_TTL:
2826         case IP_MULTICAST_LOOP:
2827             if (get_user_u32(len, optlen))
2828                 return -TARGET_EFAULT;
2829             if (len < 0)
2830                 return -TARGET_EINVAL;
2831             lv = sizeof(lv);
2832             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2833             if (ret < 0)
2834                 return ret;
2835             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2836                 len = 1;
2837                 if (put_user_u32(len, optlen)
2838                     || put_user_u8(val, optval_addr))
2839                     return -TARGET_EFAULT;
2840             } else {
2841                 if (len > sizeof(int))
2842                     len = sizeof(int);
2843                 if (put_user_u32(len, optlen)
2844                     || put_user_u32(val, optval_addr))
2845                     return -TARGET_EFAULT;
2846             }
2847             break;
2848         default:
2849             ret = -TARGET_ENOPROTOOPT;
2850             break;
2851         }
2852         break;
2853     case SOL_IPV6:
2854         switch (optname) {
2855         case IPV6_MTU_DISCOVER:
2856         case IPV6_MTU:
2857         case IPV6_V6ONLY:
2858         case IPV6_RECVPKTINFO:
2859         case IPV6_UNICAST_HOPS:
2860         case IPV6_MULTICAST_HOPS:
2861         case IPV6_MULTICAST_LOOP:
2862         case IPV6_RECVERR:
2863         case IPV6_RECVHOPLIMIT:
2864         case IPV6_2292HOPLIMIT:
2865         case IPV6_CHECKSUM:
2866         case IPV6_ADDRFORM:
2867         case IPV6_2292PKTINFO:
2868         case IPV6_RECVTCLASS:
2869         case IPV6_RECVRTHDR:
2870         case IPV6_2292RTHDR:
2871         case IPV6_RECVHOPOPTS:
2872         case IPV6_2292HOPOPTS:
2873         case IPV6_RECVDSTOPTS:
2874         case IPV6_2292DSTOPTS:
2875         case IPV6_TCLASS:
2876         case IPV6_ADDR_PREFERENCES:
2877 #ifdef IPV6_RECVPATHMTU
2878         case IPV6_RECVPATHMTU:
2879 #endif
2880 #ifdef IPV6_TRANSPARENT
2881         case IPV6_TRANSPARENT:
2882 #endif
2883 #ifdef IPV6_FREEBIND
2884         case IPV6_FREEBIND:
2885 #endif
2886 #ifdef IPV6_RECVORIGDSTADDR
2887         case IPV6_RECVORIGDSTADDR:
2888 #endif
2889             if (get_user_u32(len, optlen))
2890                 return -TARGET_EFAULT;
2891             if (len < 0)
2892                 return -TARGET_EINVAL;
2893             lv = sizeof(lv);
2894             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2895             if (ret < 0)
2896                 return ret;
2897             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2898                 len = 1;
2899                 if (put_user_u32(len, optlen)
2900                     || put_user_u8(val, optval_addr))
2901                     return -TARGET_EFAULT;
2902             } else {
2903                 if (len > sizeof(int))
2904                     len = sizeof(int);
2905                 if (put_user_u32(len, optlen)
2906                     || put_user_u32(val, optval_addr))
2907                     return -TARGET_EFAULT;
2908             }
2909             break;
2910         default:
2911             ret = -TARGET_ENOPROTOOPT;
2912             break;
2913         }
2914         break;
2915 #ifdef SOL_NETLINK
2916     case SOL_NETLINK:
2917         switch (optname) {
2918         case NETLINK_PKTINFO:
2919         case NETLINK_BROADCAST_ERROR:
2920         case NETLINK_NO_ENOBUFS:
2921 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2922         case NETLINK_LISTEN_ALL_NSID:
2923         case NETLINK_CAP_ACK:
2924 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2925 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2926         case NETLINK_EXT_ACK:
2927 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2928 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2929         case NETLINK_GET_STRICT_CHK:
2930 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2931             if (get_user_u32(len, optlen)) {
2932                 return -TARGET_EFAULT;
2933             }
2934             if (len != sizeof(val)) {
2935                 return -TARGET_EINVAL;
2936             }
2937             lv = len;
2938             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2939             if (ret < 0) {
2940                 return ret;
2941             }
2942             if (put_user_u32(lv, optlen)
2943                 || put_user_u32(val, optval_addr)) {
2944                 return -TARGET_EFAULT;
2945             }
2946             break;
2947 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2948         case NETLINK_LIST_MEMBERSHIPS:
2949         {
2950             uint32_t *results;
2951             int i;
2952             if (get_user_u32(len, optlen)) {
2953                 return -TARGET_EFAULT;
2954             }
2955             if (len < 0) {
2956                 return -TARGET_EINVAL;
2957             }
2958             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2959             if (!results && len > 0) {
2960                 return -TARGET_EFAULT;
2961             }
2962             lv = len;
2963             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2964             if (ret < 0) {
2965                 unlock_user(results, optval_addr, 0);
2966                 return ret;
2967             }
2968             /* Swap host endianness to target endianness. */
2969             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2970                 results[i] = tswap32(results[i]);
2971             }
2972             if (put_user_u32(lv, optlen)) {
2973                 return -TARGET_EFAULT;
2974             }
2975             unlock_user(results, optval_addr, 0);
2976             break;
2977         }
2978 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2979         default:
2980             goto unimplemented;
2981         }
2982         break;
2983 #endif /* SOL_NETLINK */
2984     default:
2985     unimplemented:
2986         qemu_log_mask(LOG_UNIMP,
2987                       "getsockopt level=%d optname=%d not yet supported\n",
2988                       level, optname);
2989         ret = -TARGET_EOPNOTSUPP;
2990         break;
2991     }
2992     return ret;
2993 }
2994 
2995 /* Convert target low/high pair representing file offset into the host
2996  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2997  * as the kernel doesn't handle them either.
2998  */
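/*
 * Worked example (hypothetical values), assuming a 32-bit target and a
 * 64-bit host: tlow = 0x89abcdef and thigh = 0x01234567 combine into
 * off = 0x0123456789abcdef, giving *hlow = off and *hhigh = 0.
 */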
2999 static void target_to_host_low_high(abi_ulong tlow,
3000                                     abi_ulong thigh,
3001                                     unsigned long *hlow,
3002                                     unsigned long *hhigh)
3003 {
3004     uint64_t off = tlow |
3005         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3006         TARGET_LONG_BITS / 2;
3007 
3008     *hlow = off;
3009     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3010 }
3011 
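/*
 * Translate a guest iovec array into a locked host iovec array. On failure
 * NULL is returned with errno set; a bad buffer after the first one is not
 * an error but truncates the transfer (zero length from that point on).
 */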
3012 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3013                                 abi_ulong count, int copy)
3014 {
3015     struct target_iovec *target_vec;
3016     struct iovec *vec;
3017     abi_ulong total_len, max_len;
3018     int i;
3019     int err = 0;
3020     bool bad_address = false;
3021 
3022     if (count == 0) {
3023         errno = 0;
3024         return NULL;
3025     }
3026     if (count > IOV_MAX) {
3027         errno = EINVAL;
3028         return NULL;
3029     }
3030 
3031     vec = g_try_new0(struct iovec, count);
3032     if (vec == NULL) {
3033         errno = ENOMEM;
3034         return NULL;
3035     }
3036 
3037     target_vec = lock_user(VERIFY_READ, target_addr,
3038                            count * sizeof(struct target_iovec), 1);
3039     if (target_vec == NULL) {
3040         err = EFAULT;
3041         goto fail2;
3042     }
3043 
3044     /* ??? If host page size > target page size, this will result in a
3045        value larger than what we can actually support.  */
3046     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3047     total_len = 0;
3048 
3049     for (i = 0; i < count; i++) {
3050         abi_ulong base = tswapal(target_vec[i].iov_base);
3051         abi_long len = tswapal(target_vec[i].iov_len);
3052 
3053         if (len < 0) {
3054             err = EINVAL;
3055             goto fail;
3056         } else if (len == 0) {
3057             /* Zero length pointer is ignored.  */
3058             vec[i].iov_base = 0;
3059         } else {
3060             vec[i].iov_base = lock_user(type, base, len, copy);
3061             /* If the first buffer pointer is bad, this is a fault.  But
3062              * subsequent bad buffers will result in a partial write; this
3063              * is realized by filling the vector with null pointers and
3064              * zero lengths. */
3065             if (!vec[i].iov_base) {
3066                 if (i == 0) {
3067                     err = EFAULT;
3068                     goto fail;
3069                 } else {
3070                     bad_address = true;
3071                 }
3072             }
3073             if (bad_address) {
3074                 len = 0;
3075             }
3076             if (len > max_len - total_len) {
3077                 len = max_len - total_len;
3078             }
3079         }
3080         vec[i].iov_len = len;
3081         total_len += len;
3082     }
3083 
3084     unlock_user(target_vec, target_addr, 0);
3085     return vec;
3086 
3087  fail:
3088     while (--i >= 0) {
3089         if (tswapal(target_vec[i].iov_len) > 0) {
3090             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3091         }
3092     }
3093     unlock_user(target_vec, target_addr, 0);
3094  fail2:
3095     g_free(vec);
3096     errno = err;
3097     return NULL;
3098 }
3099 
3100 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3101                          abi_ulong count, int copy)
3102 {
3103     struct target_iovec *target_vec;
3104     int i;
3105 
3106     target_vec = lock_user(VERIFY_READ, target_addr,
3107                            count * sizeof(struct target_iovec), 1);
3108     if (target_vec) {
3109         for (i = 0; i < count; i++) {
3110             abi_ulong base = tswapal(target_vec[i].iov_base);
3111             abi_long len = tswapal(target_vec[i].iov_len);
3112             if (len < 0) {
3113                 break;
3114             }
3115             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3116         }
3117         unlock_user(target_vec, target_addr, 0);
3118     }
3119 
3120     g_free(vec);
3121 }
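/*
 * Minimal usage sketch (assumes a hypothetical readv-style caller that has
 * already fetched fd, the guest iovec address and the count, and that
 * safe_readv is the safe_syscall wrapper used elsewhere in this file):
 *
 *     struct iovec *vec = lock_iovec(VERIFY_WRITE, target_iov_addr,
 *                                    count, 0);
 *     if (vec == NULL) {
 *         return -host_to_target_errno(errno);
 *     }
 *     ret = get_errno(safe_readv(fd, vec, count));
 *     unlock_iovec(vec, target_iov_addr, count, 1);
 *
 * The final "copy" argument says whether data actually moves: when locking,
 * 1 copies guest data in (the send/write path); when unlocking, 1 copies
 * host data back out (the recv/read path).
 */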
3122 
3123 static inline int target_to_host_sock_type(int *type)
3124 {
3125     int host_type = 0;
3126     int target_type = *type;
3127 
3128     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3129     case TARGET_SOCK_DGRAM:
3130         host_type = SOCK_DGRAM;
3131         break;
3132     case TARGET_SOCK_STREAM:
3133         host_type = SOCK_STREAM;
3134         break;
3135     default:
3136         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3137         break;
3138     }
3139     if (target_type & TARGET_SOCK_CLOEXEC) {
3140 #if defined(SOCK_CLOEXEC)
3141         host_type |= SOCK_CLOEXEC;
3142 #else
3143         return -TARGET_EINVAL;
3144 #endif
3145     }
3146     if (target_type & TARGET_SOCK_NONBLOCK) {
3147 #if defined(SOCK_NONBLOCK)
3148         host_type |= SOCK_NONBLOCK;
3149 #elif !defined(O_NONBLOCK)
3150         return -TARGET_EINVAL;
3151 #endif
3152     }
3153     *type = host_type;
3154     return 0;
3155 }
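/*
 * Example (illustrative): a guest request of
 * TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK becomes
 * SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK on hosts defining both flags;
 * on a host lacking SOCK_NONBLOCK but providing O_NONBLOCK the flag is
 * dropped here and re-applied after socket creation by sock_flags_fixup()
 * via fcntl(F_SETFL).
 */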
3156 
3157 /* Try to emulate socket type flags after socket creation.  */
3158 static int sock_flags_fixup(int fd, int target_type)
3159 {
3160 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3161     if (target_type & TARGET_SOCK_NONBLOCK) {
3162         int flags = fcntl(fd, F_GETFL);
3163         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3164             close(fd);
3165             return -TARGET_EINVAL;
3166         }
3167     }
3168 #endif
3169     return fd;
3170 }
3171 
3172 /* do_socket() Must return target values and target errnos. */
3173 static abi_long do_socket(int domain, int type, int protocol)
3174 {
3175     int target_type = type;
3176     int ret;
3177 
3178     ret = target_to_host_sock_type(&type);
3179     if (ret) {
3180         return ret;
3181     }
3182 
3183     if (domain == PF_NETLINK && !(
3184 #ifdef CONFIG_RTNETLINK
3185          protocol == NETLINK_ROUTE ||
3186 #endif
3187          protocol == NETLINK_KOBJECT_UEVENT ||
3188          protocol == NETLINK_AUDIT)) {
3189         return -TARGET_EPROTONOSUPPORT;
3190     }
3191 
3192     if (domain == AF_PACKET ||
3193         (domain == AF_INET && type == SOCK_PACKET)) {
3194         protocol = tswap16(protocol);
3195     }
3196 
3197     ret = get_errno(socket(domain, type, protocol));
3198     if (ret >= 0) {
3199         ret = sock_flags_fixup(ret, target_type);
3200         if (type == SOCK_PACKET) {
3201             /* Handle an obsolete case:
3202              * if the socket type is SOCK_PACKET, it is bound by name.
3203              */
3204             fd_trans_register(ret, &target_packet_trans);
3205         } else if (domain == PF_NETLINK) {
3206             switch (protocol) {
3207 #ifdef CONFIG_RTNETLINK
3208             case NETLINK_ROUTE:
3209                 fd_trans_register(ret, &target_netlink_route_trans);
3210                 break;
3211 #endif
3212             case NETLINK_KOBJECT_UEVENT:
3213                 /* nothing to do: messages are strings */
3214                 break;
3215             case NETLINK_AUDIT:
3216                 fd_trans_register(ret, &target_netlink_audit_trans);
3217                 break;
3218             default:
3219                 g_assert_not_reached();
3220             }
3221         }
3222     }
3223     return ret;
3224 }
3225 
3226 /* do_bind() Must return target values and target errnos. */
3227 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3228                         socklen_t addrlen)
3229 {
3230     void *addr;
3231     abi_long ret;
3232 
3233     if ((int)addrlen < 0) {
3234         return -TARGET_EINVAL;
3235     }
3236 
3237     addr = alloca(addrlen+1);
3238 
3239     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3240     if (ret)
3241         return ret;
3242 
3243     return get_errno(bind(sockfd, addr, addrlen));
3244 }
3245 
3246 /* do_connect() Must return target values and target errnos. */
3247 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3248                            socklen_t addrlen)
3249 {
3250     void *addr;
3251     abi_long ret;
3252 
3253     if ((int)addrlen < 0) {
3254         return -TARGET_EINVAL;
3255     }
3256 
3257     addr = alloca(addrlen+1);
3258 
3259     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3260     if (ret)
3261         return ret;
3262 
3263     return get_errno(safe_connect(sockfd, addr, addrlen));
3264 }
3265 
3266 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3267 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3268                                       int flags, int send)
3269 {
3270     abi_long ret, len;
3271     struct msghdr msg;
3272     abi_ulong count;
3273     struct iovec *vec;
3274     abi_ulong target_vec;
3275 
3276     if (msgp->msg_name) {
3277         msg.msg_namelen = tswap32(msgp->msg_namelen);
3278         msg.msg_name = alloca(msg.msg_namelen+1);
3279         ret = target_to_host_sockaddr(fd, msg.msg_name,
3280                                       tswapal(msgp->msg_name),
3281                                       msg.msg_namelen);
3282         if (ret == -TARGET_EFAULT) {
3283             /* For connected sockets msg_name and msg_namelen must
3284              * be ignored, so returning EFAULT immediately is wrong.
3285              * Instead, pass a bad msg_name to the host kernel, and
3286              * let it decide whether to return EFAULT or not.
3287              */
3288             msg.msg_name = (void *)-1;
3289         } else if (ret) {
3290             goto out2;
3291         }
3292     } else {
3293         msg.msg_name = NULL;
3294         msg.msg_namelen = 0;
3295     }
3296     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3297     msg.msg_control = alloca(msg.msg_controllen);
3298     memset(msg.msg_control, 0, msg.msg_controllen);
3299 
3300     msg.msg_flags = tswap32(msgp->msg_flags);
3301 
3302     count = tswapal(msgp->msg_iovlen);
3303     target_vec = tswapal(msgp->msg_iov);
3304 
3305     if (count > IOV_MAX) {
3306         /* sendmsg/recvmsg return a different errno for this condition than
3307          * readv/writev, so we must catch it here before lock_iovec() does.
3308          */
3309         ret = -TARGET_EMSGSIZE;
3310         goto out2;
3311     }
3312 
3313     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3314                      target_vec, count, send);
3315     if (vec == NULL) {
3316         ret = -host_to_target_errno(errno);
3317         goto out2;
3318     }
3319     msg.msg_iovlen = count;
3320     msg.msg_iov = vec;
3321 
3322     if (send) {
3323         if (fd_trans_target_to_host_data(fd)) {
3324             void *host_msg;
3325 
3326             host_msg = g_malloc(msg.msg_iov->iov_len);
3327             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3328             ret = fd_trans_target_to_host_data(fd)(host_msg,
3329                                                    msg.msg_iov->iov_len);
3330             if (ret >= 0) {
3331                 msg.msg_iov->iov_base = host_msg;
3332                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3333             }
3334             g_free(host_msg);
3335         } else {
3336             ret = target_to_host_cmsg(&msg, msgp);
3337             if (ret == 0) {
3338                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3339             }
3340         }
3341     } else {
3342         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3343         if (!is_error(ret)) {
3344             len = ret;
3345             if (fd_trans_host_to_target_data(fd)) {
3346                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3347                                                MIN(msg.msg_iov->iov_len, len));
3348             } else {
3349                 ret = host_to_target_cmsg(msgp, &msg);
3350             }
3351             if (!is_error(ret)) {
3352                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3353                 msgp->msg_flags = tswap32(msg.msg_flags);
3354                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3355                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3356                                     msg.msg_name, msg.msg_namelen);
3357                     if (ret) {
3358                         goto out;
3359                     }
3360                 }
3361 
3362                 ret = len;
3363             }
3364         }
3365     }
3366 
3367 out:
3368     unlock_iovec(vec, target_vec, count, !send);
3369 out2:
3370     return ret;
3371 }
3372 
3373 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3374                                int flags, int send)
3375 {
3376     abi_long ret;
3377     struct target_msghdr *msgp;
3378 
3379     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3380                           msgp,
3381                           target_msg,
3382                           send ? 1 : 0)) {
3383         return -TARGET_EFAULT;
3384     }
3385     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3386     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3387     return ret;
3388 }
3389 
3390 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3391  * so it might not have this *mmsg-specific flag either.
3392  */
3393 #ifndef MSG_WAITFORONE
3394 #define MSG_WAITFORONE 0x10000
3395 #endif
3396 
3397 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3398                                 unsigned int vlen, unsigned int flags,
3399                                 int send)
3400 {
3401     struct target_mmsghdr *mmsgp;
3402     abi_long ret = 0;
3403     int i;
3404 
3405     if (vlen > UIO_MAXIOV) {
3406         vlen = UIO_MAXIOV;
3407     }
3408 
3409     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3410     if (!mmsgp) {
3411         return -TARGET_EFAULT;
3412     }
3413 
3414     for (i = 0; i < vlen; i++) {
3415         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3416         if (is_error(ret)) {
3417             break;
3418         }
3419         mmsgp[i].msg_len = tswap32(ret);
3420         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3421         if (flags & MSG_WAITFORONE) {
3422             flags |= MSG_DONTWAIT;
3423         }
3424     }
3425 
3426     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3427 
3428     /* Return the number of datagrams sent or received if we transferred
3429      * any at all; otherwise return the error.
3430      */
3431     if (i) {
3432         return i;
3433     }
3434     return ret;
3435 }
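/*
 * Example of the partial-success rule above (illustrative): with vlen == 3,
 * if the first recvmsg succeeds and the second fails with EAGAIN, the call
 * returns 1 and the error is dropped, matching the kernel's
 * sendmmsg/recvmmsg semantics.  With MSG_WAITFORONE set, the second and
 * third iterations also carry MSG_DONTWAIT.
 */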
3436 
3437 /* do_accept4() Must return target values and target errnos. */
3438 static abi_long do_accept4(int fd, abi_ulong target_addr,
3439                            abi_ulong target_addrlen_addr, int flags)
3440 {
3441     socklen_t addrlen, ret_addrlen;
3442     void *addr;
3443     abi_long ret;
3444     int host_flags;
3445 
3446     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3447 
3448     if (target_addr == 0) {
3449         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3450     }
3451 
3452     /* Linux returns EFAULT if the addrlen pointer is invalid */
3453     if (get_user_u32(addrlen, target_addrlen_addr))
3454         return -TARGET_EFAULT;
3455 
3456     if ((int)addrlen < 0) {
3457         return -TARGET_EINVAL;
3458     }
3459 
3460     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3461         return -TARGET_EFAULT;
3462     }
3463 
3464     addr = alloca(addrlen);
3465 
3466     ret_addrlen = addrlen;
3467     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3468     if (!is_error(ret)) {
3469         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3470         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3471             ret = -TARGET_EFAULT;
3472         }
3473     }
3474     return ret;
3475 }
3476 
3477 /* do_getpeername() Must return target values and target errnos. */
3478 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3479                                abi_ulong target_addrlen_addr)
3480 {
3481     socklen_t addrlen, ret_addrlen;
3482     void *addr;
3483     abi_long ret;
3484 
3485     if (get_user_u32(addrlen, target_addrlen_addr))
3486         return -TARGET_EFAULT;
3487 
3488     if ((int)addrlen < 0) {
3489         return -TARGET_EINVAL;
3490     }
3491 
3492     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3493         return -TARGET_EFAULT;
3494     }
3495 
3496     addr = alloca(addrlen);
3497 
3498     ret_addrlen = addrlen;
3499     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3500     if (!is_error(ret)) {
3501         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3502         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3503             ret = -TARGET_EFAULT;
3504         }
3505     }
3506     return ret;
3507 }
3508 
3509 /* do_getsockname() Must return target values and target errnos. */
3510 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3511                                abi_ulong target_addrlen_addr)
3512 {
3513     socklen_t addrlen, ret_addrlen;
3514     void *addr;
3515     abi_long ret;
3516 
3517     if (get_user_u32(addrlen, target_addrlen_addr))
3518         return -TARGET_EFAULT;
3519 
3520     if ((int)addrlen < 0) {
3521         return -TARGET_EINVAL;
3522     }
3523 
3524     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3525         return -TARGET_EFAULT;
3526     }
3527 
3528     addr = alloca(addrlen);
3529 
3530     ret_addrlen = addrlen;
3531     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3532     if (!is_error(ret)) {
3533         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3534         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3535             ret = -TARGET_EFAULT;
3536         }
3537     }
3538     return ret;
3539 }
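/*
 * Illustrative example of the addrlen handling shared by do_accept4(),
 * do_getpeername() and do_getsockname(): if the guest supplies addrlen == 8
 * for an AF_INET socket (whose sockaddr_in is 16 bytes), only 8 bytes of
 * the address are copied out, but 16 is written back through
 * target_addrlen_addr, mirroring the kernel's truncate-and-report
 * behaviour.
 */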
3540 
3541 /* do_socketpair() Must return target values and target errnos. */
3542 static abi_long do_socketpair(int domain, int type, int protocol,
3543                               abi_ulong target_tab_addr)
3544 {
3545     int tab[2];
3546     abi_long ret;
3547 
3548     target_to_host_sock_type(&type);
3549 
3550     ret = get_errno(socketpair(domain, type, protocol, tab));
3551     if (!is_error(ret)) {
3552         if (put_user_s32(tab[0], target_tab_addr)
3553             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3554             ret = -TARGET_EFAULT;
3555     }
3556     return ret;
3557 }
3558 
3559 /* do_sendto() Must return target values and target errnos. */
3560 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3561                           abi_ulong target_addr, socklen_t addrlen)
3562 {
3563     void *addr;
3564     void *host_msg;
3565     void *copy_msg = NULL;
3566     abi_long ret;
3567 
3568     if ((int)addrlen < 0) {
3569         return -TARGET_EINVAL;
3570     }
3571 
3572     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3573     if (!host_msg)
3574         return -TARGET_EFAULT;
3575     if (fd_trans_target_to_host_data(fd)) {
3576         copy_msg = host_msg;
3577         host_msg = g_malloc(len);
3578         memcpy(host_msg, copy_msg, len);
3579         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3580         if (ret < 0) {
3581             goto fail;
3582         }
3583     }
3584     if (target_addr) {
3585         addr = alloca(addrlen+1);
3586         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3587         if (ret) {
3588             goto fail;
3589         }
3590         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3591     } else {
3592         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3593     }
3594 fail:
3595     if (copy_msg) {
3596         g_free(host_msg);
3597         host_msg = copy_msg;
3598     }
3599     unlock_user(host_msg, msg, 0);
3600     return ret;
3601 }
3602 
3603 /* do_recvfrom() Must return target values and target errnos. */
3604 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3605                             abi_ulong target_addr,
3606                             abi_ulong target_addrlen)
3607 {
3608     socklen_t addrlen, ret_addrlen;
3609     void *addr;
3610     void *host_msg;
3611     abi_long ret;
3612 
3613     if (!msg) {
3614         host_msg = NULL;
3615     } else {
3616         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3617         if (!host_msg) {
3618             return -TARGET_EFAULT;
3619         }
3620     }
3621     if (target_addr) {
3622         if (get_user_u32(addrlen, target_addrlen)) {
3623             ret = -TARGET_EFAULT;
3624             goto fail;
3625         }
3626         if ((int)addrlen < 0) {
3627             ret = -TARGET_EINVAL;
3628             goto fail;
3629         }
3630         addr = alloca(addrlen);
3631         ret_addrlen = addrlen;
3632         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3633                                       addr, &ret_addrlen));
3634     } else {
3635         addr = NULL; /* To keep compiler quiet.  */
3636         addrlen = 0; /* To keep compiler quiet.  */
3637         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3638     }
3639     if (!is_error(ret)) {
3640         if (fd_trans_host_to_target_data(fd)) {
3641             abi_long trans;
3642             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3643             if (is_error(trans)) {
3644                 ret = trans;
3645                 goto fail;
3646             }
3647         }
3648         if (target_addr) {
3649             host_to_target_sockaddr(target_addr, addr,
3650                                     MIN(addrlen, ret_addrlen));
3651             if (put_user_u32(ret_addrlen, target_addrlen)) {
3652                 ret = -TARGET_EFAULT;
3653                 goto fail;
3654             }
3655         }
3656         unlock_user(host_msg, msg, len);
3657     } else {
3658 fail:
3659         unlock_user(host_msg, msg, 0);
3660     }
3661     return ret;
3662 }
3663 
3664 #ifdef TARGET_NR_socketcall
3665 /* do_socketcall() must return target values and target errnos. */
3666 static abi_long do_socketcall(int num, abi_ulong vptr)
3667 {
3668     static const unsigned nargs[] = { /* number of arguments per operation */
3669         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3670         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3671         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3672         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3673         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3674         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3675         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3676         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3677         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3678         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3679         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3680         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3681         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3682         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3683         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3684         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3685         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3686         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3687         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3688         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3689     };
3690     abi_long a[6]; /* max 6 args */
3691     unsigned i;
3692 
3693     /* check the range of the first argument num */
3694     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3695     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3696         return -TARGET_EINVAL;
3697     }
3698     /* ensure we have space for args */
3699     if (nargs[num] > ARRAY_SIZE(a)) {
3700         return -TARGET_EINVAL;
3701     }
3702     /* collect the arguments in a[] according to nargs[] */
3703     for (i = 0; i < nargs[num]; ++i) {
3704         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3705             return -TARGET_EFAULT;
3706         }
3707     }
3708     /* now when we have the args, invoke the appropriate underlying function */
3709     switch (num) {
3710     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3711         return do_socket(a[0], a[1], a[2]);
3712     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3713         return do_bind(a[0], a[1], a[2]);
3714     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3715         return do_connect(a[0], a[1], a[2]);
3716     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3717         return get_errno(listen(a[0], a[1]));
3718     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3719         return do_accept4(a[0], a[1], a[2], 0);
3720     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3721         return do_getsockname(a[0], a[1], a[2]);
3722     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3723         return do_getpeername(a[0], a[1], a[2]);
3724     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3725         return do_socketpair(a[0], a[1], a[2], a[3]);
3726     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3727         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3728     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3729         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3730     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3731         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3732     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3733         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3734     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3735         return get_errno(shutdown(a[0], a[1]));
3736     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3737         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3738     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3739         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3740     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3741         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3742     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3743         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3744     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3745         return do_accept4(a[0], a[1], a[2], a[3]);
3746     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3747         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3748     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3749         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3750     default:
3751         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3752         return -TARGET_EINVAL;
3753     }
3754 }
3755 #endif
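/*
 * Sketch of the argument unpacking above (illustrative): a guest
 * socketcall(TARGET_SYS_CONNECT, vptr) has nargs[TARGET_SYS_CONNECT] == 3,
 * so a[0]..a[2] are loaded from vptr, vptr + sizeof(abi_long) and
 * vptr + 2 * sizeof(abi_long), then forwarded as do_connect(a[0], a[1], a[2]).
 */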
3756 
3757 #define N_SHM_REGIONS	32
3758 
3759 static struct shm_region {
3760     abi_ulong start;
3761     abi_ulong size;
3762     bool in_use;
3763 } shm_regions[N_SHM_REGIONS];
3764 
3765 #ifndef TARGET_SEMID64_DS
3766 /* asm-generic version of this struct */
3767 struct target_semid64_ds
3768 {
3769   struct target_ipc_perm sem_perm;
3770   abi_ulong sem_otime;
3771 #if TARGET_ABI_BITS == 32
3772   abi_ulong __unused1;
3773 #endif
3774   abi_ulong sem_ctime;
3775 #if TARGET_ABI_BITS == 32
3776   abi_ulong __unused2;
3777 #endif
3778   abi_ulong sem_nsems;
3779   abi_ulong __unused3;
3780   abi_ulong __unused4;
3781 };
3782 #endif
3783 
3784 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3785                                                abi_ulong target_addr)
3786 {
3787     struct target_ipc_perm *target_ip;
3788     struct target_semid64_ds *target_sd;
3789 
3790     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3791         return -TARGET_EFAULT;
3792     target_ip = &(target_sd->sem_perm);
3793     host_ip->__key = tswap32(target_ip->__key);
3794     host_ip->uid = tswap32(target_ip->uid);
3795     host_ip->gid = tswap32(target_ip->gid);
3796     host_ip->cuid = tswap32(target_ip->cuid);
3797     host_ip->cgid = tswap32(target_ip->cgid);
3798 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3799     host_ip->mode = tswap32(target_ip->mode);
3800 #else
3801     host_ip->mode = tswap16(target_ip->mode);
3802 #endif
3803 #if defined(TARGET_PPC)
3804     host_ip->__seq = tswap32(target_ip->__seq);
3805 #else
3806     host_ip->__seq = tswap16(target_ip->__seq);
3807 #endif
3808     unlock_user_struct(target_sd, target_addr, 0);
3809     return 0;
3810 }
3811 
3812 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3813                                                struct ipc_perm *host_ip)
3814 {
3815     struct target_ipc_perm *target_ip;
3816     struct target_semid64_ds *target_sd;
3817 
3818     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3819         return -TARGET_EFAULT;
3820     target_ip = &(target_sd->sem_perm);
3821     target_ip->__key = tswap32(host_ip->__key);
3822     target_ip->uid = tswap32(host_ip->uid);
3823     target_ip->gid = tswap32(host_ip->gid);
3824     target_ip->cuid = tswap32(host_ip->cuid);
3825     target_ip->cgid = tswap32(host_ip->cgid);
3826 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3827     target_ip->mode = tswap32(host_ip->mode);
3828 #else
3829     target_ip->mode = tswap16(host_ip->mode);
3830 #endif
3831 #if defined(TARGET_PPC)
3832     target_ip->__seq = tswap32(host_ip->__seq);
3833 #else
3834     target_ip->__seq = tswap16(host_ip->__seq);
3835 #endif
3836     unlock_user_struct(target_sd, target_addr, 1);
3837     return 0;
3838 }
3839 
3840 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3841                                                abi_ulong target_addr)
3842 {
3843     struct target_semid64_ds *target_sd;
3844 
3845     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3846         return -TARGET_EFAULT;
3847     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3848         return -TARGET_EFAULT;
3849     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3850     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3851     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3852     unlock_user_struct(target_sd, target_addr, 0);
3853     return 0;
3854 }
3855 
3856 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3857                                                struct semid_ds *host_sd)
3858 {
3859     struct target_semid64_ds *target_sd;
3860 
3861     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3862         return -TARGET_EFAULT;
3863     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3864         return -TARGET_EFAULT;
3865     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3866     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3867     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3868     unlock_user_struct(target_sd, target_addr, 1);
3869     return 0;
3870 }
3871 
3872 struct target_seminfo {
3873     int semmap;
3874     int semmni;
3875     int semmns;
3876     int semmnu;
3877     int semmsl;
3878     int semopm;
3879     int semume;
3880     int semusz;
3881     int semvmx;
3882     int semaem;
3883 };
3884 
3885 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3886                                               struct seminfo *host_seminfo)
3887 {
3888     struct target_seminfo *target_seminfo;
3889     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3890         return -TARGET_EFAULT;
3891     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3892     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3893     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3894     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3895     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3896     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3897     __put_user(host_seminfo->semume, &target_seminfo->semume);
3898     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3899     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3900     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3901     unlock_user_struct(target_seminfo, target_addr, 1);
3902     return 0;
3903 }
3904 
3905 union semun {
3906 	int val;
3907 	struct semid_ds *buf;
3908 	unsigned short *array;
3909 	struct seminfo *__buf;
3910 };
3911 
3912 union target_semun {
3913 	int val;
3914 	abi_ulong buf;
3915 	abi_ulong array;
3916 	abi_ulong __buf;
3917 };
3918 
3919 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3920                                                abi_ulong target_addr)
3921 {
3922     int nsems;
3923     unsigned short *array;
3924     union semun semun;
3925     struct semid_ds semid_ds;
3926     int i, ret;
3927 
3928     semun.buf = &semid_ds;
3929 
3930     ret = semctl(semid, 0, IPC_STAT, semun);
3931     if (ret == -1)
3932         return get_errno(ret);
3933 
3934     nsems = semid_ds.sem_nsems;
3935 
3936     *host_array = g_try_new(unsigned short, nsems);
3937     if (!*host_array) {
3938         return -TARGET_ENOMEM;
3939     }
3940     array = lock_user(VERIFY_READ, target_addr,
3941                       nsems*sizeof(unsigned short), 1);
3942     if (!array) {
3943         g_free(*host_array);
3944         return -TARGET_EFAULT;
3945     }
3946 
3947     for (i = 0; i < nsems; i++) {
3948         __get_user((*host_array)[i], &array[i]);
3949     }
3950     unlock_user(array, target_addr, 0);
3951 
3952     return 0;
3953 }
3954 
3955 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3956                                                unsigned short **host_array)
3957 {
3958     int nsems;
3959     unsigned short *array;
3960     union semun semun;
3961     struct semid_ds semid_ds;
3962     int i, ret;
3963 
3964     semun.buf = &semid_ds;
3965 
3966     ret = semctl(semid, 0, IPC_STAT, semun);
3967     if (ret == -1)
3968         return get_errno(ret);
3969 
3970     nsems = semid_ds.sem_nsems;
3971 
3972     array = lock_user(VERIFY_WRITE, target_addr,
3973                       nsems*sizeof(unsigned short), 0);
3974     if (!array)
3975         return -TARGET_EFAULT;
3976 
3977     for (i = 0; i < nsems; i++) {
3978         __put_user((*host_array)[i], &array[i]);
3979     }
3980     g_free(*host_array);
3981     unlock_user(array, target_addr, 1);
3982 
3983     return 0;
3984 }
3985 
3986 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3987                                  abi_ulong target_arg)
3988 {
3989     union target_semun target_su = { .buf = target_arg };
3990     union semun arg;
3991     struct semid_ds dsarg;
3992     unsigned short *array = NULL;
3993     struct seminfo seminfo;
3994     abi_long ret = -TARGET_EINVAL;
3995     abi_long err;
3996     cmd &= 0xff;
3997 
3998     switch( cmd ) {
3999 	case GETVAL:
4000 	case SETVAL:
4001             /* In 64-bit cross-endian situations, we will erroneously pick up
4002              * the wrong half of the union for the "val" element.  To rectify
4003              * this, the entire 8-byte structure is byteswapped, followed by
4004              * a swap of the 4-byte val field. In other cases, the data is
4005              * already in proper host byte order. */
4006 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4007 		target_su.buf = tswapal(target_su.buf);
4008 		arg.val = tswap32(target_su.val);
4009 	    } else {
4010 		arg.val = target_su.val;
4011 	    }
4012             ret = get_errno(semctl(semid, semnum, cmd, arg));
4013             break;
4014 	case GETALL:
4015 	case SETALL:
4016             err = target_to_host_semarray(semid, &array, target_su.array);
4017             if (err)
4018                 return err;
4019             arg.array = array;
4020             ret = get_errno(semctl(semid, semnum, cmd, arg));
4021             err = host_to_target_semarray(semid, target_su.array, &array);
4022             if (err)
4023                 return err;
4024             break;
4025 	case IPC_STAT:
4026 	case IPC_SET:
4027 	case SEM_STAT:
4028             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4029             if (err)
4030                 return err;
4031             arg.buf = &dsarg;
4032             ret = get_errno(semctl(semid, semnum, cmd, arg));
4033             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4034             if (err)
4035                 return err;
4036             break;
4037 	case IPC_INFO:
4038 	case SEM_INFO:
4039             arg.__buf = &seminfo;
4040             ret = get_errno(semctl(semid, semnum, cmd, arg));
4041             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4042             if (err)
4043                 return err;
4044             break;
4045 	case IPC_RMID:
4046 	case GETPID:
4047 	case GETNCNT:
4048 	case GETZCNT:
4049             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4050             break;
4051     }
4052 
4053     return ret;
4054 }
4055 
4056 struct target_sembuf {
4057     unsigned short sem_num;
4058     short sem_op;
4059     short sem_flg;
4060 };
4061 
4062 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4063                                              abi_ulong target_addr,
4064                                              unsigned nsops)
4065 {
4066     struct target_sembuf *target_sembuf;
4067     int i;
4068 
4069     target_sembuf = lock_user(VERIFY_READ, target_addr,
4070                               nsops*sizeof(struct target_sembuf), 1);
4071     if (!target_sembuf)
4072         return -TARGET_EFAULT;
4073 
4074     for (i = 0; i < nsops; i++) {
4075         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4076         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4077         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4078     }
4079 
4080     unlock_user(target_sembuf, target_addr, 0);
4081 
4082     return 0;
4083 }
4084 
4085 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4086     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4087 
4088 /*
4089  * This macro is required to handle the s390 variants, which pass the
4090  * arguments in a different order than the default.
4091  */
4092 #ifdef __s390x__
4093 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4094   (__nsops), (__timeout), (__sops)
4095 #else
4096 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4097   (__nsops), 0, (__sops), (__timeout)
4098 #endif
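/*
 * Expansion example (illustrative): SEMTIMEDOP_IPC_ARGS(nsops, sops, ts)
 * becomes "nsops, 0, sops, ts" for the default sys_ipc layout, but
 * "nsops, ts, sops" on s390x, where the timeout is passed in the third
 * slot and there is no sixth argument.
 */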
4099 
4100 static inline abi_long do_semtimedop(int semid,
4101                                      abi_long ptr,
4102                                      unsigned nsops,
4103                                      abi_long timeout, bool time64)
4104 {
4105     struct sembuf *sops;
4106     struct timespec ts, *pts = NULL;
4107     abi_long ret;
4108 
4109     if (timeout) {
4110         pts = &ts;
4111         if (time64) {
4112             if (target_to_host_timespec64(pts, timeout)) {
4113                 return -TARGET_EFAULT;
4114             }
4115         } else {
4116             if (target_to_host_timespec(pts, timeout)) {
4117                 return -TARGET_EFAULT;
4118             }
4119         }
4120     }
4121 
4122     if (nsops > TARGET_SEMOPM) {
4123         return -TARGET_E2BIG;
4124     }
4125 
4126     sops = g_new(struct sembuf, nsops);
4127 
4128     if (target_to_host_sembuf(sops, ptr, nsops)) {
4129         g_free(sops);
4130         return -TARGET_EFAULT;
4131     }
4132 
4133     ret = -TARGET_ENOSYS;
4134 #ifdef __NR_semtimedop
4135     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4136 #endif
4137 #ifdef __NR_ipc
4138     if (ret == -TARGET_ENOSYS) {
4139         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4140                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4141     }
4142 #endif
4143     g_free(sops);
4144     return ret;
4145 }
4146 #endif
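/*
 * Sketch of the fallback pattern used above and in do_msgsnd()/do_msgrcv()
 * (illustrative): the direct syscall (__NR_semtimedop) is tried first when
 * the host exposes it; if it is compiled out or returns ENOSYS, the same
 * operation is issued through the multiplexed __NR_ipc entry point, with
 * SEMTIMEDOP_IPC_ARGS() rearranging the arguments for the host ABI.
 */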
4147 
4148 struct target_msqid_ds
4149 {
4150     struct target_ipc_perm msg_perm;
4151     abi_ulong msg_stime;
4152 #if TARGET_ABI_BITS == 32
4153     abi_ulong __unused1;
4154 #endif
4155     abi_ulong msg_rtime;
4156 #if TARGET_ABI_BITS == 32
4157     abi_ulong __unused2;
4158 #endif
4159     abi_ulong msg_ctime;
4160 #if TARGET_ABI_BITS == 32
4161     abi_ulong __unused3;
4162 #endif
4163     abi_ulong __msg_cbytes;
4164     abi_ulong msg_qnum;
4165     abi_ulong msg_qbytes;
4166     abi_ulong msg_lspid;
4167     abi_ulong msg_lrpid;
4168     abi_ulong __unused4;
4169     abi_ulong __unused5;
4170 };
4171 
4172 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4173                                                abi_ulong target_addr)
4174 {
4175     struct target_msqid_ds *target_md;
4176 
4177     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4178         return -TARGET_EFAULT;
4179     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4180         return -TARGET_EFAULT;
4181     host_md->msg_stime = tswapal(target_md->msg_stime);
4182     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4183     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4184     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4185     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4186     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4187     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4188     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4189     unlock_user_struct(target_md, target_addr, 0);
4190     return 0;
4191 }
4192 
4193 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4194                                                struct msqid_ds *host_md)
4195 {
4196     struct target_msqid_ds *target_md;
4197 
4198     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4199         return -TARGET_EFAULT;
4200     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4201         return -TARGET_EFAULT;
4202     target_md->msg_stime = tswapal(host_md->msg_stime);
4203     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4204     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4205     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4206     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4207     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4208     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4209     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4210     unlock_user_struct(target_md, target_addr, 1);
4211     return 0;
4212 }
4213 
4214 struct target_msginfo {
4215     int msgpool;
4216     int msgmap;
4217     int msgmax;
4218     int msgmnb;
4219     int msgmni;
4220     int msgssz;
4221     int msgtql;
4222     unsigned short int msgseg;
4223 };
4224 
4225 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4226                                               struct msginfo *host_msginfo)
4227 {
4228     struct target_msginfo *target_msginfo;
4229     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4230         return -TARGET_EFAULT;
4231     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4232     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4233     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4234     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4235     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4236     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4237     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4238     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4239     unlock_user_struct(target_msginfo, target_addr, 1);
4240     return 0;
4241 }
4242 
4243 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4244 {
4245     struct msqid_ds dsarg;
4246     struct msginfo msginfo;
4247     abi_long ret = -TARGET_EINVAL;
4248 
4249     cmd &= 0xff;
4250 
4251     switch (cmd) {
4252     case IPC_STAT:
4253     case IPC_SET:
4254     case MSG_STAT:
4255         if (target_to_host_msqid_ds(&dsarg,ptr))
4256             return -TARGET_EFAULT;
4257         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4258         if (host_to_target_msqid_ds(ptr,&dsarg))
4259             return -TARGET_EFAULT;
4260         break;
4261     case IPC_RMID:
4262         ret = get_errno(msgctl(msgid, cmd, NULL));
4263         break;
4264     case IPC_INFO:
4265     case MSG_INFO:
4266         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4267         if (host_to_target_msginfo(ptr, &msginfo))
4268             return -TARGET_EFAULT;
4269         break;
4270     }
4271 
4272     return ret;
4273 }
4274 
4275 struct target_msgbuf {
4276     abi_long mtype;
4277     char	mtext[1];
4278 };
4279 
4280 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4281                                  ssize_t msgsz, int msgflg)
4282 {
4283     struct target_msgbuf *target_mb;
4284     struct msgbuf *host_mb;
4285     abi_long ret = 0;
4286 
4287     if (msgsz < 0) {
4288         return -TARGET_EINVAL;
4289     }
4290 
4291     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4292         return -TARGET_EFAULT;
4293     host_mb = g_try_malloc(msgsz + sizeof(long));
4294     if (!host_mb) {
4295         unlock_user_struct(target_mb, msgp, 0);
4296         return -TARGET_ENOMEM;
4297     }
4298     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4299     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4300     ret = -TARGET_ENOSYS;
4301 #ifdef __NR_msgsnd
4302     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4303 #endif
4304 #ifdef __NR_ipc
4305     if (ret == -TARGET_ENOSYS) {
4306 #ifdef __s390x__
4307         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4308                                  host_mb));
4309 #else
4310         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4311                                  host_mb, 0));
4312 #endif
4313     }
4314 #endif
4315     g_free(host_mb);
4316     unlock_user_struct(target_mb, msgp, 0);
4317 
4318     return ret;
4319 }
4320 
4321 #ifdef __NR_ipc
4322 #if defined(__sparc__)
4323 /* On SPARC, msgrcv does not use the kludge on the final two arguments.  */
4324 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4325 #elif defined(__s390x__)
4326 /* The s390 sys_ipc variant has only five parameters.  */
4327 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4328     ((long int[]){(long int)__msgp, __msgtyp})
4329 #else
4330 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4331     ((long int[]){(long int)__msgp, __msgtyp}), 0
4332 #endif
4333 #endif
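/*
 * Expansion example (illustrative): MSGRCV_ARGS(host_mb, msgtyp) passes the
 * pair directly on SPARC, wraps it in a two-element long array on s390x,
 * and wraps it in the same array followed by a trailing 0 everywhere else,
 * matching each kernel's sys_ipc ABI.
 */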
4334 
4335 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4336                                  ssize_t msgsz, abi_long msgtyp,
4337                                  int msgflg)
4338 {
4339     struct target_msgbuf *target_mb;
4340     char *target_mtext;
4341     struct msgbuf *host_mb;
4342     abi_long ret = 0;
4343 
4344     if (msgsz < 0) {
4345         return -TARGET_EINVAL;
4346     }
4347 
4348     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4349         return -TARGET_EFAULT;
4350 
4351     host_mb = g_try_malloc(msgsz + sizeof(long));
4352     if (!host_mb) {
4353         ret = -TARGET_ENOMEM;
4354         goto end;
4355     }
4356     ret = -TARGET_ENOSYS;
4357 #ifdef __NR_msgrcv
4358     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4359 #endif
4360 #ifdef __NR_ipc
4361     if (ret == -TARGET_ENOSYS) {
4362         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4363                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4364     }
4365 #endif
4366 
4367     if (ret > 0) {
4368         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4369         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4370         if (!target_mtext) {
4371             ret = -TARGET_EFAULT;
4372             goto end;
4373         }
4374         memcpy(target_mb->mtext, host_mb->mtext, ret);
4375         unlock_user(target_mtext, target_mtext_addr, ret);
4376     }
4377 
4378     target_mb->mtype = tswapal(host_mb->mtype);
4379 
4380 end:
4381     if (target_mb)
4382         unlock_user_struct(target_mb, msgp, 1);
4383     g_free(host_mb);
4384     return ret;
4385 }
4386 
4387 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4388                                                abi_ulong target_addr)
4389 {
4390     struct target_shmid_ds *target_sd;
4391 
4392     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4393         return -TARGET_EFAULT;
4394     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4395         return -TARGET_EFAULT;
4396     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4397     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4398     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4399     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4400     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4401     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4402     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4403     unlock_user_struct(target_sd, target_addr, 0);
4404     return 0;
4405 }
4406 
4407 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4408                                                struct shmid_ds *host_sd)
4409 {
4410     struct target_shmid_ds *target_sd;
4411 
4412     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4413         return -TARGET_EFAULT;
4414     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4415         return -TARGET_EFAULT;
4416     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4417     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4418     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4419     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4420     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4421     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4422     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4423     unlock_user_struct(target_sd, target_addr, 1);
4424     return 0;
4425 }
4426 
4427 struct  target_shminfo {
4428     abi_ulong shmmax;
4429     abi_ulong shmmin;
4430     abi_ulong shmmni;
4431     abi_ulong shmseg;
4432     abi_ulong shmall;
4433 };
4434 
4435 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4436                                               struct shminfo *host_shminfo)
4437 {
4438     struct target_shminfo *target_shminfo;
4439     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4440         return -TARGET_EFAULT;
4441     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4442     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4443     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4444     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4445     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4446     unlock_user_struct(target_shminfo, target_addr, 1);
4447     return 0;
4448 }
4449 
4450 struct target_shm_info {
4451     int used_ids;
4452     abi_ulong shm_tot;
4453     abi_ulong shm_rss;
4454     abi_ulong shm_swp;
4455     abi_ulong swap_attempts;
4456     abi_ulong swap_successes;
4457 };
4458 
4459 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4460                                                struct shm_info *host_shm_info)
4461 {
4462     struct target_shm_info *target_shm_info;
4463     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4464         return -TARGET_EFAULT;
4465     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4466     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4467     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4468     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4469     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4470     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4471     unlock_user_struct(target_shm_info, target_addr, 1);
4472     return 0;
4473 }
4474 
4475 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4476 {
4477     struct shmid_ds dsarg;
4478     struct shminfo shminfo;
4479     struct shm_info shm_info;
4480     abi_long ret = -TARGET_EINVAL;
4481 
4482     cmd &= 0xff;
4483 
4484     switch(cmd) {
4485     case IPC_STAT:
4486     case IPC_SET:
4487     case SHM_STAT:
4488         if (target_to_host_shmid_ds(&dsarg, buf))
4489             return -TARGET_EFAULT;
4490         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4491         if (host_to_target_shmid_ds(buf, &dsarg))
4492             return -TARGET_EFAULT;
4493         break;
4494     case IPC_INFO:
4495         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4496         if (host_to_target_shminfo(buf, &shminfo))
4497             return -TARGET_EFAULT;
4498         break;
4499     case SHM_INFO:
4500         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4501         if (host_to_target_shm_info(buf, &shm_info))
4502             return -TARGET_EFAULT;
4503         break;
4504     case IPC_RMID:
4505     case SHM_LOCK:
4506     case SHM_UNLOCK:
4507         ret = get_errno(shmctl(shmid, cmd, NULL));
4508         break;
4509     }
4510 
4511     return ret;
4512 }
4513 
4514 #ifndef TARGET_FORCE_SHMLBA
4515 /* For most architectures, SHMLBA is the same as the page size;
4516  * some architectures have larger values, in which case they should
4517  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4518  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4519  * and defining its own value for SHMLBA.
4520  *
4521  * The kernel also permits SHMLBA to be set by the architecture to a
4522  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4523  * this means that addresses are rounded to the large size if
4524  * SHM_RND is set but addresses not aligned to that size are not rejected
4525  * as long as they are at least page-aligned. Since the only architecture
4526  * which uses this is ia64, this code doesn't provide for that oddity.
4527  */
4528 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4529 {
4530     return TARGET_PAGE_SIZE;
4531 }
4532 #endif
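/*
 * Example of the alignment rule enforced in do_shmat() below (illustrative,
 * assuming shmlba == 0x4000): shmaddr == 0x12345 is rounded down to 0x10000
 * when SHM_RND is set, and rejected with -TARGET_EINVAL otherwise;
 * shmaddr == 0 lets QEMU pick a suitably aligned address via
 * mmap_find_vma().
 */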
4533 
4534 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4535                                  int shmid, abi_ulong shmaddr, int shmflg)
4536 {
4537     CPUState *cpu = env_cpu(cpu_env);
4538     abi_long raddr;
4539     void *host_raddr;
4540     struct shmid_ds shm_info;
4541     int i,ret;
4542     abi_ulong shmlba;
4543 
4544     /* shmat pointers are always untagged */
4545 
4546     /* find out the length of the shared memory segment */
4547     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4548     if (is_error(ret)) {
4549         /* can't get length, bail out */
4550         return ret;
4551     }
4552 
4553     shmlba = target_shmlba(cpu_env);
4554 
4555     if (shmaddr & (shmlba - 1)) {
4556         if (shmflg & SHM_RND) {
4557             shmaddr &= ~(shmlba - 1);
4558         } else {
4559             return -TARGET_EINVAL;
4560         }
4561     }
4562     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4563         return -TARGET_EINVAL;
4564     }
4565 
4566     mmap_lock();
4567 
4568     /*
4569      * We're mapping shared memory, so ensure we generate code for parallel
4570      * execution and flush old translations.  This will work up to the level
4571      * supported by the host -- anything that requires EXCP_ATOMIC will not
4572      * be atomic with respect to an external process.
4573      */
4574     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4575         cpu->tcg_cflags |= CF_PARALLEL;
4576         tb_flush(cpu);
4577     }
4578 
4579     if (shmaddr)
4580         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4581     else {
4582         abi_ulong mmap_start;
4583 
4584         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4585         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4586 
4587         if (mmap_start == -1) {
4588             errno = ENOMEM;
4589             host_raddr = (void *)-1;
4590         } else
4591             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4592                                shmflg | SHM_REMAP);
4593     }
4594 
4595     if (host_raddr == (void *)-1) {
4596         mmap_unlock();
4597         return get_errno((long)host_raddr);
4598     }
4599     raddr=h2g((unsigned long)host_raddr);
4600 
4601     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4602                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4603                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4604 
4605     for (i = 0; i < N_SHM_REGIONS; i++) {
4606         if (!shm_regions[i].in_use) {
4607             shm_regions[i].in_use = true;
4608             shm_regions[i].start = raddr;
4609             shm_regions[i].size = shm_info.shm_segsz;
4610             break;
4611         }
4612     }
4613 
4614     mmap_unlock();
4615     return raddr;
4616 
4617 }
4618 
4619 static inline abi_long do_shmdt(abi_ulong shmaddr)
4620 {
4621     int i;
4622     abi_long rv;
4623 
4624     /* shmdt pointers are always untagged */
4625 
4626     mmap_lock();
4627 
4628     for (i = 0; i < N_SHM_REGIONS; ++i) {
4629         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4630             shm_regions[i].in_use = false;
4631             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4632             break;
4633         }
4634     }
4635     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4636 
4637     mmap_unlock();
4638 
4639     return rv;
4640 }
4641 
4642 #ifdef TARGET_NR_ipc
4643 /* ??? This only works with linear mappings.  */
4644 /* do_ipc() must return target values and target errnos. */
4645 static abi_long do_ipc(CPUArchState *cpu_env,
4646                        unsigned int call, abi_long first,
4647                        abi_long second, abi_long third,
4648                        abi_long ptr, abi_long fifth)
4649 {
4650     int version;
4651     abi_long ret = 0;
4652 
4653     version = call >> 16;
4654     call &= 0xffff;
4655 
4656     switch (call) {
4657     case IPCOP_semop:
4658         ret = do_semtimedop(first, ptr, second, 0, false);
4659         break;
4660     case IPCOP_semtimedop:
4661     /*
4662      * The s390 sys_ipc variant has only five parameters instead of six
4663      * (as in the default variant); the only difference is the handling of
4664      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4665      * to a struct timespec while the generic variant uses the fifth one.
4666      */
4667 #if defined(TARGET_S390X)
4668         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4669 #else
4670         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4671 #endif
4672         break;
4673 
4674     case IPCOP_semget:
4675         ret = get_errno(semget(first, second, third));
4676         break;
4677 
4678     case IPCOP_semctl: {
4679         /* The semun argument to semctl is passed by value, so dereference the
4680          * ptr argument. */
4681         abi_ulong atptr;
4682         get_user_ual(atptr, ptr);
4683         ret = do_semctl(first, second, third, atptr);
4684         break;
4685     }
4686 
4687     case IPCOP_msgget:
4688         ret = get_errno(msgget(first, second));
4689         break;
4690 
4691     case IPCOP_msgsnd:
4692         ret = do_msgsnd(first, ptr, second, third);
4693         break;
4694 
4695     case IPCOP_msgctl:
4696         ret = do_msgctl(first, second, ptr);
4697         break;
4698 
4699     case IPCOP_msgrcv:
4700         switch (version) {
4701         case 0:
4702             {
4703                 struct target_ipc_kludge {
4704                     abi_long msgp;
4705                     abi_long msgtyp;
4706                 } *tmp;
4707 
4708                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4709                     ret = -TARGET_EFAULT;
4710                     break;
4711                 }
4712 
4713                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4714 
4715                 unlock_user_struct(tmp, ptr, 0);
4716                 break;
4717             }
4718         default:
4719             ret = do_msgrcv(first, ptr, second, fifth, third);
4720         }
4721         break;
4722 
4723     case IPCOP_shmat:
4724         switch (version) {
4725         default:
4726         {
4727             abi_ulong raddr;
4728             raddr = do_shmat(cpu_env, first, ptr, second);
4729             if (is_error(raddr))
4730                 return get_errno(raddr);
4731             if (put_user_ual(raddr, third))
4732                 return -TARGET_EFAULT;
4733             break;
4734         }
4735         case 1:
4736             ret = -TARGET_EINVAL;
4737             break;
4738         }
4739         break;
4740     case IPCOP_shmdt:
4741         ret = do_shmdt(ptr);
4742         break;
4743 
4744     case IPCOP_shmget:
4745         /* IPC_* flag values are the same on all Linux platforms */
4746         ret = get_errno(shmget(first, second, third));
4747         break;
4748 
4749     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4750     case IPCOP_shmctl:
4751         ret = do_shmctl(first, second, ptr);
4752         break;
4753     default:
4754         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4755                       call, version);
4756         ret = -TARGET_ENOSYS;
4757         break;
4758     }
4759     return ret;
4760 }
4761 #endif
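
/*
 * Rough sketch of the argument routing above for the shmat case (the slot
 * names follow the generic sys_ipc convention; individual guest ABIs can
 * differ):
 *
 *     guest:  ipc(IPCOP_shmat, shmid, shmflg, &result, shmaddr, 0)
 *                  call        first  second  third    ptr      fifth
 *
 *     version = call >> 16;    // 0 selects the modern variant
 *     call   &= 0xffff;        // IPCOP_shmat
 *     // do_ipc() forwards (first, ptr, second) to do_shmat() and stores
 *     // the returned attach address through 'third' with put_user_ual().
 */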
4762 
4763 /* kernel structure types definitions */
4764 
4765 #define STRUCT(name, ...) STRUCT_ ## name,
4766 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4767 enum {
4768 #include "syscall_types.h"
4769 STRUCT_MAX
4770 };
4771 #undef STRUCT
4772 #undef STRUCT_SPECIAL
4773 
4774 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4775 #define STRUCT_SPECIAL(name)
4776 #include "syscall_types.h"
4777 #undef STRUCT
4778 #undef STRUCT_SPECIAL
4779 
4780 #define MAX_STRUCT_SIZE 4096
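
/*
 * The two passes over "syscall_types.h" above first build an enum of
 * STRUCT_* identifiers and then one argtype descriptor array per structure.
 * As an illustration (the real entries live in syscall_types.h), an entry
 * in the style of a four-short winsize would expand roughly to:
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 *     // pass 1: enum { ..., STRUCT_winsize, ..., STRUCT_MAX };
 *     // pass 2: static const argtype struct_winsize_def[] =
 *     //             { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT,
 *     //               TYPE_NULL };
 *
 * The thunk code walks such descriptors to convert between target and host
 * structure layouts.
 */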
4781 
4782 #ifdef CONFIG_FIEMAP
4783 /* So fiemap access checks don't overflow on 32 bit systems.
4784  * This is very slightly smaller than the limit imposed by
4785  * the underlying kernel.
4786  */
4787 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4788                             / sizeof(struct fiemap_extent))
4789 
4790 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4791                                        int fd, int cmd, abi_long arg)
4792 {
4793     /* The parameter for this ioctl is a struct fiemap followed
4794      * by an array of struct fiemap_extent whose size is set
4795      * in fiemap->fm_extent_count. The array is filled in by the
4796      * ioctl.
4797      */
4798     int target_size_in, target_size_out;
4799     struct fiemap *fm;
4800     const argtype *arg_type = ie->arg_type;
4801     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4802     void *argptr, *p;
4803     abi_long ret;
4804     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4805     uint32_t outbufsz;
4806     int free_fm = 0;
4807 
4808     assert(arg_type[0] == TYPE_PTR);
4809     assert(ie->access == IOC_RW);
4810     arg_type++;
4811     target_size_in = thunk_type_size(arg_type, 0);
4812     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4813     if (!argptr) {
4814         return -TARGET_EFAULT;
4815     }
4816     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4817     unlock_user(argptr, arg, 0);
4818     fm = (struct fiemap *)buf_temp;
4819     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4820         return -TARGET_EINVAL;
4821     }
4822 
4823     outbufsz = sizeof (*fm) +
4824         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4825 
4826     if (outbufsz > MAX_STRUCT_SIZE) {
4827         /* We can't fit all the extents into the fixed size buffer.
4828          * Allocate one that is large enough and use it instead.
4829          */
4830         fm = g_try_malloc(outbufsz);
4831         if (!fm) {
4832             return -TARGET_ENOMEM;
4833         }
4834         memcpy(fm, buf_temp, sizeof(struct fiemap));
4835         free_fm = 1;
4836     }
4837     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4838     if (!is_error(ret)) {
4839         target_size_out = target_size_in;
4840         /* An extent_count of 0 means we were only counting the extents
4841          * so there are no structs to copy
4842          */
4843         if (fm->fm_extent_count != 0) {
4844             target_size_out += fm->fm_mapped_extents * extent_size;
4845         }
4846         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4847         if (!argptr) {
4848             ret = -TARGET_EFAULT;
4849         } else {
4850             /* Convert the struct fiemap */
4851             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4852             if (fm->fm_extent_count != 0) {
4853                 p = argptr + target_size_in;
4854                 /* ...and then all the struct fiemap_extents */
4855                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4856                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4857                                   THUNK_TARGET);
4858                     p += extent_size;
4859                 }
4860             }
4861             unlock_user(argptr, arg, target_size_out);
4862         }
4863     }
4864     if (free_fm) {
4865         g_free(fm);
4866     }
4867     return ret;
4868 }
4869 #endif
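
/*
 * For reference, the guest-side shape of the request serviced above
 * (standard FS_IOC_FIEMAP usage, field names as in <linux/fiemap.h>):
 *
 *     struct fiemap *fm = calloc(1, sizeof(*fm) +
 *                                   n * sizeof(struct fiemap_extent));
 *     fm->fm_start = 0;
 *     fm->fm_length = ~0ULL;        // whole file
 *     fm->fm_extent_count = n;      // 0 means "only count the extents"
 *     ioctl(fd, FS_IOC_FIEMAP, fm);
 *     // on success fm_mapped_extents extents follow the header, which is
 *     // exactly the variable-length layout converted back above
 */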
4870 
4871 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4872                                 int fd, int cmd, abi_long arg)
4873 {
4874     const argtype *arg_type = ie->arg_type;
4875     int target_size;
4876     void *argptr;
4877     int ret;
4878     struct ifconf *host_ifconf;
4879     uint32_t outbufsz;
4880     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4881     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4882     int target_ifreq_size;
4883     int nb_ifreq;
4884     int free_buf = 0;
4885     int i;
4886     int target_ifc_len;
4887     abi_long target_ifc_buf;
4888     int host_ifc_len;
4889     char *host_ifc_buf;
4890 
4891     assert(arg_type[0] == TYPE_PTR);
4892     assert(ie->access == IOC_RW);
4893 
4894     arg_type++;
4895     target_size = thunk_type_size(arg_type, 0);
4896 
4897     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4898     if (!argptr)
4899         return -TARGET_EFAULT;
4900     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4901     unlock_user(argptr, arg, 0);
4902 
4903     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4904     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4905     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4906 
4907     if (target_ifc_buf != 0) {
4908         target_ifc_len = host_ifconf->ifc_len;
4909         nb_ifreq = target_ifc_len / target_ifreq_size;
4910         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4911 
4912         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4913         if (outbufsz > MAX_STRUCT_SIZE) {
4914             /*
4915              * We can't fit all the ifreq entries into the fixed size buffer.
4916              * Allocate one that is large enough and use it instead.
4917              */
4918             host_ifconf = g_try_malloc(outbufsz);
4919             if (!host_ifconf) {
4920                 return -TARGET_ENOMEM;
4921             }
4922             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4923             free_buf = 1;
4924         }
4925         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4926 
4927         host_ifconf->ifc_len = host_ifc_len;
4928     } else {
4929         host_ifc_buf = NULL;
4930     }
4931     host_ifconf->ifc_buf = host_ifc_buf;
4932 
4933     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4934     if (!is_error(ret)) {
4935 	/* convert host ifc_len to target ifc_len */
4936         /* convert host ifc_len to target ifc_len */
4937         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4938         target_ifc_len = nb_ifreq * target_ifreq_size;
4939         host_ifconf->ifc_len = target_ifc_len;
4940 
4941         /* restore target ifc_buf */
4942 
4943         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4944 
4945         /* copy struct ifconf to target user */
4946 
4947         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4948         if (!argptr)
4949             return -TARGET_EFAULT;
4950         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4951         unlock_user(argptr, arg, target_size);
4952 
4953         if (target_ifc_buf != 0) {
4954             /* copy ifreq[] to target user */
4955             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4956             for (i = 0; i < nb_ifreq ; i++) {
4957                 thunk_convert(argptr + i * target_ifreq_size,
4958                               host_ifc_buf + i * sizeof(struct ifreq),
4959                               ifreq_arg_type, THUNK_TARGET);
4960             }
4961             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4962         }
4963     }
4964 
4965     if (free_buf) {
4966         g_free(host_ifconf);
4967     }
4968 
4969     return ret;
4970 }
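
/*
 * For reference, the guest-side calling convention that the conversion
 * above has to preserve (standard SIOCGIFCONF usage):
 *
 *     struct ifreq reqs[8];
 *     struct ifconf ifc = {
 *         .ifc_len = sizeof(reqs),
 *         .ifc_req = reqs,          // a NULL buffer asks only for the size
 *     };
 *     ioctl(sock, SIOCGIFCONF, &ifc);
 *     // ifc.ifc_len now holds the number of bytes filled in; the handler
 *     // rescales it because target and host struct ifreq sizes may differ
 */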
4971 
4972 #if defined(CONFIG_USBFS)
4973 #if HOST_LONG_BITS > 64
4974 #error USBDEVFS thunks do not support >64 bit hosts yet.
4975 #endif
4976 struct live_urb {
4977     uint64_t target_urb_adr;
4978     uint64_t target_buf_adr;
4979     char *target_buf_ptr;
4980     struct usbdevfs_urb host_urb;
4981 };
4982 
4983 static GHashTable *usbdevfs_urb_hashtable(void)
4984 {
4985     static GHashTable *urb_hashtable;
4986 
4987     if (!urb_hashtable) {
4988         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4989     }
4990     return urb_hashtable;
4991 }
4992 
4993 static void urb_hashtable_insert(struct live_urb *urb)
4994 {
4995     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4996     g_hash_table_insert(urb_hashtable, urb, urb);
4997 }
4998 
4999 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5000 {
5001     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5002     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5003 }
5004 
5005 static void urb_hashtable_remove(struct live_urb *urb)
5006 {
5007     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5008     g_hash_table_remove(urb_hashtable, urb);
5009 }
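
/*
 * Note on the table above: the hash key is the live_urb itself, whose first
 * member is the 64-bit guest URB address, so g_int64_hash()/g_int64_equal()
 * effectively key on target_urb_adr; that is why urb_hashtable_lookup() can
 * probe with a plain &target_urb_adr.  The reap path below goes the other
 * way round: the kernel returns a pointer to host_urb and the embedding
 * live_urb is recovered container_of-style:
 *
 *     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
 */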
5010 
5011 static abi_long
5012 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5013                           int fd, int cmd, abi_long arg)
5014 {
5015     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5016     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5017     struct live_urb *lurb;
5018     void *argptr;
5019     uint64_t hurb;
5020     int target_size;
5021     uintptr_t target_urb_adr;
5022     abi_long ret;
5023 
5024     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5025 
5026     memset(buf_temp, 0, sizeof(uint64_t));
5027     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5028     if (is_error(ret)) {
5029         return ret;
5030     }
5031 
5032     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5033     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5034     if (!lurb->target_urb_adr) {
5035         return -TARGET_EFAULT;
5036     }
5037     urb_hashtable_remove(lurb);
5038     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5039         lurb->host_urb.buffer_length);
5040     lurb->target_buf_ptr = NULL;
5041 
5042     /* restore the guest buffer pointer */
5043     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5044 
5045     /* update the guest urb struct */
5046     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5047     if (!argptr) {
5048         g_free(lurb);
5049         return -TARGET_EFAULT;
5050     }
5051     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5052     unlock_user(argptr, lurb->target_urb_adr, target_size);
5053 
5054     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5055     /* write back the urb handle */
5056     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5057     if (!argptr) {
5058         g_free(lurb);
5059         return -TARGET_EFAULT;
5060     }
5061 
5062     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5063     target_urb_adr = lurb->target_urb_adr;
5064     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5065     unlock_user(argptr, arg, target_size);
5066 
5067     g_free(lurb);
5068     return ret;
5069 }
5070 
5071 static abi_long
5072 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5073                              uint8_t *buf_temp __attribute__((unused)),
5074                              int fd, int cmd, abi_long arg)
5075 {
5076     struct live_urb *lurb;
5077 
5078     /* map target address back to host URB with metadata. */
5079     lurb = urb_hashtable_lookup(arg);
5080     if (!lurb) {
5081         return -TARGET_EFAULT;
5082     }
5083     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5084 }
5085 
5086 static abi_long
5087 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5088                             int fd, int cmd, abi_long arg)
5089 {
5090     const argtype *arg_type = ie->arg_type;
5091     int target_size;
5092     abi_long ret;
5093     void *argptr;
5094     int rw_dir;
5095     struct live_urb *lurb;
5096 
5097     /*
5098      * Each submitted URB needs to map to a unique ID for the
5099      * kernel, and that unique ID needs to be a pointer to
5100      * host memory.  Hence, we need to malloc for each URB.
5101      * Isochronous transfers have a variable-length struct.
5102      */
5103     arg_type++;
5104     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5105 
5106     /* construct host copy of urb and metadata */
5107     lurb = g_try_new0(struct live_urb, 1);
5108     if (!lurb) {
5109         return -TARGET_ENOMEM;
5110     }
5111 
5112     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5113     if (!argptr) {
5114         g_free(lurb);
5115         return -TARGET_EFAULT;
5116     }
5117     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5118     unlock_user(argptr, arg, 0);
5119 
5120     lurb->target_urb_adr = arg;
5121     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5122 
5123     /* buffer space used depends on endpoint type so lock the entire buffer */
5124     /* control type urbs should check the buffer contents for true direction */
5125     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5126     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5127         lurb->host_urb.buffer_length, 1);
5128     if (lurb->target_buf_ptr == NULL) {
5129         g_free(lurb);
5130         return -TARGET_EFAULT;
5131     }
5132 
5133     /* update buffer pointer in host copy */
5134     lurb->host_urb.buffer = lurb->target_buf_ptr;
5135 
5136     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5137     if (is_error(ret)) {
5138         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5139         g_free(lurb);
5140     } else {
5141         urb_hashtable_insert(lurb);
5142     }
5143 
5144     return ret;
5145 }
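
/*
 * Putting the three USBDEVFS handlers together, the rough lifecycle of one
 * URB is:
 *
 *     ioctl(fd, USBDEVFS_SUBMITURB, urb)   // live_urb allocated, guest
 *                                          // buffer locked, hash entry added
 *     ioctl(fd, USBDEVFS_REAPURB, &ptr)    // host_urb mapped back to its
 *                                          // live_urb, guest urb and buffer
 *                                          // written back, entry freed
 *     ioctl(fd, USBDEVFS_DISCARDURB, urb)  // live_urb looked up by guest
 *                                          // address and cancelled on the
 *                                          // host fd
 */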
5146 #endif /* CONFIG_USBFS */
5147 
5148 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5149                             int cmd, abi_long arg)
5150 {
5151     void *argptr;
5152     struct dm_ioctl *host_dm;
5153     abi_long guest_data;
5154     uint32_t guest_data_size;
5155     int target_size;
5156     const argtype *arg_type = ie->arg_type;
5157     abi_long ret;
5158     void *big_buf = NULL;
5159     char *host_data;
5160 
5161     arg_type++;
5162     target_size = thunk_type_size(arg_type, 0);
5163     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5164     if (!argptr) {
5165         ret = -TARGET_EFAULT;
5166         goto out;
5167     }
5168     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5169     unlock_user(argptr, arg, 0);
5170 
5171     /* buf_temp is too small, so fetch things into a bigger buffer */
5172     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5173     memcpy(big_buf, buf_temp, target_size);
5174     buf_temp = big_buf;
5175     host_dm = big_buf;
5176 
5177     guest_data = arg + host_dm->data_start;
5178     if ((guest_data - arg) < 0) {
5179         ret = -TARGET_EINVAL;
5180         goto out;
5181     }
5182     guest_data_size = host_dm->data_size - host_dm->data_start;
5183     host_data = (char*)host_dm + host_dm->data_start;
5184 
5185     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5186     if (!argptr) {
5187         ret = -TARGET_EFAULT;
5188         goto out;
5189     }
5190 
5191     switch (ie->host_cmd) {
5192     case DM_REMOVE_ALL:
5193     case DM_LIST_DEVICES:
5194     case DM_DEV_CREATE:
5195     case DM_DEV_REMOVE:
5196     case DM_DEV_SUSPEND:
5197     case DM_DEV_STATUS:
5198     case DM_DEV_WAIT:
5199     case DM_TABLE_STATUS:
5200     case DM_TABLE_CLEAR:
5201     case DM_TABLE_DEPS:
5202     case DM_LIST_VERSIONS:
5203         /* no input data */
5204         break;
5205     case DM_DEV_RENAME:
5206     case DM_DEV_SET_GEOMETRY:
5207         /* data contains only strings */
5208         memcpy(host_data, argptr, guest_data_size);
5209         break;
5210     case DM_TARGET_MSG:
5211         memcpy(host_data, argptr, guest_data_size);
5212         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5213         break;
5214     case DM_TABLE_LOAD:
5215     {
5216         void *gspec = argptr;
5217         void *cur_data = host_data;
5218         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5219         int spec_size = thunk_type_size(arg_type, 0);
5220         int i;
5221 
5222         for (i = 0; i < host_dm->target_count; i++) {
5223             struct dm_target_spec *spec = cur_data;
5224             uint32_t next;
5225             int slen;
5226 
5227             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5228             slen = strlen((char*)gspec + spec_size) + 1;
5229             next = spec->next;
5230             spec->next = sizeof(*spec) + slen;
5231             strcpy((char*)&spec[1], gspec + spec_size);
5232             gspec += next;
5233             cur_data += spec->next;
5234         }
5235         break;
5236     }
5237     default:
5238         ret = -TARGET_EINVAL;
5239         unlock_user(argptr, guest_data, 0);
5240         goto out;
5241     }
5242     unlock_user(argptr, guest_data, 0);
5243 
5244     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5245     if (!is_error(ret)) {
5246         guest_data = arg + host_dm->data_start;
5247         guest_data_size = host_dm->data_size - host_dm->data_start;
5248         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5249         switch (ie->host_cmd) {
5250         case DM_REMOVE_ALL:
5251         case DM_DEV_CREATE:
5252         case DM_DEV_REMOVE:
5253         case DM_DEV_RENAME:
5254         case DM_DEV_SUSPEND:
5255         case DM_DEV_STATUS:
5256         case DM_TABLE_LOAD:
5257         case DM_TABLE_CLEAR:
5258         case DM_TARGET_MSG:
5259         case DM_DEV_SET_GEOMETRY:
5260             /* no return data */
5261             break;
5262         case DM_LIST_DEVICES:
5263         {
5264             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5265             uint32_t remaining_data = guest_data_size;
5266             void *cur_data = argptr;
5267             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5268             int nl_size = 12; /* can't use thunk_size due to alignment */
5269 
5270             while (1) {
5271                 uint32_t next = nl->next;
5272                 if (next) {
5273                     nl->next = nl_size + (strlen(nl->name) + 1);
5274                 }
5275                 if (remaining_data < nl->next) {
5276                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5277                     break;
5278                 }
5279                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5280                 strcpy(cur_data + nl_size, nl->name);
5281                 cur_data += nl->next;
5282                 remaining_data -= nl->next;
5283                 if (!next) {
5284                     break;
5285                 }
5286                 nl = (void*)nl + next;
5287             }
5288             break;
5289         }
5290         case DM_DEV_WAIT:
5291         case DM_TABLE_STATUS:
5292         {
5293             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5294             void *cur_data = argptr;
5295             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5296             int spec_size = thunk_type_size(arg_type, 0);
5297             int i;
5298 
5299             for (i = 0; i < host_dm->target_count; i++) {
5300                 uint32_t next = spec->next;
5301                 int slen = strlen((char*)&spec[1]) + 1;
5302                 spec->next = (cur_data - argptr) + spec_size + slen;
5303                 if (guest_data_size < spec->next) {
5304                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5305                     break;
5306                 }
5307                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5308                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5309                 cur_data = argptr + spec->next;
5310                 spec = (void*)host_dm + host_dm->data_start + next;
5311             }
5312             break;
5313         }
5314         case DM_TABLE_DEPS:
5315         {
5316             void *hdata = (void*)host_dm + host_dm->data_start;
5317             int count = *(uint32_t*)hdata;
5318             uint64_t *hdev = hdata + 8;
5319             uint64_t *gdev = argptr + 8;
5320             int i;
5321 
5322             *(uint32_t*)argptr = tswap32(count);
5323             for (i = 0; i < count; i++) {
5324                 *gdev = tswap64(*hdev);
5325                 gdev++;
5326                 hdev++;
5327             }
5328             break;
5329         }
5330         case DM_LIST_VERSIONS:
5331         {
5332             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5333             uint32_t remaining_data = guest_data_size;
5334             void *cur_data = argptr;
5335             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5336             int vers_size = thunk_type_size(arg_type, 0);
5337 
5338             while (1) {
5339                 uint32_t next = vers->next;
5340                 if (next) {
5341                     vers->next = vers_size + (strlen(vers->name) + 1);
5342                 }
5343                 if (remaining_data < vers->next) {
5344                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5345                     break;
5346                 }
5347                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5348                 strcpy(cur_data + vers_size, vers->name);
5349                 cur_data += vers->next;
5350                 remaining_data -= vers->next;
5351                 if (!next) {
5352                     break;
5353                 }
5354                 vers = (void*)vers + next;
5355             }
5356             break;
5357         }
5358         default:
5359             unlock_user(argptr, guest_data, 0);
5360             ret = -TARGET_EINVAL;
5361             goto out;
5362         }
5363         unlock_user(argptr, guest_data, guest_data_size);
5364 
5365         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5366         if (!argptr) {
5367             ret = -TARGET_EFAULT;
5368             goto out;
5369         }
5370         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5371         unlock_user(argptr, arg, target_size);
5372     }
5373 out:
5374     g_free(big_buf);
5375     return ret;
5376 }
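
/*
 * All DM_* commands share one variable-length layout, which is what the
 * two-pass conversion above deals with:
 *
 *     +---------------------+  <- arg
 *     | struct dm_ioctl     |     data_start / data_size describe the
 *     +---------------------+     payload that follows the header
 *     | command payload     |  <- arg + data_start
 *     | (target specs, name |     data_size - data_start bytes
 *     |  list, versions...) |
 *     +---------------------+
 */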
5377 
5378 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5379                                int cmd, abi_long arg)
5380 {
5381     void *argptr;
5382     int target_size;
5383     const argtype *arg_type = ie->arg_type;
5384     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5385     abi_long ret;
5386 
5387     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5388     struct blkpg_partition host_part;
5389 
5390     /* Read and convert blkpg */
5391     arg_type++;
5392     target_size = thunk_type_size(arg_type, 0);
5393     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5394     if (!argptr) {
5395         ret = -TARGET_EFAULT;
5396         goto out;
5397     }
5398     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5399     unlock_user(argptr, arg, 0);
5400 
5401     switch (host_blkpg->op) {
5402     case BLKPG_ADD_PARTITION:
5403     case BLKPG_DEL_PARTITION:
5404         /* payload is struct blkpg_partition */
5405         break;
5406     default:
5407         /* Unknown opcode */
5408         ret = -TARGET_EINVAL;
5409         goto out;
5410     }
5411 
5412     /* Read and convert blkpg->data */
5413     arg = (abi_long)(uintptr_t)host_blkpg->data;
5414     target_size = thunk_type_size(part_arg_type, 0);
5415     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5416     if (!argptr) {
5417         ret = -TARGET_EFAULT;
5418         goto out;
5419     }
5420     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5421     unlock_user(argptr, arg, 0);
5422 
5423     /* Swizzle the data pointer to our local copy and call! */
5424     host_blkpg->data = &host_part;
5425     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5426 
5427 out:
5428     return ret;
5429 }
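
/*
 * The blkpg request carries a pointer inside the ioctl argument, which is
 * why the payload gets its own lock/convert pass above.  A typical caller
 * (field names as in <linux/blkpg.h>; start/length here are placeholder
 * byte offsets) looks roughly like:
 *
 *     struct blkpg_partition part = {
 *         .start = start, .length = length, .pno = 1,
 *     };
 *     struct blkpg_ioctl_arg a = {
 *         .op = BLKPG_ADD_PARTITION,
 *         .datalen = sizeof(part),
 *         .data = &part,
 *     };
 *     ioctl(fd, BLKPG, &a);
 */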
5430 
5431 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5432                                 int fd, int cmd, abi_long arg)
5433 {
5434     const argtype *arg_type = ie->arg_type;
5435     const StructEntry *se;
5436     const argtype *field_types;
5437     const int *dst_offsets, *src_offsets;
5438     int target_size;
5439     void *argptr;
5440     abi_ulong *target_rt_dev_ptr = NULL;
5441     unsigned long *host_rt_dev_ptr = NULL;
5442     abi_long ret;
5443     int i;
5444 
5445     assert(ie->access == IOC_W);
5446     assert(*arg_type == TYPE_PTR);
5447     arg_type++;
5448     assert(*arg_type == TYPE_STRUCT);
5449     target_size = thunk_type_size(arg_type, 0);
5450     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5451     if (!argptr) {
5452         return -TARGET_EFAULT;
5453     }
5454     arg_type++;
5455     assert(*arg_type == (int)STRUCT_rtentry);
5456     se = struct_entries + *arg_type++;
5457     assert(se->convert[0] == NULL);
5458     /* convert struct here to be able to catch rt_dev string */
5459     field_types = se->field_types;
5460     dst_offsets = se->field_offsets[THUNK_HOST];
5461     src_offsets = se->field_offsets[THUNK_TARGET];
5462     for (i = 0; i < se->nb_fields; i++) {
5463         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5464             assert(*field_types == TYPE_PTRVOID);
5465             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5466             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5467             if (*target_rt_dev_ptr != 0) {
5468                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5469                                                   tswapal(*target_rt_dev_ptr));
5470                 if (!*host_rt_dev_ptr) {
5471                     unlock_user(argptr, arg, 0);
5472                     return -TARGET_EFAULT;
5473                 }
5474             } else {
5475                 *host_rt_dev_ptr = 0;
5476             }
5477             field_types++;
5478             continue;
5479         }
5480         field_types = thunk_convert(buf_temp + dst_offsets[i],
5481                                     argptr + src_offsets[i],
5482                                     field_types, THUNK_HOST);
5483     }
5484     unlock_user(argptr, arg, 0);
5485 
5486     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5487 
5488     assert(host_rt_dev_ptr != NULL);
5489     assert(target_rt_dev_ptr != NULL);
5490     if (*host_rt_dev_ptr != 0) {
5491         unlock_user((void *)*host_rt_dev_ptr,
5492                     *target_rt_dev_ptr, 0);
5493     }
5494     return ret;
5495 }
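
/*
 * The special-casing above exists because struct rtentry embeds a device
 * name pointer (rt_dev) that the generic thunk cannot follow.  The guest
 * call being emulated is the classic route-add ioctl, roughly:
 *
 *     struct rtentry rt = { 0 };
 *     struct sockaddr_in *dst = (struct sockaddr_in *)&rt.rt_dst;
 *     dst->sin_family = AF_INET;
 *     rt.rt_flags = RTF_UP;
 *     rt.rt_dev = (char *)"eth0";   // the pointer fixed up by hand above
 *     ioctl(sock, SIOCADDRT, &rt);
 */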
5496 
5497 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5498                                      int fd, int cmd, abi_long arg)
5499 {
5500     int sig = target_to_host_signal(arg);
5501     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5502 }
5503 
5504 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5505                                     int fd, int cmd, abi_long arg)
5506 {
5507     struct timeval tv;
5508     abi_long ret;
5509 
5510     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5511     if (is_error(ret)) {
5512         return ret;
5513     }
5514 
5515     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5516         if (copy_to_user_timeval(arg, &tv)) {
5517             return -TARGET_EFAULT;
5518         }
5519     } else {
5520         if (copy_to_user_timeval64(arg, &tv)) {
5521             return -TARGET_EFAULT;
5522         }
5523     }
5524 
5525     return ret;
5526 }
5527 
5528 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5529                                       int fd, int cmd, abi_long arg)
5530 {
5531     struct timespec ts;
5532     abi_long ret;
5533 
5534     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5535     if (is_error(ret)) {
5536         return ret;
5537     }
5538 
5539     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5540         if (host_to_target_timespec(arg, &ts)) {
5541             return -TARGET_EFAULT;
5542         }
5543     } else {
5544         if (host_to_target_timespec64(arg, &ts)) {
5545             return -TARGET_EFAULT;
5546         }
5547     }
5548 
5549     return ret;
5550 }
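
/*
 * Both handlers read the host timestamp with the native ioctl and differ
 * only in how it is written back: the *_OLD command numbers use the
 * target's traditional timeval/timespec layout, the new ones the 64-bit
 * (time64) layout.  Guest usage is simply:
 *
 *     ioctl(sock, SIOCGSTAMP, &tv);    // time of the last received packet
 *     ioctl(sock, SIOCGSTAMPNS, &ts);  // same, at nanosecond resolution
 */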
5551 
5552 #ifdef TIOCGPTPEER
5553 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5554                                      int fd, int cmd, abi_long arg)
5555 {
5556     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5557     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5558 }
5559 #endif
5560 
5561 #ifdef HAVE_DRM_H
5562 
5563 static void unlock_drm_version(struct drm_version *host_ver,
5564                                struct target_drm_version *target_ver,
5565                                bool copy)
5566 {
5567     unlock_user(host_ver->name, target_ver->name,
5568                                 copy ? host_ver->name_len : 0);
5569     unlock_user(host_ver->date, target_ver->date,
5570                                 copy ? host_ver->date_len : 0);
5571     unlock_user(host_ver->desc, target_ver->desc,
5572                                 copy ? host_ver->desc_len : 0);
5573 }
5574 
5575 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5576                                           struct target_drm_version *target_ver)
5577 {
5578     memset(host_ver, 0, sizeof(*host_ver));
5579 
5580     __get_user(host_ver->name_len, &target_ver->name_len);
5581     if (host_ver->name_len) {
5582         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5583                                    target_ver->name_len, 0);
5584         if (!host_ver->name) {
5585             return -EFAULT;
5586         }
5587     }
5588 
5589     __get_user(host_ver->date_len, &target_ver->date_len);
5590     if (host_ver->date_len) {
5591         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5592                                    target_ver->date_len, 0);
5593         if (!host_ver->date) {
5594             goto err;
5595         }
5596     }
5597 
5598     __get_user(host_ver->desc_len, &target_ver->desc_len);
5599     if (host_ver->desc_len) {
5600         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5601                                    target_ver->desc_len, 0);
5602         if (!host_ver->desc) {
5603             goto err;
5604         }
5605     }
5606 
5607     return 0;
5608 err:
5609     unlock_drm_version(host_ver, target_ver, false);
5610     return -EFAULT;
5611 }
5612 
5613 static inline void host_to_target_drmversion(
5614                                           struct target_drm_version *target_ver,
5615                                           struct drm_version *host_ver)
5616 {
5617     __put_user(host_ver->version_major, &target_ver->version_major);
5618     __put_user(host_ver->version_minor, &target_ver->version_minor);
5619     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5620     __put_user(host_ver->name_len, &target_ver->name_len);
5621     __put_user(host_ver->date_len, &target_ver->date_len);
5622     __put_user(host_ver->desc_len, &target_ver->desc_len);
5623     unlock_drm_version(host_ver, target_ver, true);
5624 }
5625 
5626 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5627                              int fd, int cmd, abi_long arg)
5628 {
5629     struct drm_version *ver;
5630     struct target_drm_version *target_ver;
5631     abi_long ret;
5632 
5633     switch (ie->host_cmd) {
5634     case DRM_IOCTL_VERSION:
5635         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5636             return -TARGET_EFAULT;
5637         }
5638         ver = (struct drm_version *)buf_temp;
5639         ret = target_to_host_drmversion(ver, target_ver);
5640         if (!is_error(ret)) {
5641             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5642             if (is_error(ret)) {
5643                 unlock_drm_version(ver, target_ver, false);
5644             } else {
5645                 host_to_target_drmversion(target_ver, ver);
5646             }
5647         }
5648         unlock_user_struct(target_ver, arg, 0);
5649         return ret;
5650     }
5651     return -TARGET_ENOSYS;
5652 }
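
/*
 * DRM_IOCTL_VERSION follows the usual two-pass pattern, which is why the
 * helpers above lock the three string buffers individually: userspace
 * first calls with zero lengths to learn the sizes, then again with
 * buffers attached.
 *
 *     struct drm_version v = { 0 };
 *     ioctl(fd, DRM_IOCTL_VERSION, &v);      // fills the *_len fields
 *     v.name = malloc(v.name_len + 1);
 *     v.date = malloc(v.date_len + 1);
 *     v.desc = malloc(v.desc_len + 1);
 *     ioctl(fd, DRM_IOCTL_VERSION, &v);      // fills the strings
 */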
5653 
5654 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5655                                            struct drm_i915_getparam *gparam,
5656                                            int fd, abi_long arg)
5657 {
5658     abi_long ret;
5659     int value;
5660     struct target_drm_i915_getparam *target_gparam;
5661 
5662     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5663         return -TARGET_EFAULT;
5664     }
5665 
5666     __get_user(gparam->param, &target_gparam->param);
5667     gparam->value = &value;
5668     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5669     put_user_s32(value, target_gparam->value);
5670 
5671     unlock_user_struct(target_gparam, arg, 0);
5672     return ret;
5673 }
5674 
5675 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5676                                   int fd, int cmd, abi_long arg)
5677 {
5678     switch (ie->host_cmd) {
5679     case DRM_IOCTL_I915_GETPARAM:
5680         return do_ioctl_drm_i915_getparam(ie,
5681                                           (struct drm_i915_getparam *)buf_temp,
5682                                           fd, arg);
5683     default:
5684         return -TARGET_ENOSYS;
5685     }
5686 }
5687 
5688 #endif
5689 
5690 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5691                                         int fd, int cmd, abi_long arg)
5692 {
5693     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5694     struct tun_filter *target_filter;
5695     char *target_addr;
5696 
5697     assert(ie->access == IOC_W);
5698 
5699     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5700     if (!target_filter) {
5701         return -TARGET_EFAULT;
5702     }
5703     filter->flags = tswap16(target_filter->flags);
5704     filter->count = tswap16(target_filter->count);
5705     unlock_user(target_filter, arg, 0);
5706 
5707     if (filter->count) {
5708         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5709             MAX_STRUCT_SIZE) {
5710             return -TARGET_EFAULT;
5711         }
5712 
5713         target_addr = lock_user(VERIFY_READ,
5714                                 arg + offsetof(struct tun_filter, addr),
5715                                 filter->count * ETH_ALEN, 1);
5716         if (!target_addr) {
5717             return -TARGET_EFAULT;
5718         }
5719         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5720         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5721     }
5722 
5723     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5724 }
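
/*
 * The handler above copes with the flexible MAC array at the end of struct
 * tun_filter; the guest builds the same variable-length request, roughly
 * (mac0/mac1 stand for 6-byte hardware addresses):
 *
 *     struct {
 *         struct tun_filter f;
 *         unsigned char addrs[2][ETH_ALEN];
 *     } req = { .f = { .count = 2 } };
 *     memcpy(req.addrs[0], mac0, ETH_ALEN);
 *     memcpy(req.addrs[1], mac1, ETH_ALEN);
 *     ioctl(tapfd, TUNSETTXFILTER, &req);
 */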
5725 
5726 IOCTLEntry ioctl_entries[] = {
5727 #define IOCTL(cmd, access, ...) \
5728     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5729 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5730     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5731 #define IOCTL_IGNORE(cmd) \
5732     { TARGET_ ## cmd, 0, #cmd },
5733 #include "ioctls.h"
5734     { 0, 0, },
5735 };
5736 
5737 /* ??? Implement proper locking for ioctls.  */
5738 /* do_ioctl() must return target values and target errnos. */
5739 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5740 {
5741     const IOCTLEntry *ie;
5742     const argtype *arg_type;
5743     abi_long ret;
5744     uint8_t buf_temp[MAX_STRUCT_SIZE];
5745     int target_size;
5746     void *argptr;
5747 
5748     ie = ioctl_entries;
5749     for (;;) {
5750         if (ie->target_cmd == 0) {
5751             qemu_log_mask(
5752                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5753             return -TARGET_ENOSYS;
5754         }
5755         if (ie->target_cmd == cmd)
5756             break;
5757         ie++;
5758     }
5759     arg_type = ie->arg_type;
5760     if (ie->do_ioctl) {
5761         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5762     } else if (!ie->host_cmd) {
5763         /* Some architectures define BSD ioctls in their headers
5764            that are not implemented in Linux.  */
5765         return -TARGET_ENOSYS;
5766     }
5767 
5768     switch (arg_type[0]) {
5769     case TYPE_NULL:
5770         /* no argument */
5771         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5772         break;
5773     case TYPE_PTRVOID:
5774     case TYPE_INT:
5775     case TYPE_LONG:
5776     case TYPE_ULONG:
5777         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5778         break;
5779     case TYPE_PTR:
5780         arg_type++;
5781         target_size = thunk_type_size(arg_type, 0);
5782         switch (ie->access) {
5783         case IOC_R:
5784             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5785             if (!is_error(ret)) {
5786                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5787                 if (!argptr)
5788                     return -TARGET_EFAULT;
5789                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5790                 unlock_user(argptr, arg, target_size);
5791             }
5792             break;
5793         case IOC_W:
5794             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5795             if (!argptr)
5796                 return -TARGET_EFAULT;
5797             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5798             unlock_user(argptr, arg, 0);
5799             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5800             break;
5801         default:
5802         case IOC_RW:
5803             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5804             if (!argptr)
5805                 return -TARGET_EFAULT;
5806             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5807             unlock_user(argptr, arg, 0);
5808             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5809             if (!is_error(ret)) {
5810                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5811                 if (!argptr)
5812                     return -TARGET_EFAULT;
5813                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5814                 unlock_user(argptr, arg, target_size);
5815             }
5816             break;
5817         }
5818         break;
5819     default:
5820         qemu_log_mask(LOG_UNIMP,
5821                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5822                       (long)cmd, arg_type[0]);
5823         ret = -TARGET_ENOSYS;
5824         break;
5825     }
5826     return ret;
5827 }
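
/*
 * To follow one command through the dispatcher above: an ioctls.h entry in
 * the style of
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * produces an IOCTLEntry whose target_cmd is matched against the guest
 * cmd, whose arg_type starts with TYPE_PTR and whose access is IOC_R, so
 * do_ioctl() takes the IOC_R branch: run the host ioctl into buf_temp,
 * then thunk_convert() the result back into the guest buffer.
 */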
5828 
5829 static const bitmask_transtbl iflag_tbl[] = {
5830         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5831         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5832         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5833         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5834         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5835         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5836         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5837         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5838         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5839         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5840         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5841         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5842         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5843         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5844         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5845         { 0, 0, 0, 0 }
5846 };
5847 
5848 static const bitmask_transtbl oflag_tbl[] = {
5849 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5850 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5851 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5852 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5853 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5854 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5855 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5856 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5857 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5858 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5859 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5860 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5861 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5862 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5863 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5864 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5865 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5866 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5867 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5868 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5869 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5870 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5871 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5872 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5873 	{ 0, 0, 0, 0 }
5874 };
5875 
5876 static const bitmask_transtbl cflag_tbl[] = {
5877 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5878 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5879 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5880 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5881 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5882 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5883 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5884 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5885 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5886 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5887 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5888 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5889 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5890 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5891 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5892 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5893 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5894 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5895 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5896 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5897 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5898 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5899 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5900 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5901 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5902 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5903 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5904 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5905 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5906 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5907 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5908 	{ 0, 0, 0, 0 }
5909 };
5910 
5911 static const bitmask_transtbl lflag_tbl[] = {
5912   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5913   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5914   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5915   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5916   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5917   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5918   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5919   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5920   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5921   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5922   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5923   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5924   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5925   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5926   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5927   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5928   { 0, 0, 0, 0 }
5929 };
5930 
5931 static void target_to_host_termios (void *dst, const void *src)
5932 {
5933     struct host_termios *host = dst;
5934     const struct target_termios *target = src;
5935 
5936     host->c_iflag =
5937         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5938     host->c_oflag =
5939         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5940     host->c_cflag =
5941         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5942     host->c_lflag =
5943         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5944     host->c_line = target->c_line;
5945 
5946     memset(host->c_cc, 0, sizeof(host->c_cc));
5947     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5948     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5949     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5950     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5951     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5952     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5953     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5954     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5955     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5956     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5957     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5958     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5959     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5960     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5961     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5962     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5963     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5964 }
5965 
5966 static void host_to_target_termios (void *dst, const void *src)
5967 {
5968     struct target_termios *target = dst;
5969     const struct host_termios *host = src;
5970 
5971     target->c_iflag =
5972         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5973     target->c_oflag =
5974         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5975     target->c_cflag =
5976         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5977     target->c_lflag =
5978         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5979     target->c_line = host->c_line;
5980 
5981     memset(target->c_cc, 0, sizeof(target->c_cc));
5982     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5983     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5984     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5985     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5986     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5987     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5988     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5989     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5990     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5991     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5992     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5993     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5994     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5995     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5996     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5997     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5998     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5999 }
6000 
6001 static const StructEntry struct_termios_def = {
6002     .convert = { host_to_target_termios, target_to_host_termios },
6003     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6004     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6005     .print = print_termios,
6006 };
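
/*
 * Each bitmask_transtbl row holds { target_mask, target_bits, host_mask,
 * host_bits }: the conversion helpers OR in host_bits whenever the masked
 * value equals target_bits, and the reverse for host-to-target.  A quick
 * worked example with the cflag table, assuming a target whose baud/size
 * encoding differs from the host's:
 *
 *     c_cflag = TARGET_B9600 | TARGET_CS8 | TARGET_CREAD;
 *     // (c_cflag & TARGET_CBAUD) == TARGET_B9600  -> OR in host B9600
 *     // (c_cflag & TARGET_CSIZE) == TARGET_CS8    -> OR in host CS8
 *     // (c_cflag & TARGET_CREAD) == TARGET_CREAD  -> OR in host CREAD
 */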
6007 
6008 static const bitmask_transtbl mmap_flags_tbl[] = {
6009     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6010     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6011     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6012     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6013       MAP_ANONYMOUS, MAP_ANONYMOUS },
6014     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6015       MAP_GROWSDOWN, MAP_GROWSDOWN },
6016     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6017       MAP_DENYWRITE, MAP_DENYWRITE },
6018     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6019       MAP_EXECUTABLE, MAP_EXECUTABLE },
6020     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6021     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6022       MAP_NORESERVE, MAP_NORESERVE },
6023     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6024     /* MAP_STACK had been ignored by the kernel for quite some time.
6025        Recognize it for the target insofar as we do not want to pass
6026        it through to the host.  */
6027     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6028     { 0, 0, 0, 0 }
6029 };
6030 
6031 /*
6032  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6033  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6034  */
6035 #if defined(TARGET_I386)
6036 
6037 /* NOTE: there is really one LDT for all the threads */
6038 static uint8_t *ldt_table;
6039 
6040 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6041 {
6042     int size;
6043     void *p;
6044 
6045     if (!ldt_table)
6046         return 0;
6047     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6048     if (size > bytecount)
6049         size = bytecount;
6050     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6051     if (!p)
6052         return -TARGET_EFAULT;
6053     /* ??? Should this by byteswapped?  */
6054     /* ??? Should this be byteswapped?  */
6055     unlock_user(p, ptr, size);
6056     return size;
6057 }
6058 
6059 /* XXX: add locking support */
6060 static abi_long write_ldt(CPUX86State *env,
6061                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6062 {
6063     struct target_modify_ldt_ldt_s ldt_info;
6064     struct target_modify_ldt_ldt_s *target_ldt_info;
6065     int seg_32bit, contents, read_exec_only, limit_in_pages;
6066     int seg_not_present, useable, lm;
6067     uint32_t *lp, entry_1, entry_2;
6068 
6069     if (bytecount != sizeof(ldt_info))
6070         return -TARGET_EINVAL;
6071     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6072         return -TARGET_EFAULT;
6073     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6074     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6075     ldt_info.limit = tswap32(target_ldt_info->limit);
6076     ldt_info.flags = tswap32(target_ldt_info->flags);
6077     unlock_user_struct(target_ldt_info, ptr, 0);
6078 
6079     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6080         return -TARGET_EINVAL;
6081     seg_32bit = ldt_info.flags & 1;
6082     contents = (ldt_info.flags >> 1) & 3;
6083     read_exec_only = (ldt_info.flags >> 3) & 1;
6084     limit_in_pages = (ldt_info.flags >> 4) & 1;
6085     seg_not_present = (ldt_info.flags >> 5) & 1;
6086     useable = (ldt_info.flags >> 6) & 1;
6087 #ifdef TARGET_ABI32
6088     lm = 0;
6089 #else
6090     lm = (ldt_info.flags >> 7) & 1;
6091 #endif
6092     if (contents == 3) {
6093         if (oldmode)
6094             return -TARGET_EINVAL;
6095         if (seg_not_present == 0)
6096             return -TARGET_EINVAL;
6097     }
6098     /* allocate the LDT */
6099     if (!ldt_table) {
6100         env->ldt.base = target_mmap(0,
6101                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6102                                     PROT_READ|PROT_WRITE,
6103                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6104         if (env->ldt.base == -1)
6105             return -TARGET_ENOMEM;
6106         memset(g2h_untagged(env->ldt.base), 0,
6107                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6108         env->ldt.limit = 0xffff;
6109         ldt_table = g2h_untagged(env->ldt.base);
6110     }
6111 
6112     /* NOTE: same code as Linux kernel */
6113     /* Allow LDTs to be cleared by the user. */
6114     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6115         if (oldmode ||
6116             (contents == 0		&&
6117              read_exec_only == 1	&&
6118              seg_32bit == 0		&&
6119              limit_in_pages == 0	&&
6120              seg_not_present == 1	&&
6121              useable == 0 )) {
6122             entry_1 = 0;
6123             entry_2 = 0;
6124             goto install;
6125         }
6126     }
6127 
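    /*
     * Pack the fields into the two 32-bit words of an x86 segment
     * descriptor: entry_1 carries the low 16 bits of the base and limit,
     * entry_2 the remaining base/limit bits plus the access/flag bits.
     * 0x7000 sets S=1 (code/data descriptor) and DPL=3; the present bit
     * comes from seg_not_present above.
     */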
6128     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6129         (ldt_info.limit & 0x0ffff);
6130     entry_2 = (ldt_info.base_addr & 0xff000000) |
6131         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6132         (ldt_info.limit & 0xf0000) |
6133         ((read_exec_only ^ 1) << 9) |
6134         (contents << 10) |
6135         ((seg_not_present ^ 1) << 15) |
6136         (seg_32bit << 22) |
6137         (limit_in_pages << 23) |
6138         (lm << 21) |
6139         0x7000;
6140     if (!oldmode)
6141         entry_2 |= (useable << 20);
6142 
6143     /* Install the new entry ...  */
6144 install:
6145     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6146     lp[0] = tswap32(entry_1);
6147     lp[1] = tswap32(entry_2);
6148     return 0;
6149 }
6150 
6151 /* specific and weird i386 syscalls */
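/*
 * modify_ldt(2) dispatcher: func 0 reads the LDT, func 1 writes an entry
 * through the legacy interface (which never sets the 'useable' bit), and
 * func 0x11 writes an entry through the current interface.
 */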
6152 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6153                               unsigned long bytecount)
6154 {
6155     abi_long ret;
6156 
6157     switch (func) {
6158     case 0:
6159         ret = read_ldt(ptr, bytecount);
6160         break;
6161     case 1:
6162         ret = write_ldt(env, ptr, bytecount, 1);
6163         break;
6164     case 0x11:
6165         ret = write_ldt(env, ptr, bytecount, 0);
6166         break;
6167     default:
6168         ret = -TARGET_ENOSYS;
6169         break;
6170     }
6171     return ret;
6172 }
6173 
6174 #if defined(TARGET_ABI32)
6175 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6176 {
6177     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6178     struct target_modify_ldt_ldt_s ldt_info;
6179     struct target_modify_ldt_ldt_s *target_ldt_info;
6180     int seg_32bit, contents, read_exec_only, limit_in_pages;
6181     int seg_not_present, useable, lm;
6182     uint32_t *lp, entry_1, entry_2;
6183     int i;
6184 
6185     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6186     if (!target_ldt_info)
6187         return -TARGET_EFAULT;
6188     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6189     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6190     ldt_info.limit = tswap32(target_ldt_info->limit);
6191     ldt_info.flags = tswap32(target_ldt_info->flags);
6192     if (ldt_info.entry_number == -1) {
6193         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6194             if (gdt_table[i] == 0) {
6195                 ldt_info.entry_number = i;
6196                 target_ldt_info->entry_number = tswap32(i);
6197                 break;
6198             }
6199         }
6200     }
6201     unlock_user_struct(target_ldt_info, ptr, 1);
6202 
6203     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6204         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6205            return -TARGET_EINVAL;
6206     seg_32bit = ldt_info.flags & 1;
6207     contents = (ldt_info.flags >> 1) & 3;
6208     read_exec_only = (ldt_info.flags >> 3) & 1;
6209     limit_in_pages = (ldt_info.flags >> 4) & 1;
6210     seg_not_present = (ldt_info.flags >> 5) & 1;
6211     useable = (ldt_info.flags >> 6) & 1;
6212 #ifdef TARGET_ABI32
6213     lm = 0;
6214 #else
6215     lm = (ldt_info.flags >> 7) & 1;
6216 #endif
6217 
6218     if (contents == 3) {
6219         if (seg_not_present == 0)
6220             return -TARGET_EINVAL;
6221     }
6222 
6223     /* NOTE: same code as Linux kernel */
6224     /* Allow LDTs to be cleared by the user. */
6225     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6226         if ((contents == 0             &&
6227              read_exec_only == 1       &&
6228              seg_32bit == 0            &&
6229              limit_in_pages == 0       &&
6230              seg_not_present == 1      &&
6231              useable == 0 )) {
6232             entry_1 = 0;
6233             entry_2 = 0;
6234             goto install;
6235         }
6236     }
6237 
6238     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6239         (ldt_info.limit & 0x0ffff);
6240     entry_2 = (ldt_info.base_addr & 0xff000000) |
6241         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6242         (ldt_info.limit & 0xf0000) |
6243         ((read_exec_only ^ 1) << 9) |
6244         (contents << 10) |
6245         ((seg_not_present ^ 1) << 15) |
6246         (seg_32bit << 22) |
6247         (limit_in_pages << 23) |
6248         (useable << 20) |
6249         (lm << 21) |
6250         0x7000;
6251 
6252     /* Install the new entry ...  */
6253 install:
6254     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6255     lp[0] = tswap32(entry_1);
6256     lp[1] = tswap32(entry_2);
6257     return 0;
6258 }
6259 
6260 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6261 {
6262     struct target_modify_ldt_ldt_s *target_ldt_info;
6263     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6264     uint32_t base_addr, limit, flags;
6265     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6266     int seg_not_present, useable, lm;
6267     uint32_t *lp, entry_1, entry_2;
6268 
6269     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6270     if (!target_ldt_info)
6271         return -TARGET_EFAULT;
6272     idx = tswap32(target_ldt_info->entry_number);
6273     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6274         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6275         unlock_user_struct(target_ldt_info, ptr, 1);
6276         return -TARGET_EINVAL;
6277     }
6278     lp = (uint32_t *)(gdt_table + idx);
6279     entry_1 = tswap32(lp[0]);
6280     entry_2 = tswap32(lp[1]);
6281 
6282     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6283     contents = (entry_2 >> 10) & 3;
6284     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6285     seg_32bit = (entry_2 >> 22) & 1;
6286     limit_in_pages = (entry_2 >> 23) & 1;
6287     useable = (entry_2 >> 20) & 1;
6288 #ifdef TARGET_ABI32
6289     lm = 0;
6290 #else
6291     lm = (entry_2 >> 21) & 1;
6292 #endif
6293     flags = (seg_32bit << 0) | (contents << 1) |
6294         (read_exec_only << 3) | (limit_in_pages << 4) |
6295         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6296     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6297     base_addr = (entry_1 >> 16) |
6298         (entry_2 & 0xff000000) |
6299         ((entry_2 & 0xff) << 16);
6300     target_ldt_info->base_addr = tswapal(base_addr);
6301     target_ldt_info->limit = tswap32(limit);
6302     target_ldt_info->flags = tswap32(flags);
6303     unlock_user_struct(target_ldt_info, ptr, 1);
6304     return 0;
6305 }
6306 
6307 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6308 {
6309     return -TARGET_ENOSYS;
6310 }
6311 #else
6312 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6313 {
6314     abi_long ret = 0;
6315     abi_ulong val;
6316     int idx;
6317 
6318     switch(code) {
6319     case TARGET_ARCH_SET_GS:
6320     case TARGET_ARCH_SET_FS:
6321         if (code == TARGET_ARCH_SET_GS)
6322             idx = R_GS;
6323         else
6324             idx = R_FS;
6325         cpu_x86_load_seg(env, idx, 0);
6326         env->segs[idx].base = addr;
6327         break;
6328     case TARGET_ARCH_GET_GS:
6329     case TARGET_ARCH_GET_FS:
6330         if (code == TARGET_ARCH_GET_GS)
6331             idx = R_GS;
6332         else
6333             idx = R_FS;
6334         val = env->segs[idx].base;
6335         if (put_user(val, addr, abi_ulong))
6336             ret = -TARGET_EFAULT;
6337         break;
6338     default:
6339         ret = -TARGET_EINVAL;
6340         break;
6341     }
6342     return ret;
6343 }
6344 #endif /* defined(TARGET_ABI32) */
6345 #endif /* defined(TARGET_I386) */
6346 
6347 /*
6348  * These constants are generic.  Supply any that are missing from the host.
6349  */
6350 #ifndef PR_SET_NAME
6351 # define PR_SET_NAME    15
6352 # define PR_GET_NAME    16
6353 #endif
6354 #ifndef PR_SET_FP_MODE
6355 # define PR_SET_FP_MODE 45
6356 # define PR_GET_FP_MODE 46
6357 # define PR_FP_MODE_FR   (1 << 0)
6358 # define PR_FP_MODE_FRE  (1 << 1)
6359 #endif
6360 #ifndef PR_SVE_SET_VL
6361 # define PR_SVE_SET_VL  50
6362 # define PR_SVE_GET_VL  51
6363 # define PR_SVE_VL_LEN_MASK  0xffff
6364 # define PR_SVE_VL_INHERIT   (1 << 17)
6365 #endif
6366 #ifndef PR_PAC_RESET_KEYS
6367 # define PR_PAC_RESET_KEYS  54
6368 # define PR_PAC_APIAKEY   (1 << 0)
6369 # define PR_PAC_APIBKEY   (1 << 1)
6370 # define PR_PAC_APDAKEY   (1 << 2)
6371 # define PR_PAC_APDBKEY   (1 << 3)
6372 # define PR_PAC_APGAKEY   (1 << 4)
6373 #endif
6374 #ifndef PR_SET_TAGGED_ADDR_CTRL
6375 # define PR_SET_TAGGED_ADDR_CTRL 55
6376 # define PR_GET_TAGGED_ADDR_CTRL 56
6377 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6378 #endif
6379 #ifndef PR_MTE_TCF_SHIFT
6380 # define PR_MTE_TCF_SHIFT       1
6381 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6382 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6383 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6384 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6385 # define PR_MTE_TAG_SHIFT       3
6386 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6387 #endif
6388 #ifndef PR_SET_IO_FLUSHER
6389 # define PR_SET_IO_FLUSHER 57
6390 # define PR_GET_IO_FLUSHER 58
6391 #endif
6392 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6393 # define PR_SET_SYSCALL_USER_DISPATCH 59
6394 #endif
6395 #ifndef PR_SME_SET_VL
6396 # define PR_SME_SET_VL  63
6397 # define PR_SME_GET_VL  64
6398 # define PR_SME_VL_LEN_MASK  0xffff
6399 # define PR_SME_VL_INHERIT   (1 << 17)
6400 #endif
6401 
6402 #include "target_prctl.h"
6403 
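/*
 * Per-target prctl handlers come from target_prctl.h.  Any handler a target
 * does not provide falls back to one of the stubs below, which simply fail
 * with EINVAL.
 */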
6404 static abi_long do_prctl_inval0(CPUArchState *env)
6405 {
6406     return -TARGET_EINVAL;
6407 }
6408 
6409 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6410 {
6411     return -TARGET_EINVAL;
6412 }
6413 
6414 #ifndef do_prctl_get_fp_mode
6415 #define do_prctl_get_fp_mode do_prctl_inval0
6416 #endif
6417 #ifndef do_prctl_set_fp_mode
6418 #define do_prctl_set_fp_mode do_prctl_inval1
6419 #endif
6420 #ifndef do_prctl_sve_get_vl
6421 #define do_prctl_sve_get_vl do_prctl_inval0
6422 #endif
6423 #ifndef do_prctl_sve_set_vl
6424 #define do_prctl_sve_set_vl do_prctl_inval1
6425 #endif
6426 #ifndef do_prctl_reset_keys
6427 #define do_prctl_reset_keys do_prctl_inval1
6428 #endif
6429 #ifndef do_prctl_set_tagged_addr_ctrl
6430 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6431 #endif
6432 #ifndef do_prctl_get_tagged_addr_ctrl
6433 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6434 #endif
6435 #ifndef do_prctl_get_unalign
6436 #define do_prctl_get_unalign do_prctl_inval1
6437 #endif
6438 #ifndef do_prctl_set_unalign
6439 #define do_prctl_set_unalign do_prctl_inval1
6440 #endif
6441 #ifndef do_prctl_sme_get_vl
6442 #define do_prctl_sme_get_vl do_prctl_inval0
6443 #endif
6444 #ifndef do_prctl_sme_set_vl
6445 #define do_prctl_sme_set_vl do_prctl_inval1
6446 #endif
6447 
6448 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6449                          abi_long arg3, abi_long arg4, abi_long arg5)
6450 {
6451     abi_long ret;
6452 
6453     switch (option) {
6454     case PR_GET_PDEATHSIG:
6455         {
6456             int deathsig;
6457             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6458                                   arg3, arg4, arg5));
6459             if (!is_error(ret) &&
6460                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6461                 return -TARGET_EFAULT;
6462             }
6463             return ret;
6464         }
6465     case PR_SET_PDEATHSIG:
6466         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6467                                arg3, arg4, arg5));
6468     case PR_GET_NAME:
6469         {
6470             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6471             if (!name) {
6472                 return -TARGET_EFAULT;
6473             }
6474             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6475                                   arg3, arg4, arg5));
6476             unlock_user(name, arg2, 16);
6477             return ret;
6478         }
6479     case PR_SET_NAME:
6480         {
6481             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6482             if (!name) {
6483                 return -TARGET_EFAULT;
6484             }
6485             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6486                                   arg3, arg4, arg5));
6487             unlock_user(name, arg2, 0);
6488             return ret;
6489         }
6490     case PR_GET_FP_MODE:
6491         return do_prctl_get_fp_mode(env);
6492     case PR_SET_FP_MODE:
6493         return do_prctl_set_fp_mode(env, arg2);
6494     case PR_SVE_GET_VL:
6495         return do_prctl_sve_get_vl(env);
6496     case PR_SVE_SET_VL:
6497         return do_prctl_sve_set_vl(env, arg2);
6498     case PR_SME_GET_VL:
6499         return do_prctl_sme_get_vl(env);
6500     case PR_SME_SET_VL:
6501         return do_prctl_sme_set_vl(env, arg2);
6502     case PR_PAC_RESET_KEYS:
6503         if (arg3 || arg4 || arg5) {
6504             return -TARGET_EINVAL;
6505         }
6506         return do_prctl_reset_keys(env, arg2);
6507     case PR_SET_TAGGED_ADDR_CTRL:
6508         if (arg3 || arg4 || arg5) {
6509             return -TARGET_EINVAL;
6510         }
6511         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6512     case PR_GET_TAGGED_ADDR_CTRL:
6513         if (arg2 || arg3 || arg4 || arg5) {
6514             return -TARGET_EINVAL;
6515         }
6516         return do_prctl_get_tagged_addr_ctrl(env);
6517 
6518     case PR_GET_UNALIGN:
6519         return do_prctl_get_unalign(env, arg2);
6520     case PR_SET_UNALIGN:
6521         return do_prctl_set_unalign(env, arg2);
6522 
6523     case PR_CAP_AMBIENT:
6524     case PR_CAPBSET_READ:
6525     case PR_CAPBSET_DROP:
6526     case PR_GET_DUMPABLE:
6527     case PR_SET_DUMPABLE:
6528     case PR_GET_KEEPCAPS:
6529     case PR_SET_KEEPCAPS:
6530     case PR_GET_SECUREBITS:
6531     case PR_SET_SECUREBITS:
6532     case PR_GET_TIMING:
6533     case PR_SET_TIMING:
6534     case PR_GET_TIMERSLACK:
6535     case PR_SET_TIMERSLACK:
6536     case PR_MCE_KILL:
6537     case PR_MCE_KILL_GET:
6538     case PR_GET_NO_NEW_PRIVS:
6539     case PR_SET_NO_NEW_PRIVS:
6540     case PR_GET_IO_FLUSHER:
6541     case PR_SET_IO_FLUSHER:
6542         /* These prctl options take no pointer arguments, so pass them straight on. */
6543         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6544 
6545     case PR_GET_CHILD_SUBREAPER:
6546     case PR_SET_CHILD_SUBREAPER:
6547     case PR_GET_SPECULATION_CTRL:
6548     case PR_SET_SPECULATION_CTRL:
6549     case PR_GET_TID_ADDRESS:
6550         /* TODO */
6551         return -TARGET_EINVAL;
6552 
6553     case PR_GET_FPEXC:
6554     case PR_SET_FPEXC:
6555         /* Was used for SPE on PowerPC. */
6556         return -TARGET_EINVAL;
6557 
6558     case PR_GET_ENDIAN:
6559     case PR_SET_ENDIAN:
6560     case PR_GET_FPEMU:
6561     case PR_SET_FPEMU:
6562     case PR_SET_MM:
6563     case PR_GET_SECCOMP:
6564     case PR_SET_SECCOMP:
6565     case PR_SET_SYSCALL_USER_DISPATCH:
6566     case PR_GET_THP_DISABLE:
6567     case PR_SET_THP_DISABLE:
6568     case PR_GET_TSC:
6569     case PR_SET_TSC:
6570         /* Refuse these so the guest cannot disable features that QEMU itself needs. */
6571         return -TARGET_EINVAL;
6572 
6573     default:
6574         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6575                       option);
6576         return -TARGET_EINVAL;
6577     }
6578 }
6579 
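/* Host stack size for the threads created to back guest CLONE_VM clones. */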
6580 #define NEW_STACK_SIZE 0x40000
6581 
6582 
6583 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6584 typedef struct {
6585     CPUArchState *env;
6586     pthread_mutex_t mutex;
6587     pthread_cond_t cond;
6588     pthread_t thread;
6589     uint32_t tid;
6590     abi_ulong child_tidptr;
6591     abi_ulong parent_tidptr;
6592     sigset_t sigmask;
6593 } new_thread_info;
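/*
 * Parent/child handshake for thread creation: the parent holds clone_lock
 * while it finishes setting up the child's state; the child records its TID,
 * signals readiness via info->cond, then blocks on clone_lock until the
 * parent releases it before entering cpu_loop().
 */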
6594 
6595 static void *clone_func(void *arg)
6596 {
6597     new_thread_info *info = arg;
6598     CPUArchState *env;
6599     CPUState *cpu;
6600     TaskState *ts;
6601 
6602     rcu_register_thread();
6603     tcg_register_thread();
6604     env = info->env;
6605     cpu = env_cpu(env);
6606     thread_cpu = cpu;
6607     ts = (TaskState *)cpu->opaque;
6608     info->tid = sys_gettid();
6609     task_settid(ts);
6610     if (info->child_tidptr)
6611         put_user_u32(info->tid, info->child_tidptr);
6612     if (info->parent_tidptr)
6613         put_user_u32(info->tid, info->parent_tidptr);
6614     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6615     /* Enable signals.  */
6616     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6617     /* Signal to the parent that we're ready.  */
6618     pthread_mutex_lock(&info->mutex);
6619     pthread_cond_broadcast(&info->cond);
6620     pthread_mutex_unlock(&info->mutex);
6621     /* Wait until the parent has finished initializing the tls state.  */
6622     pthread_mutex_lock(&clone_lock);
6623     pthread_mutex_unlock(&clone_lock);
6624     cpu_loop(env);
6625     /* never exits */
6626     return NULL;
6627 }
6628 
6629 /* do_fork() must return host values and target errnos (unlike most
6630    do_*() functions). */
6631 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6632                    abi_ulong parent_tidptr, target_ulong newtls,
6633                    abi_ulong child_tidptr)
6634 {
6635     CPUState *cpu = env_cpu(env);
6636     int ret;
6637     TaskState *ts;
6638     CPUState *new_cpu;
6639     CPUArchState *new_env;
6640     sigset_t sigmask;
6641 
6642     flags &= ~CLONE_IGNORED_FLAGS;
6643 
6644     /* Emulate vfork() with fork() */
6645     if (flags & CLONE_VFORK)
6646         flags &= ~(CLONE_VFORK | CLONE_VM);
6647 
6648     if (flags & CLONE_VM) {
6649         TaskState *parent_ts = (TaskState *)cpu->opaque;
6650         new_thread_info info;
6651         pthread_attr_t attr;
6652 
6653         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6654             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6655             return -TARGET_EINVAL;
6656         }
6657 
6658         ts = g_new0(TaskState, 1);
6659         init_task_state(ts);
6660 
6661         /* Grab a mutex so that thread setup appears atomic.  */
6662         pthread_mutex_lock(&clone_lock);
6663 
6664         /*
6665          * If this is our first additional thread, we need to ensure we
6666          * generate code for parallel execution and flush old translations.
6667          * Do this now so that the copy gets CF_PARALLEL too.
6668          */
6669         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6670             cpu->tcg_cflags |= CF_PARALLEL;
6671             tb_flush(cpu);
6672         }
6673 
6674         /* we create a new CPU instance. */
6675         new_env = cpu_copy(env);
6676         /* Init regs that differ from the parent.  */
6677         cpu_clone_regs_child(new_env, newsp, flags);
6678         cpu_clone_regs_parent(env, flags);
6679         new_cpu = env_cpu(new_env);
6680         new_cpu->opaque = ts;
6681         ts->bprm = parent_ts->bprm;
6682         ts->info = parent_ts->info;
6683         ts->signal_mask = parent_ts->signal_mask;
6684 
6685         if (flags & CLONE_CHILD_CLEARTID) {
6686             ts->child_tidptr = child_tidptr;
6687         }
6688 
6689         if (flags & CLONE_SETTLS) {
6690             cpu_set_tls (new_env, newtls);
6691         }
6692 
6693         memset(&info, 0, sizeof(info));
6694         pthread_mutex_init(&info.mutex, NULL);
6695         pthread_mutex_lock(&info.mutex);
6696         pthread_cond_init(&info.cond, NULL);
6697         info.env = new_env;
6698         if (flags & CLONE_CHILD_SETTID) {
6699             info.child_tidptr = child_tidptr;
6700         }
6701         if (flags & CLONE_PARENT_SETTID) {
6702             info.parent_tidptr = parent_tidptr;
6703         }
6704 
6705         ret = pthread_attr_init(&attr);
6706         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6707         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6708         /* It is not safe to deliver signals until the child has finished
6709            initializing, so temporarily block all signals.  */
6710         sigfillset(&sigmask);
6711         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6712         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6713 
6714         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6715         /* TODO: Free new CPU state if thread creation failed.  */
6716 
6717         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6718         pthread_attr_destroy(&attr);
6719         if (ret == 0) {
6720             /* Wait for the child to initialize.  */
6721             pthread_cond_wait(&info.cond, &info.mutex);
6722             ret = info.tid;
6723         } else {
6724             ret = -1;
6725         }
6726         pthread_mutex_unlock(&info.mutex);
6727         pthread_cond_destroy(&info.cond);
6728         pthread_mutex_destroy(&info.mutex);
6729         pthread_mutex_unlock(&clone_lock);
6730     } else {
6731         /* if CLONE_VM is not set, we treat it as a fork */
6732         if (flags & CLONE_INVALID_FORK_FLAGS) {
6733             return -TARGET_EINVAL;
6734         }
6735 
6736         /* We can't support custom termination signals */
6737         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6738             return -TARGET_EINVAL;
6739         }
6740 
6741         if (block_signals()) {
6742             return -QEMU_ERESTARTSYS;
6743         }
6744 
6745         fork_start();
6746         ret = fork();
6747         if (ret == 0) {
6748             /* Child Process.  */
6749             cpu_clone_regs_child(env, newsp, flags);
6750             fork_end(1);
6751             /* There is a race condition here.  The parent process could
6752                theoretically read the TID in the child process before the
6753                child tid is set.  Fixing this would require either using
6754                ptrace (not implemented) or having *_tidptr point at a
6755                shared memory mapping.  We can't repeat the spinlock hack
6756                used above because the child gets its own copy of the lock.  */
6757             if (flags & CLONE_CHILD_SETTID)
6758                 put_user_u32(sys_gettid(), child_tidptr);
6759             if (flags & CLONE_PARENT_SETTID)
6760                 put_user_u32(sys_gettid(), parent_tidptr);
6761             ts = (TaskState *)cpu->opaque;
6762             if (flags & CLONE_SETTLS)
6763                 cpu_set_tls (env, newtls);
6764             if (flags & CLONE_CHILD_CLEARTID)
6765                 ts->child_tidptr = child_tidptr;
6766         } else {
6767             cpu_clone_regs_parent(env, flags);
6768             fork_end(0);
6769         }
6770     }
6771     return ret;
6772 }
6773 
6774 /* Warning: does not handle Linux-specific flags... */
6775 static int target_to_host_fcntl_cmd(int cmd)
6776 {
6777     int ret;
6778 
6779     switch(cmd) {
6780     case TARGET_F_DUPFD:
6781     case TARGET_F_GETFD:
6782     case TARGET_F_SETFD:
6783     case TARGET_F_GETFL:
6784     case TARGET_F_SETFL:
6785     case TARGET_F_OFD_GETLK:
6786     case TARGET_F_OFD_SETLK:
6787     case TARGET_F_OFD_SETLKW:
6788         ret = cmd;
6789         break;
6790     case TARGET_F_GETLK:
6791         ret = F_GETLK64;
6792         break;
6793     case TARGET_F_SETLK:
6794         ret = F_SETLK64;
6795         break;
6796     case TARGET_F_SETLKW:
6797         ret = F_SETLKW64;
6798         break;
6799     case TARGET_F_GETOWN:
6800         ret = F_GETOWN;
6801         break;
6802     case TARGET_F_SETOWN:
6803         ret = F_SETOWN;
6804         break;
6805     case TARGET_F_GETSIG:
6806         ret = F_GETSIG;
6807         break;
6808     case TARGET_F_SETSIG:
6809         ret = F_SETSIG;
6810         break;
6811 #if TARGET_ABI_BITS == 32
6812     case TARGET_F_GETLK64:
6813         ret = F_GETLK64;
6814         break;
6815     case TARGET_F_SETLK64:
6816         ret = F_SETLK64;
6817         break;
6818     case TARGET_F_SETLKW64:
6819         ret = F_SETLKW64;
6820         break;
6821 #endif
6822     case TARGET_F_SETLEASE:
6823         ret = F_SETLEASE;
6824         break;
6825     case TARGET_F_GETLEASE:
6826         ret = F_GETLEASE;
6827         break;
6828 #ifdef F_DUPFD_CLOEXEC
6829     case TARGET_F_DUPFD_CLOEXEC:
6830         ret = F_DUPFD_CLOEXEC;
6831         break;
6832 #endif
6833     case TARGET_F_NOTIFY:
6834         ret = F_NOTIFY;
6835         break;
6836 #ifdef F_GETOWN_EX
6837     case TARGET_F_GETOWN_EX:
6838         ret = F_GETOWN_EX;
6839         break;
6840 #endif
6841 #ifdef F_SETOWN_EX
6842     case TARGET_F_SETOWN_EX:
6843         ret = F_SETOWN_EX;
6844         break;
6845 #endif
6846 #ifdef F_SETPIPE_SZ
6847     case TARGET_F_SETPIPE_SZ:
6848         ret = F_SETPIPE_SZ;
6849         break;
6850     case TARGET_F_GETPIPE_SZ:
6851         ret = F_GETPIPE_SZ;
6852         break;
6853 #endif
6854 #ifdef F_ADD_SEALS
6855     case TARGET_F_ADD_SEALS:
6856         ret = F_ADD_SEALS;
6857         break;
6858     case TARGET_F_GET_SEALS:
6859         ret = F_GET_SEALS;
6860         break;
6861 #endif
6862     default:
6863         ret = -TARGET_EINVAL;
6864         break;
6865     }
6866 
6867 #if defined(__powerpc64__)
6868     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13
6869      * and 14, which the kernel does not support.  The glibc fcntl wrapper
6870      * adjusts them to 5, 6 and 7 before making the syscall.  Since we make
6871      * the syscall directly, adjust to what the kernel supports.
6872      */
6873     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6874         ret -= F_GETLK64 - 5;
6875     }
6876 #endif
6877 
6878     return ret;
6879 }
6880 
6881 #define FLOCK_TRANSTBL \
6882     switch (type) { \
6883     TRANSTBL_CONVERT(F_RDLCK); \
6884     TRANSTBL_CONVERT(F_WRLCK); \
6885     TRANSTBL_CONVERT(F_UNLCK); \
6886     }
6887 
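/*
 * FLOCK_TRANSTBL expands to a switch body; TRANSTBL_CONVERT is redefined
 * around each expansion so the same table maps lock types in either
 * direction.
 */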
6888 static int target_to_host_flock(int type)
6889 {
6890 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6891     FLOCK_TRANSTBL
6892 #undef  TRANSTBL_CONVERT
6893     return -TARGET_EINVAL;
6894 }
6895 
6896 static int host_to_target_flock(int type)
6897 {
6898 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6899     FLOCK_TRANSTBL
6900 #undef  TRANSTBL_CONVERT
6901     /* if we don't know how to convert the value coming
6902      * from the host we copy to the target field as-is
6903      */
6904     return type;
6905 }
6906 
6907 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6908                                             abi_ulong target_flock_addr)
6909 {
6910     struct target_flock *target_fl;
6911     int l_type;
6912 
6913     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6914         return -TARGET_EFAULT;
6915     }
6916 
6917     __get_user(l_type, &target_fl->l_type);
6918     l_type = target_to_host_flock(l_type);
6919     if (l_type < 0) {
6920         return l_type;
6921     }
6922     fl->l_type = l_type;
6923     __get_user(fl->l_whence, &target_fl->l_whence);
6924     __get_user(fl->l_start, &target_fl->l_start);
6925     __get_user(fl->l_len, &target_fl->l_len);
6926     __get_user(fl->l_pid, &target_fl->l_pid);
6927     unlock_user_struct(target_fl, target_flock_addr, 0);
6928     return 0;
6929 }
6930 
6931 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6932                                           const struct flock64 *fl)
6933 {
6934     struct target_flock *target_fl;
6935     short l_type;
6936 
6937     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6938         return -TARGET_EFAULT;
6939     }
6940 
6941     l_type = host_to_target_flock(fl->l_type);
6942     __put_user(l_type, &target_fl->l_type);
6943     __put_user(fl->l_whence, &target_fl->l_whence);
6944     __put_user(fl->l_start, &target_fl->l_start);
6945     __put_user(fl->l_len, &target_fl->l_len);
6946     __put_user(fl->l_pid, &target_fl->l_pid);
6947     unlock_user_struct(target_fl, target_flock_addr, 1);
6948     return 0;
6949 }
6950 
6951 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6952 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6953 
6954 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
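/*
 * The old ARM OABI lays out struct flock64 without the padding that EABI
 * inserts before the 8-byte-aligned l_start field, hence this packed
 * definition.
 */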
6955 struct target_oabi_flock64 {
6956     abi_short l_type;
6957     abi_short l_whence;
6958     abi_llong l_start;
6959     abi_llong l_len;
6960     abi_int   l_pid;
6961 } QEMU_PACKED;
6962 
6963 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6964                                                    abi_ulong target_flock_addr)
6965 {
6966     struct target_oabi_flock64 *target_fl;
6967     int l_type;
6968 
6969     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6970         return -TARGET_EFAULT;
6971     }
6972 
6973     __get_user(l_type, &target_fl->l_type);
6974     l_type = target_to_host_flock(l_type);
6975     if (l_type < 0) {
6976         return l_type;
6977     }
6978     fl->l_type = l_type;
6979     __get_user(fl->l_whence, &target_fl->l_whence);
6980     __get_user(fl->l_start, &target_fl->l_start);
6981     __get_user(fl->l_len, &target_fl->l_len);
6982     __get_user(fl->l_pid, &target_fl->l_pid);
6983     unlock_user_struct(target_fl, target_flock_addr, 0);
6984     return 0;
6985 }
6986 
6987 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6988                                                  const struct flock64 *fl)
6989 {
6990     struct target_oabi_flock64 *target_fl;
6991     short l_type;
6992 
6993     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6994         return -TARGET_EFAULT;
6995     }
6996 
6997     l_type = host_to_target_flock(fl->l_type);
6998     __put_user(l_type, &target_fl->l_type);
6999     __put_user(fl->l_whence, &target_fl->l_whence);
7000     __put_user(fl->l_start, &target_fl->l_start);
7001     __put_user(fl->l_len, &target_fl->l_len);
7002     __put_user(fl->l_pid, &target_fl->l_pid);
7003     unlock_user_struct(target_fl, target_flock_addr, 1);
7004     return 0;
7005 }
7006 #endif
7007 
7008 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7009                                               abi_ulong target_flock_addr)
7010 {
7011     struct target_flock64 *target_fl;
7012     int l_type;
7013 
7014     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7015         return -TARGET_EFAULT;
7016     }
7017 
7018     __get_user(l_type, &target_fl->l_type);
7019     l_type = target_to_host_flock(l_type);
7020     if (l_type < 0) {
7021         return l_type;
7022     }
7023     fl->l_type = l_type;
7024     __get_user(fl->l_whence, &target_fl->l_whence);
7025     __get_user(fl->l_start, &target_fl->l_start);
7026     __get_user(fl->l_len, &target_fl->l_len);
7027     __get_user(fl->l_pid, &target_fl->l_pid);
7028     unlock_user_struct(target_fl, target_flock_addr, 0);
7029     return 0;
7030 }
7031 
7032 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7033                                             const struct flock64 *fl)
7034 {
7035     struct target_flock64 *target_fl;
7036     short l_type;
7037 
7038     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7039         return -TARGET_EFAULT;
7040     }
7041 
7042     l_type = host_to_target_flock(fl->l_type);
7043     __put_user(l_type, &target_fl->l_type);
7044     __put_user(fl->l_whence, &target_fl->l_whence);
7045     __put_user(fl->l_start, &target_fl->l_start);
7046     __put_user(fl->l_len, &target_fl->l_len);
7047     __put_user(fl->l_pid, &target_fl->l_pid);
7048     unlock_user_struct(target_fl, target_flock_addr, 1);
7049     return 0;
7050 }
7051 
7052 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7053 {
7054     struct flock64 fl64;
7055 #ifdef F_GETOWN_EX
7056     struct f_owner_ex fox;
7057     struct target_f_owner_ex *target_fox;
7058 #endif
7059     abi_long ret;
7060     int host_cmd = target_to_host_fcntl_cmd(cmd);
7061 
7062     if (host_cmd == -TARGET_EINVAL)
7063         return host_cmd;
7064 
7065     switch(cmd) {
7066     case TARGET_F_GETLK:
7067         ret = copy_from_user_flock(&fl64, arg);
7068         if (ret) {
7069             return ret;
7070         }
7071         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7072         if (ret == 0) {
7073             ret = copy_to_user_flock(arg, &fl64);
7074         }
7075         break;
7076 
7077     case TARGET_F_SETLK:
7078     case TARGET_F_SETLKW:
7079         ret = copy_from_user_flock(&fl64, arg);
7080         if (ret) {
7081             return ret;
7082         }
7083         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7084         break;
7085 
7086     case TARGET_F_GETLK64:
7087     case TARGET_F_OFD_GETLK:
7088         ret = copy_from_user_flock64(&fl64, arg);
7089         if (ret) {
7090             return ret;
7091         }
7092         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7093         if (ret == 0) {
7094             ret = copy_to_user_flock64(arg, &fl64);
7095         }
7096         break;
7097     case TARGET_F_SETLK64:
7098     case TARGET_F_SETLKW64:
7099     case TARGET_F_OFD_SETLK:
7100     case TARGET_F_OFD_SETLKW:
7101         ret = copy_from_user_flock64(&fl64, arg);
7102         if (ret) {
7103             return ret;
7104         }
7105         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7106         break;
7107 
7108     case TARGET_F_GETFL:
7109         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7110         if (ret >= 0) {
7111             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7112         }
7113         break;
7114 
7115     case TARGET_F_SETFL:
7116         ret = get_errno(safe_fcntl(fd, host_cmd,
7117                                    target_to_host_bitmask(arg,
7118                                                           fcntl_flags_tbl)));
7119         break;
7120 
7121 #ifdef F_GETOWN_EX
7122     case TARGET_F_GETOWN_EX:
7123         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7124         if (ret >= 0) {
7125             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7126                 return -TARGET_EFAULT;
7127             target_fox->type = tswap32(fox.type);
7128             target_fox->pid = tswap32(fox.pid);
7129             unlock_user_struct(target_fox, arg, 1);
7130         }
7131         break;
7132 #endif
7133 
7134 #ifdef F_SETOWN_EX
7135     case TARGET_F_SETOWN_EX:
7136         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7137             return -TARGET_EFAULT;
7138         fox.type = tswap32(target_fox->type);
7139         fox.pid = tswap32(target_fox->pid);
7140         unlock_user_struct(target_fox, arg, 0);
7141         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7142         break;
7143 #endif
7144 
7145     case TARGET_F_SETSIG:
7146         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7147         break;
7148 
7149     case TARGET_F_GETSIG:
7150         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7151         break;
7152 
7153     case TARGET_F_SETOWN:
7154     case TARGET_F_GETOWN:
7155     case TARGET_F_SETLEASE:
7156     case TARGET_F_GETLEASE:
7157     case TARGET_F_SETPIPE_SZ:
7158     case TARGET_F_GETPIPE_SZ:
7159     case TARGET_F_ADD_SEALS:
7160     case TARGET_F_GET_SEALS:
7161         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7162         break;
7163 
7164     default:
7165         ret = get_errno(safe_fcntl(fd, cmd, arg));
7166         break;
7167     }
7168     return ret;
7169 }
7170 
7171 #ifdef USE_UID16
7172 
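/*
 * Targets with 16-bit uid_t/gid_t cannot represent every host ID: clamp
 * anything above 65535 to 65534 (the traditional "overflow" ID) and keep
 * -1 intact, since the setres*id() calls treat it as "leave unchanged".
 */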
7173 static inline int high2lowuid(int uid)
7174 {
7175     if (uid > 65535)
7176         return 65534;
7177     else
7178         return uid;
7179 }
7180 
7181 static inline int high2lowgid(int gid)
7182 {
7183     if (gid > 65535)
7184         return 65534;
7185     else
7186         return gid;
7187 }
7188 
7189 static inline int low2highuid(int uid)
7190 {
7191     if ((int16_t)uid == -1)
7192         return -1;
7193     else
7194         return uid;
7195 }
7196 
7197 static inline int low2highgid(int gid)
7198 {
7199     if ((int16_t)gid == -1)
7200         return -1;
7201     else
7202         return gid;
7203 }
7204 static inline int tswapid(int id)
7205 {
7206     return tswap16(id);
7207 }
7208 
7209 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7210 
7211 #else /* !USE_UID16 */
7212 static inline int high2lowuid(int uid)
7213 {
7214     return uid;
7215 }
7216 static inline int high2lowgid(int gid)
7217 {
7218     return gid;
7219 }
7220 static inline int low2highuid(int uid)
7221 {
7222     return uid;
7223 }
7224 static inline int low2highgid(int gid)
7225 {
7226     return gid;
7227 }
7228 static inline int tswapid(int id)
7229 {
7230     return tswap32(id);
7231 }
7232 
7233 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7234 
7235 #endif /* USE_UID16 */
7236 
7237 /* We must do direct syscalls for setting UID/GID, because we want to
7238  * implement the Linux system call semantics of "change only for this thread",
7239  * not the libc/POSIX semantics of "change for all threads in process".
7240  * (See http://ewontfix.com/17/ for more details.)
7241  * We use the 32-bit version of the syscalls if present; if it is not
7242  * then either the host architecture supports 32-bit UIDs natively with
7243  * the standard syscall, or the 16-bit UID is the best we can do.
7244  */
7245 #ifdef __NR_setuid32
7246 #define __NR_sys_setuid __NR_setuid32
7247 #else
7248 #define __NR_sys_setuid __NR_setuid
7249 #endif
7250 #ifdef __NR_setgid32
7251 #define __NR_sys_setgid __NR_setgid32
7252 #else
7253 #define __NR_sys_setgid __NR_setgid
7254 #endif
7255 #ifdef __NR_setresuid32
7256 #define __NR_sys_setresuid __NR_setresuid32
7257 #else
7258 #define __NR_sys_setresuid __NR_setresuid
7259 #endif
7260 #ifdef __NR_setresgid32
7261 #define __NR_sys_setresgid __NR_setresgid32
7262 #else
7263 #define __NR_sys_setresgid __NR_setresgid
7264 #endif
7265 
7266 _syscall1(int, sys_setuid, uid_t, uid)
7267 _syscall1(int, sys_setgid, gid_t, gid)
7268 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7269 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7270 
7271 void syscall_init(void)
7272 {
7273     IOCTLEntry *ie;
7274     const argtype *arg_type;
7275     int size;
7276 
7277     thunk_init(STRUCT_MAX);
7278 
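    /*
     * Register every structure layout listed in syscall_types.h with the
     * thunk machinery, so ioctl arguments can be converted between target
     * and host representations.
     */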
7279 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7280 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7281 #include "syscall_types.h"
7282 #undef STRUCT
7283 #undef STRUCT_SPECIAL
7284 
7285     /* we patch the ioctl size if necessary. We rely on the fact that
7286        no ioctl has all the bits of its size field set to '1' */
7287     ie = ioctl_entries;
7288     while (ie->target_cmd != 0) {
7289         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7290             TARGET_IOC_SIZEMASK) {
7291             arg_type = ie->arg_type;
7292             if (arg_type[0] != TYPE_PTR) {
7293                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7294                         ie->target_cmd);
7295                 exit(1);
7296             }
7297             arg_type++;
7298             size = thunk_type_size(arg_type, 0);
7299             ie->target_cmd = (ie->target_cmd &
7300                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7301                 (size << TARGET_IOC_SIZESHIFT);
7302         }
7303 
7304         /* automatic consistency check if host and target are the same arch */
7305 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7306     (defined(__x86_64__) && defined(TARGET_X86_64))
7307         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7308             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7309                     ie->name, ie->target_cmd, ie->host_cmd);
7310         }
7311 #endif
7312         ie++;
7313     }
7314 }
7315 
7316 #ifdef TARGET_NR_truncate64
7317 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7318                                          abi_long arg2,
7319                                          abi_long arg3,
7320                                          abi_long arg4)
7321 {
7322     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7323         arg2 = arg3;
7324         arg3 = arg4;
7325     }
7326     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7327 }
7328 #endif
7329 
7330 #ifdef TARGET_NR_ftruncate64
7331 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7332                                           abi_long arg2,
7333                                           abi_long arg3,
7334                                           abi_long arg4)
7335 {
7336     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7337         arg2 = arg3;
7338         arg3 = arg4;
7339     }
7340     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7341 }
7342 #endif
7343 
7344 #if defined(TARGET_NR_timer_settime) || \
7345     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7346 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7347                                                  abi_ulong target_addr)
7348 {
7349     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7350                                 offsetof(struct target_itimerspec,
7351                                          it_interval)) ||
7352         target_to_host_timespec(&host_its->it_value, target_addr +
7353                                 offsetof(struct target_itimerspec,
7354                                          it_value))) {
7355         return -TARGET_EFAULT;
7356     }
7357 
7358     return 0;
7359 }
7360 #endif
7361 
7362 #if defined(TARGET_NR_timer_settime64) || \
7363     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7364 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7365                                                    abi_ulong target_addr)
7366 {
7367     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7368                                   offsetof(struct target__kernel_itimerspec,
7369                                            it_interval)) ||
7370         target_to_host_timespec64(&host_its->it_value, target_addr +
7371                                   offsetof(struct target__kernel_itimerspec,
7372                                            it_value))) {
7373         return -TARGET_EFAULT;
7374     }
7375 
7376     return 0;
7377 }
7378 #endif
7379 
7380 #if ((defined(TARGET_NR_timerfd_gettime) || \
7381       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7382       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7383 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7384                                                  struct itimerspec *host_its)
7385 {
7386     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7387                                                        it_interval),
7388                                 &host_its->it_interval) ||
7389         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7390                                                        it_value),
7391                                 &host_its->it_value)) {
7392         return -TARGET_EFAULT;
7393     }
7394     return 0;
7395 }
7396 #endif
7397 
7398 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7399       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7400       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7401 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7402                                                    struct itimerspec *host_its)
7403 {
7404     if (host_to_target_timespec64(target_addr +
7405                                   offsetof(struct target__kernel_itimerspec,
7406                                            it_interval),
7407                                   &host_its->it_interval) ||
7408         host_to_target_timespec64(target_addr +
7409                                   offsetof(struct target__kernel_itimerspec,
7410                                            it_value),
7411                                   &host_its->it_value)) {
7412         return -TARGET_EFAULT;
7413     }
7414     return 0;
7415 }
7416 #endif
7417 
7418 #if defined(TARGET_NR_adjtimex) || \
7419     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7420 static inline abi_long target_to_host_timex(struct timex *host_tx,
7421                                             abi_long target_addr)
7422 {
7423     struct target_timex *target_tx;
7424 
7425     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7426         return -TARGET_EFAULT;
7427     }
7428 
7429     __get_user(host_tx->modes, &target_tx->modes);
7430     __get_user(host_tx->offset, &target_tx->offset);
7431     __get_user(host_tx->freq, &target_tx->freq);
7432     __get_user(host_tx->maxerror, &target_tx->maxerror);
7433     __get_user(host_tx->esterror, &target_tx->esterror);
7434     __get_user(host_tx->status, &target_tx->status);
7435     __get_user(host_tx->constant, &target_tx->constant);
7436     __get_user(host_tx->precision, &target_tx->precision);
7437     __get_user(host_tx->tolerance, &target_tx->tolerance);
7438     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7439     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7440     __get_user(host_tx->tick, &target_tx->tick);
7441     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7442     __get_user(host_tx->jitter, &target_tx->jitter);
7443     __get_user(host_tx->shift, &target_tx->shift);
7444     __get_user(host_tx->stabil, &target_tx->stabil);
7445     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7446     __get_user(host_tx->calcnt, &target_tx->calcnt);
7447     __get_user(host_tx->errcnt, &target_tx->errcnt);
7448     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7449     __get_user(host_tx->tai, &target_tx->tai);
7450 
7451     unlock_user_struct(target_tx, target_addr, 0);
7452     return 0;
7453 }
7454 
7455 static inline abi_long host_to_target_timex(abi_long target_addr,
7456                                             struct timex *host_tx)
7457 {
7458     struct target_timex *target_tx;
7459 
7460     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7461         return -TARGET_EFAULT;
7462     }
7463 
7464     __put_user(host_tx->modes, &target_tx->modes);
7465     __put_user(host_tx->offset, &target_tx->offset);
7466     __put_user(host_tx->freq, &target_tx->freq);
7467     __put_user(host_tx->maxerror, &target_tx->maxerror);
7468     __put_user(host_tx->esterror, &target_tx->esterror);
7469     __put_user(host_tx->status, &target_tx->status);
7470     __put_user(host_tx->constant, &target_tx->constant);
7471     __put_user(host_tx->precision, &target_tx->precision);
7472     __put_user(host_tx->tolerance, &target_tx->tolerance);
7473     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7474     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7475     __put_user(host_tx->tick, &target_tx->tick);
7476     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7477     __put_user(host_tx->jitter, &target_tx->jitter);
7478     __put_user(host_tx->shift, &target_tx->shift);
7479     __put_user(host_tx->stabil, &target_tx->stabil);
7480     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7481     __put_user(host_tx->calcnt, &target_tx->calcnt);
7482     __put_user(host_tx->errcnt, &target_tx->errcnt);
7483     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7484     __put_user(host_tx->tai, &target_tx->tai);
7485 
7486     unlock_user_struct(target_tx, target_addr, 1);
7487     return 0;
7488 }
7489 #endif
7490 
7491 
7492 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7493 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7494                                               abi_long target_addr)
7495 {
7496     struct target__kernel_timex *target_tx;
7497 
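    /*
     * The embedded time value needs its own 64-bit conversion helper; the
     * remaining fields are plain integers and are copied below.
     */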
7498     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7499                                  offsetof(struct target__kernel_timex,
7500                                           time))) {
7501         return -TARGET_EFAULT;
7502     }
7503 
7504     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7505         return -TARGET_EFAULT;
7506     }
7507 
7508     __get_user(host_tx->modes, &target_tx->modes);
7509     __get_user(host_tx->offset, &target_tx->offset);
7510     __get_user(host_tx->freq, &target_tx->freq);
7511     __get_user(host_tx->maxerror, &target_tx->maxerror);
7512     __get_user(host_tx->esterror, &target_tx->esterror);
7513     __get_user(host_tx->status, &target_tx->status);
7514     __get_user(host_tx->constant, &target_tx->constant);
7515     __get_user(host_tx->precision, &target_tx->precision);
7516     __get_user(host_tx->tolerance, &target_tx->tolerance);
7517     __get_user(host_tx->tick, &target_tx->tick);
7518     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7519     __get_user(host_tx->jitter, &target_tx->jitter);
7520     __get_user(host_tx->shift, &target_tx->shift);
7521     __get_user(host_tx->stabil, &target_tx->stabil);
7522     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7523     __get_user(host_tx->calcnt, &target_tx->calcnt);
7524     __get_user(host_tx->errcnt, &target_tx->errcnt);
7525     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7526     __get_user(host_tx->tai, &target_tx->tai);
7527 
7528     unlock_user_struct(target_tx, target_addr, 0);
7529     return 0;
7530 }
7531 
7532 static inline abi_long host_to_target_timex64(abi_long target_addr,
7533                                               struct timex *host_tx)
7534 {
7535     struct target__kernel_timex *target_tx;
7536 
7537     if (copy_to_user_timeval64(target_addr +
7538                                offsetof(struct target__kernel_timex, time),
7539                                &host_tx->time)) {
7540         return -TARGET_EFAULT;
7541     }
7542 
7543     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7544         return -TARGET_EFAULT;
7545     }
7546 
7547     __put_user(host_tx->modes, &target_tx->modes);
7548     __put_user(host_tx->offset, &target_tx->offset);
7549     __put_user(host_tx->freq, &target_tx->freq);
7550     __put_user(host_tx->maxerror, &target_tx->maxerror);
7551     __put_user(host_tx->esterror, &target_tx->esterror);
7552     __put_user(host_tx->status, &target_tx->status);
7553     __put_user(host_tx->constant, &target_tx->constant);
7554     __put_user(host_tx->precision, &target_tx->precision);
7555     __put_user(host_tx->tolerance, &target_tx->tolerance);
7556     __put_user(host_tx->tick, &target_tx->tick);
7557     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7558     __put_user(host_tx->jitter, &target_tx->jitter);
7559     __put_user(host_tx->shift, &target_tx->shift);
7560     __put_user(host_tx->stabil, &target_tx->stabil);
7561     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7562     __put_user(host_tx->calcnt, &target_tx->calcnt);
7563     __put_user(host_tx->errcnt, &target_tx->errcnt);
7564     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7565     __put_user(host_tx->tai, &target_tx->tai);
7566 
7567     unlock_user_struct(target_tx, target_addr, 1);
7568     return 0;
7569 }
7570 #endif
7571 
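/*
 * Older libc headers do not expose the sigev_notify_thread_id field by name;
 * fall back to the internal union member it aliases.
 */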
7572 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7573 #define sigev_notify_thread_id _sigev_un._tid
7574 #endif
7575 
7576 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7577                                                abi_ulong target_addr)
7578 {
7579     struct target_sigevent *target_sevp;
7580 
7581     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7582         return -TARGET_EFAULT;
7583     }
7584 
7585     /* This union is awkward on 64 bit systems because it has a 32 bit
7586      * integer and a pointer in it; we follow the conversion approach
7587      * used for handling sigval types in signal.c so the guest should get
7588      * the correct value back even if we did a 64 bit byteswap and it's
7589      * using the 32 bit integer.
7590      */
7591     host_sevp->sigev_value.sival_ptr =
7592         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7593     host_sevp->sigev_signo =
7594         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7595     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7596     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7597 
7598     unlock_user_struct(target_sevp, target_addr, 1);
7599     return 0;
7600 }
7601 
7602 #if defined(TARGET_NR_mlockall)
7603 static inline int target_to_host_mlockall_arg(int arg)
7604 {
7605     int result = 0;
7606 
7607     if (arg & TARGET_MCL_CURRENT) {
7608         result |= MCL_CURRENT;
7609     }
7610     if (arg & TARGET_MCL_FUTURE) {
7611         result |= MCL_FUTURE;
7612     }
7613 #ifdef MCL_ONFAULT
7614     if (arg & TARGET_MCL_ONFAULT) {
7615         result |= MCL_ONFAULT;
7616     }
7617 #endif
7618 
7619     return result;
7620 }
7621 #endif
7622 
7623 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7624      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7625      defined(TARGET_NR_newfstatat))
7626 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7627                                              abi_ulong target_addr,
7628                                              struct stat *host_st)
7629 {
7630 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7631     if (cpu_env->eabi) {
7632         struct target_eabi_stat64 *target_st;
7633 
7634         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7635             return -TARGET_EFAULT;
7636         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7637         __put_user(host_st->st_dev, &target_st->st_dev);
7638         __put_user(host_st->st_ino, &target_st->st_ino);
7639 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7640         __put_user(host_st->st_ino, &target_st->__st_ino);
7641 #endif
7642         __put_user(host_st->st_mode, &target_st->st_mode);
7643         __put_user(host_st->st_nlink, &target_st->st_nlink);
7644         __put_user(host_st->st_uid, &target_st->st_uid);
7645         __put_user(host_st->st_gid, &target_st->st_gid);
7646         __put_user(host_st->st_rdev, &target_st->st_rdev);
7647         __put_user(host_st->st_size, &target_st->st_size);
7648         __put_user(host_st->st_blksize, &target_st->st_blksize);
7649         __put_user(host_st->st_blocks, &target_st->st_blocks);
7650         __put_user(host_st->st_atime, &target_st->target_st_atime);
7651         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7652         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7653 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7654         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7655         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7656         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7657 #endif
7658         unlock_user_struct(target_st, target_addr, 1);
7659     } else
7660 #endif
7661     {
7662 #if defined(TARGET_HAS_STRUCT_STAT64)
7663         struct target_stat64 *target_st;
7664 #else
7665         struct target_stat *target_st;
7666 #endif
7667 
7668         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7669             return -TARGET_EFAULT;
7670         memset(target_st, 0, sizeof(*target_st));
7671         __put_user(host_st->st_dev, &target_st->st_dev);
7672         __put_user(host_st->st_ino, &target_st->st_ino);
7673 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7674         __put_user(host_st->st_ino, &target_st->__st_ino);
7675 #endif
7676         __put_user(host_st->st_mode, &target_st->st_mode);
7677         __put_user(host_st->st_nlink, &target_st->st_nlink);
7678         __put_user(host_st->st_uid, &target_st->st_uid);
7679         __put_user(host_st->st_gid, &target_st->st_gid);
7680         __put_user(host_st->st_rdev, &target_st->st_rdev);
7681         /* XXX: better use of kernel struct */
7682         __put_user(host_st->st_size, &target_st->st_size);
7683         __put_user(host_st->st_blksize, &target_st->st_blksize);
7684         __put_user(host_st->st_blocks, &target_st->st_blocks);
7685         __put_user(host_st->st_atime, &target_st->target_st_atime);
7686         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7687         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7688 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7689         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7690         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7691         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7692 #endif
7693         unlock_user_struct(target_st, target_addr, 1);
7694     }
7695 
7696     return 0;
7697 }
7698 #endif
7699 
7700 #if defined(TARGET_NR_statx) && defined(__NR_statx)
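     /*
      * Copy the statx results in host_stx out to the guest's struct
      * target_statx at target_addr, byte-swapping each field.
      */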
7701 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7702                                             abi_ulong target_addr)
7703 {
7704     struct target_statx *target_stx;
7705 
7706     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7707         return -TARGET_EFAULT;
7708     }
7709     memset(target_stx, 0, sizeof(*target_stx));
7710 
7711     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7712     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7713     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7714     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7715     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7716     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7717     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7718     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7719     __put_user(host_stx->stx_size, &target_stx->stx_size);
7720     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7721     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7722     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7723     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7724     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7725     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7726     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7727     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7728     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7729     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7730     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7731     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7732     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7733     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7734 
7735     unlock_user_struct(target_stx, target_addr, 1);
7736 
7737     return 0;
7738 }
7739 #endif
7740 
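     /*
      * Issue the raw futex syscall, choosing between the plain and _time64
      * variants according to the width of the host's time_t.
      */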
7741 static int do_sys_futex(int *uaddr, int op, int val,
7742                          const struct timespec *timeout, int *uaddr2,
7743                          int val3)
7744 {
7745 #if HOST_LONG_BITS == 64
7746 #if defined(__NR_futex)
7747     /* The host time_t is always 64 bits here, so no _time64 variant is defined. */
7748     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7749 
7750 #endif
7751 #else /* HOST_LONG_BITS == 64 */
7752 #if defined(__NR_futex_time64)
7753     if (sizeof(timeout->tv_sec) == 8) {
7754         /* _time64 function on 32bit arch */
7755         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7756     }
7757 #endif
7758 #if defined(__NR_futex)
7759     /* old function on 32bit arch */
7760     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7761 #endif
7762 #endif /* HOST_LONG_BITS == 64 */
7763     g_assert_not_reached();
7764 }
7765 
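     /*
      * As do_sys_futex(), but via the interruption-safe wrappers and with the
      * result converted to a -TARGET_<errcode> value via get_errno().
      */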
7766 static int do_safe_futex(int *uaddr, int op, int val,
7767                          const struct timespec *timeout, int *uaddr2,
7768                          int val3)
7769 {
7770 #if HOST_LONG_BITS == 64
7771 #if defined(__NR_futex)
7772     /* The host time_t is always 64 bits here, so no _time64 variant is defined. */
7773     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7774 #endif
7775 #else /* HOST_LONG_BITS == 64 */
7776 #if defined(__NR_futex_time64)
7777     if (sizeof(timeout->tv_sec) == 8) {
7778         /* _time64 function on 32bit arch */
7779         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7780                                            val3));
7781     }
7782 #endif
7783 #if defined(__NR_futex)
7784     /* old function on 32bit arch */
7785     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7786 #endif
7787 #endif /* HOST_LONG_BITS == 64 */
7788     return -TARGET_ENOSYS;
7789 }
7790 
7791 /* ??? Using host futex calls even when target atomic operations
7792    are not really atomic probably breaks things.  However, implementing
7793    futexes locally would make futexes shared between multiple processes
7794    tricky.  In that case they are probably useless anyway, because guest
7795    atomic operations would not work either.  */
7796 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7797 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7798                     int op, int val, target_ulong timeout,
7799                     target_ulong uaddr2, int val3)
7800 {
7801     struct timespec ts, *pts = NULL;
7802     void *haddr2 = NULL;
7803     int base_op;
7804 
7805     /* We assume FUTEX_* constants are the same on both host and target. */
7806 #ifdef FUTEX_CMD_MASK
7807     base_op = op & FUTEX_CMD_MASK;
7808 #else
7809     base_op = op;
7810 #endif
7811     switch (base_op) {
7812     case FUTEX_WAIT:
7813     case FUTEX_WAIT_BITSET:
7814         val = tswap32(val);
7815         break;
7816     case FUTEX_WAIT_REQUEUE_PI:
7817         val = tswap32(val);
7818         haddr2 = g2h(cpu, uaddr2);
7819         break;
7820     case FUTEX_LOCK_PI:
7821     case FUTEX_LOCK_PI2:
7822         break;
7823     case FUTEX_WAKE:
7824     case FUTEX_WAKE_BITSET:
7825     case FUTEX_TRYLOCK_PI:
7826     case FUTEX_UNLOCK_PI:
7827         timeout = 0;
7828         break;
7829     case FUTEX_FD:
7830         val = target_to_host_signal(val);
7831         timeout = 0;
7832         break;
7833     case FUTEX_CMP_REQUEUE:
7834     case FUTEX_CMP_REQUEUE_PI:
7835         val3 = tswap32(val3);
7836         /* fall through */
7837     case FUTEX_REQUEUE:
7838     case FUTEX_WAKE_OP:
7839         /*
7840          * For these, the 4th argument is not TIMEOUT, but VAL2.
7841          * But the prototype of do_safe_futex takes a pointer, so
7842          * insert casts to satisfy the compiler.  We do not need
7843          * to tswap VAL2 since it's not compared to guest memory.
7844          */
7845         pts = (struct timespec *)(uintptr_t)timeout;
7846         timeout = 0;
7847         haddr2 = g2h(cpu, uaddr2);
7848         break;
7849     default:
7850         return -TARGET_ENOSYS;
7851     }
7852     if (timeout) {
7853         pts = &ts;
7854         if (time64
7855             ? target_to_host_timespec64(pts, timeout)
7856             : target_to_host_timespec(pts, timeout)) {
7857             return -TARGET_EFAULT;
7858         }
7859     }
7860     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7861 }
7862 #endif
7863 
7864 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
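     /*
      * Emulate name_to_handle_at(2): read handle_bytes from the guest buffer,
      * call the host syscall, then copy the opaque handle and the mount id
      * back out to guest memory.
      */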
7865 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7866                                      abi_long handle, abi_long mount_id,
7867                                      abi_long flags)
7868 {
7869     struct file_handle *target_fh;
7870     struct file_handle *fh;
7871     int mid = 0;
7872     abi_long ret;
7873     char *name;
7874     unsigned int size, total_size;
7875 
7876     if (get_user_s32(size, handle)) {
7877         return -TARGET_EFAULT;
7878     }
7879 
7880     name = lock_user_string(pathname);
7881     if (!name) {
7882         return -TARGET_EFAULT;
7883     }
7884 
7885     total_size = sizeof(struct file_handle) + size;
7886     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7887     if (!target_fh) {
7888         unlock_user(name, pathname, 0);
7889         return -TARGET_EFAULT;
7890     }
7891 
7892     fh = g_malloc0(total_size);
7893     fh->handle_bytes = size;
7894 
7895     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7896     unlock_user(name, pathname, 0);
7897 
7898     /* man name_to_handle_at(2):
7899      * Other than the use of the handle_bytes field, the caller should treat
7900      * the file_handle structure as an opaque data type
7901      */
7902 
7903     memcpy(target_fh, fh, total_size);
7904     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7905     target_fh->handle_type = tswap32(fh->handle_type);
7906     g_free(fh);
7907     unlock_user(target_fh, handle, total_size);
7908 
7909     if (put_user_s32(mid, mount_id)) {
7910         return -TARGET_EFAULT;
7911     }
7912 
7913     return ret;
7914 
7915 }
7916 #endif
7917 
7918 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
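     /*
      * Emulate open_by_handle_at(2): rebuild a host file_handle from the
      * guest's copy and translate the open flags before calling the host.
      */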
7919 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7920                                      abi_long flags)
7921 {
7922     struct file_handle *target_fh;
7923     struct file_handle *fh;
7924     unsigned int size, total_size;
7925     abi_long ret;
7926 
7927     if (get_user_s32(size, handle)) {
7928         return -TARGET_EFAULT;
7929     }
7930 
7931     total_size = sizeof(struct file_handle) + size;
7932     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7933     if (!target_fh) {
7934         return -TARGET_EFAULT;
7935     }
7936 
7937     fh = g_memdup(target_fh, total_size);
7938     fh->handle_bytes = size;
7939     fh->handle_type = tswap32(target_fh->handle_type);
7940 
7941     ret = get_errno(open_by_handle_at(mount_fd, fh,
7942                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7943 
7944     g_free(fh);
7945 
7946     unlock_user(target_fh, handle, total_size);
7947 
7948     return ret;
7949 }
7950 #endif
7951 
7952 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7953 
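     /*
      * Shared implementation of signalfd() and signalfd4(): convert the guest
      * signal mask and flags, then register an fd translator so data read
      * from the descriptor is converted back to the guest layout.
      */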
7954 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7955 {
7956     int host_flags;
7957     target_sigset_t *target_mask;
7958     sigset_t host_mask;
7959     abi_long ret;
7960 
7961     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7962         return -TARGET_EINVAL;
7963     }
7964     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7965         return -TARGET_EFAULT;
7966     }
7967 
7968     target_to_host_sigset(&host_mask, target_mask);
7969 
7970     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7971 
7972     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7973     if (ret >= 0) {
7974         fd_trans_register(ret, &target_signalfd_trans);
7975     }
7976 
7977     unlock_user_struct(target_mask, mask, 0);
7978 
7979     return ret;
7980 }
7981 #endif
7982 
7983 /* Map host to target signal numbers for the wait family of syscalls.
7984    Assume all other status bits are the same.  */
7985 int host_to_target_waitstatus(int status)
7986 {
7987     if (WIFSIGNALED(status)) {
7988         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7989     }
7990     if (WIFSTOPPED(status)) {
7991         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7992                | (status & 0xff);
7993     }
7994     return status;
7995 }
7996 
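     /* Emulate /proc/self/cmdline: write the guest's NUL-terminated argv strings to fd. */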
7997 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7998 {
7999     CPUState *cpu = env_cpu(cpu_env);
8000     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8001     int i;
8002 
8003     for (i = 0; i < bprm->argc; i++) {
8004         size_t len = strlen(bprm->argv[i]) + 1;
8005 
8006         if (write(fd, bprm->argv[i], len) != len) {
8007             return -1;
8008         }
8009     }
8010 
8011     return 0;
8012 }
8013 
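     /*
      * Emulate /proc/self/maps: walk the host's own mappings and report the
      * ones that correspond to guest memory, translated into guest addresses
      * and page protections.
      */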
8014 static int open_self_maps(CPUArchState *cpu_env, int fd)
8015 {
8016     CPUState *cpu = env_cpu(cpu_env);
8017     TaskState *ts = cpu->opaque;
8018     GSList *map_info = read_self_maps();
8019     GSList *s;
8020     int count;
8021 
8022     for (s = map_info; s; s = g_slist_next(s)) {
8023         MapInfo *e = (MapInfo *) s->data;
8024 
8025         if (h2g_valid(e->start)) {
8026             unsigned long min = e->start;
8027             unsigned long max = e->end;
8028             int flags = page_get_flags(h2g(min));
8029             const char *path;
8030 
8031             max = h2g_valid(max - 1) ?
8032                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8033 
8034             if (page_check_range(h2g(min), max - min, flags) == -1) {
8035                 continue;
8036             }
8037 
8038 #ifdef TARGET_HPPA
8039             if (h2g(max) == ts->info->stack_limit) {
8040 #else
8041             if (h2g(min) == ts->info->stack_limit) {
8042 #endif
8043                 path = "[stack]";
8044             } else {
8045                 path = e->path;
8046             }
8047 
8048             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8049                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8050                             h2g(min), h2g(max - 1) + 1,
8051                             (flags & PAGE_READ) ? 'r' : '-',
8052                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8053                             (flags & PAGE_EXEC) ? 'x' : '-',
8054                             e->is_priv ? 'p' : 's',
8055                             (uint64_t) e->offset, e->dev, e->inode);
8056             if (path) {
8057                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8058             } else {
8059                 dprintf(fd, "\n");
8060             }
8061         }
8062     }
8063 
8064     free_self_maps(map_info);
8065 
8066 #ifdef TARGET_VSYSCALL_PAGE
8067     /*
8068      * We only support execution from the vsyscall page.
8069      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8070      */
8071     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8072                     " --xp 00000000 00:00 0",
8073                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8074     dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
8075 #endif
8076 
8077     return 0;
8078 }
8079 
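     /*
      * Emulate /proc/self/stat: only pid, comm, ppid, starttime and the start
      * of the stack are filled in; every other field reads as 0.
      */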
8080 static int open_self_stat(CPUArchState *cpu_env, int fd)
8081 {
8082     CPUState *cpu = env_cpu(cpu_env);
8083     TaskState *ts = cpu->opaque;
8084     g_autoptr(GString) buf = g_string_new(NULL);
8085     int i;
8086 
8087     for (i = 0; i < 44; i++) {
8088         if (i == 0) {
8089             /* pid */
8090             g_string_printf(buf, FMT_pid " ", getpid());
8091         } else if (i == 1) {
8092             /* app name */
8093             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8094             bin = bin ? bin + 1 : ts->bprm->argv[0];
8095             g_string_printf(buf, "(%.15s) ", bin);
8096         } else if (i == 3) {
8097             /* ppid */
8098             g_string_printf(buf, FMT_pid " ", getppid());
8099         } else if (i == 21) {
8100             /* starttime */
8101             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8102         } else if (i == 27) {
8103             /* stack bottom */
8104             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8105         } else {
8106             /* for the rest, there is MasterCard */
8107             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8108         }
8109 
8110         if (write(fd, buf->str, buf->len) != buf->len) {
8111             return -1;
8112         }
8113     }
8114 
8115     return 0;
8116 }
8117 
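     /* Emulate /proc/self/auxv: copy the guest's auxiliary vector out of its stack. */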
8118 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8119 {
8120     CPUState *cpu = env_cpu(cpu_env);
8121     TaskState *ts = cpu->opaque;
8122     abi_ulong auxv = ts->info->saved_auxv;
8123     abi_ulong len = ts->info->auxv_len;
8124     char *ptr;
8125 
8126     /*
8127      * The auxiliary vector is stored on the target process stack:
8128      * read the whole vector and copy it out to the file.
8129      */
8130     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8131     if (ptr != NULL) {
8132         while (len > 0) {
8133             ssize_t r;
8134             r = write(fd, ptr, len);
8135             if (r <= 0) {
8136                 break;
8137             }
8138             len -= r;
8139             ptr += r;
8140         }
8141         lseek(fd, 0, SEEK_SET);
8142         unlock_user(ptr, auxv, len);
8143     }
8144 
8145     return 0;
8146 }
8147 
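     /*
      * Return 1 if filename names /proc/self/<entry> (or /proc/<pid>/<entry>
      * for our own pid), 0 otherwise.
      */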
8148 static int is_proc_myself(const char *filename, const char *entry)
8149 {
8150     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8151         filename += strlen("/proc/");
8152         if (!strncmp(filename, "self/", strlen("self/"))) {
8153             filename += strlen("self/");
8154         } else if (*filename >= '1' && *filename <= '9') {
8155             char myself[80];
8156             snprintf(myself, sizeof(myself), "%d/", getpid());
8157             if (!strncmp(filename, myself, strlen(myself))) {
8158                 filename += strlen(myself);
8159             } else {
8160                 return 0;
8161             }
8162         } else {
8163             return 0;
8164         }
8165         if (!strcmp(filename, entry)) {
8166             return 1;
8167         }
8168     }
8169     return 0;
8170 }
8171 
8172 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8173     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8174 static int is_proc(const char *filename, const char *entry)
8175 {
8176     return strcmp(filename, entry) == 0;
8177 }
8178 #endif
8179 
8180 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8181 static int open_net_route(CPUArchState *cpu_env, int fd)
8182 {
8183     FILE *fp;
8184     char *line = NULL;
8185     size_t len = 0;
8186     ssize_t read;
8187 
8188     fp = fopen("/proc/net/route", "r");
8189     if (fp == NULL) {
8190         return -1;
8191     }
8192 
8193     /* read header */
8194 
8195     read = getline(&line, &len, fp);
8196     if (read != -1) {
             dprintf(fd, "%s", line);
         }
8197 
8198     /* read routes */
8199 
8200     while ((read = getline(&line, &len, fp)) != -1) {
8201         char iface[16];
8202         uint32_t dest, gw, mask;
8203         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8204         int fields;
8205 
8206         fields = sscanf(line,
8207                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8208                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8209                         &mask, &mtu, &window, &irtt);
8210         if (fields != 11) {
8211             continue;
8212         }
8213         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8214                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8215                 metric, tswap32(mask), mtu, window, irtt);
8216     }
8217 
8218     free(line);
8219     fclose(fp);
8220 
8221     return 0;
8222 }
8223 #endif
8224 
8225 #if defined(TARGET_SPARC)
8226 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8227 {
8228     dprintf(fd, "type\t\t: sun4u\n");
8229     return 0;
8230 }
8231 #endif
8232 
8233 #if defined(TARGET_HPPA)
8234 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8235 {
8236     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8237     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8238     dprintf(fd, "capabilities\t: os32\n");
8239     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8240     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8241     return 0;
8242 }
8243 #endif
8244 
8245 #if defined(TARGET_M68K)
8246 static int open_hardware(CPUArchState *cpu_env, int fd)
8247 {
8248     dprintf(fd, "Model:\t\tqemu-m68k\n");
8249     return 0;
8250 }
8251 #endif
8252 
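     /*
      * openat() helper: intercept a small set of /proc files whose contents
      * must reflect the guest rather than QEMU itself, synthesizing them into
      * a memfd (or an unlinked temporary file) and returning that descriptor;
      * everything else is passed through to the host.
      */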
8253 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8254 {
8255     struct fake_open {
8256         const char *filename;
8257         int (*fill)(CPUArchState *cpu_env, int fd);
8258         int (*cmp)(const char *s1, const char *s2);
8259     };
8260     const struct fake_open *fake_open;
8261     static const struct fake_open fakes[] = {
8262         { "maps", open_self_maps, is_proc_myself },
8263         { "stat", open_self_stat, is_proc_myself },
8264         { "auxv", open_self_auxv, is_proc_myself },
8265         { "cmdline", open_self_cmdline, is_proc_myself },
8266 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8267         { "/proc/net/route", open_net_route, is_proc },
8268 #endif
8269 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8270         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8271 #endif
8272 #if defined(TARGET_M68K)
8273         { "/proc/hardware", open_hardware, is_proc },
8274 #endif
8275         { NULL, NULL, NULL }
8276     };
8277 
8278     if (is_proc_myself(pathname, "exe")) {
8279         return safe_openat(dirfd, exec_path, flags, mode);
8280     }
8281 
8282     for (fake_open = fakes; fake_open->filename; fake_open++) {
8283         if (fake_open->cmp(pathname, fake_open->filename)) {
8284             break;
8285         }
8286     }
8287 
8288     if (fake_open->filename) {
8289         const char *tmpdir;
8290         char filename[PATH_MAX];
8291         int fd, r;
8292 
8293         fd = memfd_create("qemu-open", 0);
8294         if (fd < 0) {
8295             if (errno != ENOSYS) {
8296                 return fd;
8297             }
8298             /* fall back to a temporary file to hold the synthesized contents */
8299             tmpdir = getenv("TMPDIR");
8300             if (!tmpdir)
8301                 tmpdir = "/tmp";
8302             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8303             fd = mkstemp(filename);
8304             if (fd < 0) {
8305                 return fd;
8306             }
8307             unlink(filename);
8308         }
8309 
8310         if ((r = fake_open->fill(cpu_env, fd))) {
8311             int e = errno;
8312             close(fd);
8313             errno = e;
8314             return r;
8315         }
8316         lseek(fd, 0, SEEK_SET);
8317 
8318         return fd;
8319     }
8320 
8321     return safe_openat(dirfd, path(pathname), flags, mode);
8322 }
8323 
8324 #define TIMER_MAGIC 0x0caf0000
8325 #define TIMER_MAGIC_MASK 0xffff0000
8326 
8327 /* Convert QEMU provided timer ID back to internal 16bit index format */
8328 static target_timer_t get_timer_id(abi_long arg)
8329 {
8330     target_timer_t timerid = arg;
8331 
8332     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8333         return -TARGET_EINVAL;
8334     }
8335 
8336     timerid &= 0xffff;
8337 
8338     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8339         return -TARGET_EINVAL;
8340     }
8341 
8342     return timerid;
8343 }
8344 
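     /*
      * Convert a guest CPU affinity mask at target_addr into host unsigned
      * long words, handling differing word sizes and guest byte order.
      */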
8345 static int target_to_host_cpu_mask(unsigned long *host_mask,
8346                                    size_t host_size,
8347                                    abi_ulong target_addr,
8348                                    size_t target_size)
8349 {
8350     unsigned target_bits = sizeof(abi_ulong) * 8;
8351     unsigned host_bits = sizeof(*host_mask) * 8;
8352     abi_ulong *target_mask;
8353     unsigned i, j;
8354 
8355     assert(host_size >= target_size);
8356 
8357     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8358     if (!target_mask) {
8359         return -TARGET_EFAULT;
8360     }
8361     memset(host_mask, 0, host_size);
8362 
8363     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8364         unsigned bit = i * target_bits;
8365         abi_ulong val;
8366 
8367         __get_user(val, &target_mask[i]);
8368         for (j = 0; j < target_bits; j++, bit++) {
8369             if (val & (1UL << j)) {
8370                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8371             }
8372         }
8373     }
8374 
8375     unlock_user(target_mask, target_addr, 0);
8376     return 0;
8377 }
8378 
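     /* The inverse of target_to_host_cpu_mask(): copy a host CPU mask out to the guest. */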
8379 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8380                                    size_t host_size,
8381                                    abi_ulong target_addr,
8382                                    size_t target_size)
8383 {
8384     unsigned target_bits = sizeof(abi_ulong) * 8;
8385     unsigned host_bits = sizeof(*host_mask) * 8;
8386     abi_ulong *target_mask;
8387     unsigned i, j;
8388 
8389     assert(host_size >= target_size);
8390 
8391     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8392     if (!target_mask) {
8393         return -TARGET_EFAULT;
8394     }
8395 
8396     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8397         unsigned bit = i * target_bits;
8398         abi_ulong val = 0;
8399 
8400         for (j = 0; j < target_bits; j++, bit++) {
8401             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8402                 val |= 1UL << j;
8403             }
8404         }
8405         __put_user(val, &target_mask[i]);
8406     }
8407 
8408     unlock_user(target_mask, target_addr, target_size);
8409     return 0;
8410 }
8411 
8412 #ifdef TARGET_NR_getdents
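     /*
      * Emulate getdents(): read host directory entries (via getdents64 when
      * the host has no getdents) and repack them into the guest's
      * struct target_dirent layout, rewinding the directory if the repacked
      * records no longer fit in the guest buffer.
      */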
8413 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8414 {
8415     g_autofree void *hdirp = NULL;
8416     void *tdirp;
8417     int hlen, hoff, toff;
8418     int hreclen, treclen;
8419     off64_t prev_diroff = 0;
8420 
8421     hdirp = g_try_malloc(count);
8422     if (!hdirp) {
8423         return -TARGET_ENOMEM;
8424     }
8425 
8426 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8427     hlen = sys_getdents(dirfd, hdirp, count);
8428 #else
8429     hlen = sys_getdents64(dirfd, hdirp, count);
8430 #endif
8431 
8432     hlen = get_errno(hlen);
8433     if (is_error(hlen)) {
8434         return hlen;
8435     }
8436 
8437     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8438     if (!tdirp) {
8439         return -TARGET_EFAULT;
8440     }
8441 
8442     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8443 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8444         struct linux_dirent *hde = hdirp + hoff;
8445 #else
8446         struct linux_dirent64 *hde = hdirp + hoff;
8447 #endif
8448         struct target_dirent *tde = tdirp + toff;
8449         int namelen;
8450         uint8_t type;
8451 
8452         namelen = strlen(hde->d_name);
8453         hreclen = hde->d_reclen;
8454         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8455         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8456 
8457         if (toff + treclen > count) {
8458             /*
8459              * If the host struct is smaller than the target struct, or
8460              * requires less alignment and thus packs into less space,
8461              * then the host can return more entries than we can pass
8462              * on to the guest.
8463              */
8464             if (toff == 0) {
8465                 toff = -TARGET_EINVAL; /* result buffer is too small */
8466                 break;
8467             }
8468             /*
8469              * Return what we have, resetting the file pointer to the
8470              * location of the first record not returned.
8471              */
8472             lseek64(dirfd, prev_diroff, SEEK_SET);
8473             break;
8474         }
8475 
8476         prev_diroff = hde->d_off;
8477         tde->d_ino = tswapal(hde->d_ino);
8478         tde->d_off = tswapal(hde->d_off);
8479         tde->d_reclen = tswap16(treclen);
8480         memcpy(tde->d_name, hde->d_name, namelen + 1);
8481 
8482         /*
8483          * The getdents type is in what was formerly a padding byte at the
8484          * end of the structure.
8485          */
8486 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8487         type = *((uint8_t *)hde + hreclen - 1);
8488 #else
8489         type = hde->d_type;
8490 #endif
8491         *((uint8_t *)tde + treclen - 1) = type;
8492     }
8493 
8494     unlock_user(tdirp, arg2, toff);
8495     return toff;
8496 }
8497 #endif /* TARGET_NR_getdents */
8498 
8499 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
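     /* As do_getdents(), but producing the guest's struct target_dirent64 layout. */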
8500 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8501 {
8502     g_autofree void *hdirp = NULL;
8503     void *tdirp;
8504     int hlen, hoff, toff;
8505     int hreclen, treclen;
8506     off64_t prev_diroff = 0;
8507 
8508     hdirp = g_try_malloc(count);
8509     if (!hdirp) {
8510         return -TARGET_ENOMEM;
8511     }
8512 
8513     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8514     if (is_error(hlen)) {
8515         return hlen;
8516     }
8517 
8518     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8519     if (!tdirp) {
8520         return -TARGET_EFAULT;
8521     }
8522 
8523     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8524         struct linux_dirent64 *hde = hdirp + hoff;
8525         struct target_dirent64 *tde = tdirp + toff;
8526         int namelen;
8527 
8528         namelen = strlen(hde->d_name) + 1;
8529         hreclen = hde->d_reclen;
8530         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8531         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8532 
8533         if (toff + treclen > count) {
8534             /*
8535              * If the host struct is smaller than the target struct, or
8536              * requires less alignment and thus packs into less space,
8537              * then the host can return more entries than we can pass
8538              * on to the guest.
8539              */
8540             if (toff == 0) {
8541                 toff = -TARGET_EINVAL; /* result buffer is too small */
8542                 break;
8543             }
8544             /*
8545              * Return what we have, resetting the file pointer to the
8546              * location of the first record not returned.
8547              */
8548             lseek64(dirfd, prev_diroff, SEEK_SET);
8549             break;
8550         }
8551 
8552         prev_diroff = hde->d_off;
8553         tde->d_ino = tswap64(hde->d_ino);
8554         tde->d_off = tswap64(hde->d_off);
8555         tde->d_reclen = tswap16(treclen);
8556         tde->d_type = hde->d_type;
8557         memcpy(tde->d_name, hde->d_name, namelen);
8558     }
8559 
8560     unlock_user(tdirp, arg2, toff);
8561     return toff;
8562 }
8563 #endif /* TARGET_NR_getdents64 */
8564 
8565 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8566 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8567 #endif
8568 
8569 /* This is an internal helper for do_syscall so that it is easier
8570  * to have a single return point, so that actions, such as logging
8571  * of syscall results, can be performed.
8572  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8573  */
8574 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8575                             abi_long arg2, abi_long arg3, abi_long arg4,
8576                             abi_long arg5, abi_long arg6, abi_long arg7,
8577                             abi_long arg8)
8578 {
8579     CPUState *cpu = env_cpu(cpu_env);
8580     abi_long ret;
8581 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8582     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8583     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8584     || defined(TARGET_NR_statx)
8585     struct stat st;
8586 #endif
8587 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8588     || defined(TARGET_NR_fstatfs)
8589     struct statfs stfs;
8590 #endif
8591     void *p;
8592 
8593     switch(num) {
8594     case TARGET_NR_exit:
8595         /* In old applications this may be used to implement _exit(2).
8596            However, in threaded applications it is used for thread termination,
8597            and _exit_group is used for application termination.
8598            Do thread termination if we have more than one thread.  */
8599 
8600         if (block_signals()) {
8601             return -QEMU_ERESTARTSYS;
8602         }
8603 
8604         pthread_mutex_lock(&clone_lock);
8605 
8606         if (CPU_NEXT(first_cpu)) {
8607             TaskState *ts = cpu->opaque;
8608 
8609             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8610             object_unref(OBJECT(cpu));
8611             /*
8612              * At this point the CPU should be unrealized and removed
8613              * from cpu lists. We can clean-up the rest of the thread
8614              * data without the lock held.
8615              */
8616 
8617             pthread_mutex_unlock(&clone_lock);
8618 
8619             if (ts->child_tidptr) {
8620                 put_user_u32(0, ts->child_tidptr);
8621                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8622                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8623             }
8624             thread_cpu = NULL;
8625             g_free(ts);
8626             rcu_unregister_thread();
8627             pthread_exit(NULL);
8628         }
8629 
8630         pthread_mutex_unlock(&clone_lock);
8631         preexit_cleanup(cpu_env, arg1);
8632         _exit(arg1);
8633         return 0; /* avoid warning */
8634     case TARGET_NR_read:
8635         if (arg2 == 0 && arg3 == 0) {
8636             return get_errno(safe_read(arg1, 0, 0));
8637         } else {
8638             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8639                 return -TARGET_EFAULT;
8640             ret = get_errno(safe_read(arg1, p, arg3));
8641             if (ret >= 0 &&
8642                 fd_trans_host_to_target_data(arg1)) {
8643                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8644             }
8645             unlock_user(p, arg2, ret);
8646         }
8647         return ret;
8648     case TARGET_NR_write:
8649         if (arg2 == 0 && arg3 == 0) {
8650             return get_errno(safe_write(arg1, 0, 0));
8651         }
8652         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8653             return -TARGET_EFAULT;
8654         if (fd_trans_target_to_host_data(arg1)) {
8655             void *copy = g_malloc(arg3);
8656             memcpy(copy, p, arg3);
8657             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8658             if (ret >= 0) {
8659                 ret = get_errno(safe_write(arg1, copy, ret));
8660             }
8661             g_free(copy);
8662         } else {
8663             ret = get_errno(safe_write(arg1, p, arg3));
8664         }
8665         unlock_user(p, arg2, 0);
8666         return ret;
8667 
8668 #ifdef TARGET_NR_open
8669     case TARGET_NR_open:
8670         if (!(p = lock_user_string(arg1)))
8671             return -TARGET_EFAULT;
8672         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8673                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8674                                   arg3));
8675         fd_trans_unregister(ret);
8676         unlock_user(p, arg1, 0);
8677         return ret;
8678 #endif
8679     case TARGET_NR_openat:
8680         if (!(p = lock_user_string(arg2)))
8681             return -TARGET_EFAULT;
8682         ret = get_errno(do_openat(cpu_env, arg1, p,
8683                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8684                                   arg4));
8685         fd_trans_unregister(ret);
8686         unlock_user(p, arg2, 0);
8687         return ret;
8688 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8689     case TARGET_NR_name_to_handle_at:
8690         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8691         return ret;
8692 #endif
8693 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8694     case TARGET_NR_open_by_handle_at:
8695         ret = do_open_by_handle_at(arg1, arg2, arg3);
8696         fd_trans_unregister(ret);
8697         return ret;
8698 #endif
8699 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8700     case TARGET_NR_pidfd_open:
8701         return get_errno(pidfd_open(arg1, arg2));
8702 #endif
8703 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8704     case TARGET_NR_pidfd_send_signal:
8705         {
8706             siginfo_t uinfo, *puinfo;
8707 
8708             if (arg3) {
8709                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8710                 if (!p) {
8711                     return -TARGET_EFAULT;
8712                  }
8713                  target_to_host_siginfo(&uinfo, p);
8714                  unlock_user(p, arg3, 0);
8715                  puinfo = &uinfo;
8716             } else {
8717                  puinfo = NULL;
8718             }
8719             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8720                                               puinfo, arg4));
8721         }
8722         return ret;
8723 #endif
8724 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8725     case TARGET_NR_pidfd_getfd:
8726         return get_errno(pidfd_getfd(arg1, arg2, arg3));
8727 #endif
8728     case TARGET_NR_close:
8729         fd_trans_unregister(arg1);
8730         return get_errno(close(arg1));
8731 
8732     case TARGET_NR_brk:
8733         return do_brk(arg1);
8734 #ifdef TARGET_NR_fork
8735     case TARGET_NR_fork:
8736         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8737 #endif
8738 #ifdef TARGET_NR_waitpid
8739     case TARGET_NR_waitpid:
8740         {
8741             int status;
8742             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8743             if (!is_error(ret) && arg2 && ret
8744                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8745                 return -TARGET_EFAULT;
8746         }
8747         return ret;
8748 #endif
8749 #ifdef TARGET_NR_waitid
8750     case TARGET_NR_waitid:
8751         {
8752             siginfo_t info;
8753             info.si_pid = 0;
8754             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8755             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8756                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8757                     return -TARGET_EFAULT;
8758                 host_to_target_siginfo(p, &info);
8759                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8760             }
8761         }
8762         return ret;
8763 #endif
8764 #ifdef TARGET_NR_creat /* not on alpha */
8765     case TARGET_NR_creat:
8766         if (!(p = lock_user_string(arg1)))
8767             return -TARGET_EFAULT;
8768         ret = get_errno(creat(p, arg2));
8769         fd_trans_unregister(ret);
8770         unlock_user(p, arg1, 0);
8771         return ret;
8772 #endif
8773 #ifdef TARGET_NR_link
8774     case TARGET_NR_link:
8775         {
8776             void * p2;
8777             p = lock_user_string(arg1);
8778             p2 = lock_user_string(arg2);
8779             if (!p || !p2)
8780                 ret = -TARGET_EFAULT;
8781             else
8782                 ret = get_errno(link(p, p2));
8783             unlock_user(p2, arg2, 0);
8784             unlock_user(p, arg1, 0);
8785         }
8786         return ret;
8787 #endif
8788 #if defined(TARGET_NR_linkat)
8789     case TARGET_NR_linkat:
8790         {
8791             void * p2 = NULL;
8792             if (!arg2 || !arg4)
8793                 return -TARGET_EFAULT;
8794             p  = lock_user_string(arg2);
8795             p2 = lock_user_string(arg4);
8796             if (!p || !p2)
8797                 ret = -TARGET_EFAULT;
8798             else
8799                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8800             unlock_user(p, arg2, 0);
8801             unlock_user(p2, arg4, 0);
8802         }
8803         return ret;
8804 #endif
8805 #ifdef TARGET_NR_unlink
8806     case TARGET_NR_unlink:
8807         if (!(p = lock_user_string(arg1)))
8808             return -TARGET_EFAULT;
8809         ret = get_errno(unlink(p));
8810         unlock_user(p, arg1, 0);
8811         return ret;
8812 #endif
8813 #if defined(TARGET_NR_unlinkat)
8814     case TARGET_NR_unlinkat:
8815         if (!(p = lock_user_string(arg2)))
8816             return -TARGET_EFAULT;
8817         ret = get_errno(unlinkat(arg1, p, arg3));
8818         unlock_user(p, arg2, 0);
8819         return ret;
8820 #endif
8821     case TARGET_NR_execve:
8822         {
8823             char **argp, **envp;
8824             int argc, envc;
8825             abi_ulong gp;
8826             abi_ulong guest_argp;
8827             abi_ulong guest_envp;
8828             abi_ulong addr;
8829             char **q;
8830 
8831             argc = 0;
8832             guest_argp = arg2;
8833             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8834                 if (get_user_ual(addr, gp))
8835                     return -TARGET_EFAULT;
8836                 if (!addr)
8837                     break;
8838                 argc++;
8839             }
8840             envc = 0;
8841             guest_envp = arg3;
8842             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8843                 if (get_user_ual(addr, gp))
8844                     return -TARGET_EFAULT;
8845                 if (!addr)
8846                     break;
8847                 envc++;
8848             }
8849 
8850             argp = g_new0(char *, argc + 1);
8851             envp = g_new0(char *, envc + 1);
8852 
8853             for (gp = guest_argp, q = argp; gp;
8854                   gp += sizeof(abi_ulong), q++) {
8855                 if (get_user_ual(addr, gp))
8856                     goto execve_efault;
8857                 if (!addr)
8858                     break;
8859                 if (!(*q = lock_user_string(addr)))
8860                     goto execve_efault;
8861             }
8862             *q = NULL;
8863 
8864             for (gp = guest_envp, q = envp; gp;
8865                   gp += sizeof(abi_ulong), q++) {
8866                 if (get_user_ual(addr, gp))
8867                     goto execve_efault;
8868                 if (!addr)
8869                     break;
8870                 if (!(*q = lock_user_string(addr)))
8871                     goto execve_efault;
8872             }
8873             *q = NULL;
8874 
8875             if (!(p = lock_user_string(arg1)))
8876                 goto execve_efault;
8877             /* Although execve() is not an interruptible syscall it is
8878             /* Although execve() is not an interruptible syscall, it is
8879              * if we allow a signal to happen before we make the host
8880              * syscall then we will 'lose' it, because at the point of
8881              * execve the process leaves QEMU's control. So we use the
8882              * safe syscall wrapper to ensure that we either take the
8883              * signal as a guest signal, or else it does not happen
8884              * before the execve completes and makes it the other
8885              * program's problem.
8886              */
8887             if (is_proc_myself(p, "exe")) {
8888                 ret = get_errno(safe_execve(exec_path, argp, envp));
8889             } else {
8890                 ret = get_errno(safe_execve(p, argp, envp));
8891             }
8892             unlock_user(p, arg1, 0);
8893 
8894             goto execve_end;
8895 
8896         execve_efault:
8897             ret = -TARGET_EFAULT;
8898 
8899         execve_end:
8900             for (gp = guest_argp, q = argp; *q;
8901                   gp += sizeof(abi_ulong), q++) {
8902                 if (get_user_ual(addr, gp)
8903                     || !addr)
8904                     break;
8905                 unlock_user(*q, addr, 0);
8906             }
8907             for (gp = guest_envp, q = envp; *q;
8908                   gp += sizeof(abi_ulong), q++) {
8909                 if (get_user_ual(addr, gp)
8910                     || !addr)
8911                     break;
8912                 unlock_user(*q, addr, 0);
8913             }
8914 
8915             g_free(argp);
8916             g_free(envp);
8917         }
8918         return ret;
8919     case TARGET_NR_chdir:
8920         if (!(p = lock_user_string(arg1)))
8921             return -TARGET_EFAULT;
8922         ret = get_errno(chdir(p));
8923         unlock_user(p, arg1, 0);
8924         return ret;
8925 #ifdef TARGET_NR_time
8926     case TARGET_NR_time:
8927         {
8928             time_t host_time;
8929             ret = get_errno(time(&host_time));
8930             if (!is_error(ret)
8931                 && arg1
8932                 && put_user_sal(host_time, arg1))
8933                 return -TARGET_EFAULT;
8934         }
8935         return ret;
8936 #endif
8937 #ifdef TARGET_NR_mknod
8938     case TARGET_NR_mknod:
8939         if (!(p = lock_user_string(arg1)))
8940             return -TARGET_EFAULT;
8941         ret = get_errno(mknod(p, arg2, arg3));
8942         unlock_user(p, arg1, 0);
8943         return ret;
8944 #endif
8945 #if defined(TARGET_NR_mknodat)
8946     case TARGET_NR_mknodat:
8947         if (!(p = lock_user_string(arg2)))
8948             return -TARGET_EFAULT;
8949         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8950         unlock_user(p, arg2, 0);
8951         return ret;
8952 #endif
8953 #ifdef TARGET_NR_chmod
8954     case TARGET_NR_chmod:
8955         if (!(p = lock_user_string(arg1)))
8956             return -TARGET_EFAULT;
8957         ret = get_errno(chmod(p, arg2));
8958         unlock_user(p, arg1, 0);
8959         return ret;
8960 #endif
8961 #ifdef TARGET_NR_lseek
8962     case TARGET_NR_lseek:
8963         return get_errno(lseek(arg1, arg2, arg3));
8964 #endif
8965 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8966     /* Alpha specific */
8967     case TARGET_NR_getxpid:
8968         cpu_env->ir[IR_A4] = getppid();
8969         return get_errno(getpid());
8970 #endif
8971 #ifdef TARGET_NR_getpid
8972     case TARGET_NR_getpid:
8973         return get_errno(getpid());
8974 #endif
8975     case TARGET_NR_mount:
8976         {
8977             /* need to look at the data field */
8978             void *p2, *p3;
8979 
8980             if (arg1) {
8981                 p = lock_user_string(arg1);
8982                 if (!p) {
8983                     return -TARGET_EFAULT;
8984                 }
8985             } else {
8986                 p = NULL;
8987             }
8988 
8989             p2 = lock_user_string(arg2);
8990             if (!p2) {
8991                 if (arg1) {
8992                     unlock_user(p, arg1, 0);
8993                 }
8994                 return -TARGET_EFAULT;
8995             }
8996 
8997             if (arg3) {
8998                 p3 = lock_user_string(arg3);
8999                 if (!p3) {
9000                     if (arg1) {
9001                         unlock_user(p, arg1, 0);
9002                     }
9003                     unlock_user(p2, arg2, 0);
9004                     return -TARGET_EFAULT;
9005                 }
9006             } else {
9007                 p3 = NULL;
9008             }
9009 
9010             /* FIXME - arg5 should be locked, but it isn't clear how to
9011              * do that since it's not guaranteed to be a NULL-terminated
9012              * string.
9013              */
9014             if (!arg5) {
9015                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9016             } else {
9017                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9018             }
9019             ret = get_errno(ret);
9020 
9021             if (arg1) {
9022                 unlock_user(p, arg1, 0);
9023             }
9024             unlock_user(p2, arg2, 0);
9025             if (arg3) {
9026                 unlock_user(p3, arg3, 0);
9027             }
9028         }
9029         return ret;
9030 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9031 #if defined(TARGET_NR_umount)
9032     case TARGET_NR_umount:
9033 #endif
9034 #if defined(TARGET_NR_oldumount)
9035     case TARGET_NR_oldumount:
9036 #endif
9037         if (!(p = lock_user_string(arg1)))
9038             return -TARGET_EFAULT;
9039         ret = get_errno(umount(p));
9040         unlock_user(p, arg1, 0);
9041         return ret;
9042 #endif
9043 #ifdef TARGET_NR_stime /* not on alpha */
9044     case TARGET_NR_stime:
9045         {
9046             struct timespec ts;
9047             ts.tv_nsec = 0;
9048             if (get_user_sal(ts.tv_sec, arg1)) {
9049                 return -TARGET_EFAULT;
9050             }
9051             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9052         }
9053 #endif
9054 #ifdef TARGET_NR_alarm /* not on alpha */
9055     case TARGET_NR_alarm:
9056         return alarm(arg1);
9057 #endif
9058 #ifdef TARGET_NR_pause /* not on alpha */
9059     case TARGET_NR_pause:
9060         if (!block_signals()) {
9061             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9062         }
9063         return -TARGET_EINTR;
9064 #endif
9065 #ifdef TARGET_NR_utime
9066     case TARGET_NR_utime:
9067         {
9068             struct utimbuf tbuf, *host_tbuf;
9069             struct target_utimbuf *target_tbuf;
9070             if (arg2) {
9071                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9072                     return -TARGET_EFAULT;
9073                 tbuf.actime = tswapal(target_tbuf->actime);
9074                 tbuf.modtime = tswapal(target_tbuf->modtime);
9075                 unlock_user_struct(target_tbuf, arg2, 0);
9076                 host_tbuf = &tbuf;
9077             } else {
9078                 host_tbuf = NULL;
9079             }
9080             if (!(p = lock_user_string(arg1)))
9081                 return -TARGET_EFAULT;
9082             ret = get_errno(utime(p, host_tbuf));
9083             unlock_user(p, arg1, 0);
9084         }
9085         return ret;
9086 #endif
9087 #ifdef TARGET_NR_utimes
9088     case TARGET_NR_utimes:
9089         {
9090             struct timeval *tvp, tv[2];
9091             if (arg2) {
9092                 if (copy_from_user_timeval(&tv[0], arg2)
9093                     || copy_from_user_timeval(&tv[1],
9094                                               arg2 + sizeof(struct target_timeval)))
9095                     return -TARGET_EFAULT;
9096                 tvp = tv;
9097             } else {
9098                 tvp = NULL;
9099             }
9100             if (!(p = lock_user_string(arg1)))
9101                 return -TARGET_EFAULT;
9102             ret = get_errno(utimes(p, tvp));
9103             unlock_user(p, arg1, 0);
9104         }
9105         return ret;
9106 #endif
9107 #if defined(TARGET_NR_futimesat)
9108     case TARGET_NR_futimesat:
9109         {
9110             struct timeval *tvp, tv[2];
9111             if (arg3) {
9112                 if (copy_from_user_timeval(&tv[0], arg3)
9113                     || copy_from_user_timeval(&tv[1],
9114                                               arg3 + sizeof(struct target_timeval)))
9115                     return -TARGET_EFAULT;
9116                 tvp = tv;
9117             } else {
9118                 tvp = NULL;
9119             }
9120             if (!(p = lock_user_string(arg2))) {
9121                 return -TARGET_EFAULT;
9122             }
9123             ret = get_errno(futimesat(arg1, path(p), tvp));
9124             unlock_user(p, arg2, 0);
9125         }
9126         return ret;
9127 #endif
9128 #ifdef TARGET_NR_access
9129     case TARGET_NR_access:
9130         if (!(p = lock_user_string(arg1))) {
9131             return -TARGET_EFAULT;
9132         }
9133         ret = get_errno(access(path(p), arg2));
9134         unlock_user(p, arg1, 0);
9135         return ret;
9136 #endif
9137 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9138     case TARGET_NR_faccessat:
9139         if (!(p = lock_user_string(arg2))) {
9140             return -TARGET_EFAULT;
9141         }
9142         ret = get_errno(faccessat(arg1, p, arg3, 0));
9143         unlock_user(p, arg2, 0);
9144         return ret;
9145 #endif
9146 #if defined(TARGET_NR_faccessat2)
9147     case TARGET_NR_faccessat2:
9148         if (!(p = lock_user_string(arg2))) {
9149             return -TARGET_EFAULT;
9150         }
9151         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9152         unlock_user(p, arg2, 0);
9153         return ret;
9154 #endif
9155 #ifdef TARGET_NR_nice /* not on alpha */
9156     case TARGET_NR_nice:
9157         return get_errno(nice(arg1));
9158 #endif
9159     case TARGET_NR_sync:
9160         sync();
9161         return 0;
9162 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9163     case TARGET_NR_syncfs:
9164         return get_errno(syncfs(arg1));
9165 #endif
9166     case TARGET_NR_kill:
9167         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9168 #ifdef TARGET_NR_rename
9169     case TARGET_NR_rename:
9170         {
9171             void *p2;
9172             p = lock_user_string(arg1);
9173             p2 = lock_user_string(arg2);
9174             if (!p || !p2)
9175                 ret = -TARGET_EFAULT;
9176             else
9177                 ret = get_errno(rename(p, p2));
9178             unlock_user(p2, arg2, 0);
9179             unlock_user(p, arg1, 0);
9180         }
9181         return ret;
9182 #endif
9183 #if defined(TARGET_NR_renameat)
9184     case TARGET_NR_renameat:
9185         {
9186             void *p2;
9187             p  = lock_user_string(arg2);
9188             p2 = lock_user_string(arg4);
9189             if (!p || !p2)
9190                 ret = -TARGET_EFAULT;
9191             else
9192                 ret = get_errno(renameat(arg1, p, arg3, p2));
9193             unlock_user(p2, arg4, 0);
9194             unlock_user(p, arg2, 0);
9195         }
9196         return ret;
9197 #endif
9198 #if defined(TARGET_NR_renameat2)
9199     case TARGET_NR_renameat2:
9200         {
9201             void *p2;
9202             p  = lock_user_string(arg2);
9203             p2 = lock_user_string(arg4);
9204             if (!p || !p2) {
9205                 ret = -TARGET_EFAULT;
9206             } else {
9207                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9208             }
9209             unlock_user(p2, arg4, 0);
9210             unlock_user(p, arg2, 0);
9211         }
9212         return ret;
9213 #endif
9214 #ifdef TARGET_NR_mkdir
9215     case TARGET_NR_mkdir:
9216         if (!(p = lock_user_string(arg1)))
9217             return -TARGET_EFAULT;
9218         ret = get_errno(mkdir(p, arg2));
9219         unlock_user(p, arg1, 0);
9220         return ret;
9221 #endif
9222 #if defined(TARGET_NR_mkdirat)
9223     case TARGET_NR_mkdirat:
9224         if (!(p = lock_user_string(arg2)))
9225             return -TARGET_EFAULT;
9226         ret = get_errno(mkdirat(arg1, p, arg3));
9227         unlock_user(p, arg2, 0);
9228         return ret;
9229 #endif
9230 #ifdef TARGET_NR_rmdir
9231     case TARGET_NR_rmdir:
9232         if (!(p = lock_user_string(arg1)))
9233             return -TARGET_EFAULT;
9234         ret = get_errno(rmdir(p));
9235         unlock_user(p, arg1, 0);
9236         return ret;
9237 #endif
9238     case TARGET_NR_dup:
9239         ret = get_errno(dup(arg1));
9240         if (ret >= 0) {
9241             fd_trans_dup(arg1, ret);
9242         }
9243         return ret;
9244 #ifdef TARGET_NR_pipe
9245     case TARGET_NR_pipe:
9246         return do_pipe(cpu_env, arg1, 0, 0);
9247 #endif
9248 #ifdef TARGET_NR_pipe2
9249     case TARGET_NR_pipe2:
9250         return do_pipe(cpu_env, arg1,
9251                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9252 #endif
9253     case TARGET_NR_times:
9254         {
9255             struct target_tms *tmsp;
9256             struct tms tms;
9257             ret = get_errno(times(&tms));
9258             if (arg1) {
9259                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9260                 if (!tmsp)
9261                     return -TARGET_EFAULT;
9262                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9263                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9264                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9265                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9266             }
9267             if (!is_error(ret))
9268                 ret = host_to_target_clock_t(ret);
9269         }
9270         return ret;
9271     case TARGET_NR_acct:
9272         if (arg1 == 0) {
9273             ret = get_errno(acct(NULL));
9274         } else {
9275             if (!(p = lock_user_string(arg1))) {
9276                 return -TARGET_EFAULT;
9277             }
9278             ret = get_errno(acct(path(p)));
9279             unlock_user(p, arg1, 0);
9280         }
9281         return ret;
9282 #ifdef TARGET_NR_umount2
9283     case TARGET_NR_umount2:
9284         if (!(p = lock_user_string(arg1)))
9285             return -TARGET_EFAULT;
9286         ret = get_errno(umount2(p, arg2));
9287         unlock_user(p, arg1, 0);
9288         return ret;
9289 #endif
9290     case TARGET_NR_ioctl:
9291         return do_ioctl(arg1, arg2, arg3);
9292 #ifdef TARGET_NR_fcntl
9293     case TARGET_NR_fcntl:
9294         return do_fcntl(arg1, arg2, arg3);
9295 #endif
9296     case TARGET_NR_setpgid:
9297         return get_errno(setpgid(arg1, arg2));
9298     case TARGET_NR_umask:
9299         return get_errno(umask(arg1));
9300     case TARGET_NR_chroot:
9301         if (!(p = lock_user_string(arg1)))
9302             return -TARGET_EFAULT;
9303         ret = get_errno(chroot(p));
9304         unlock_user(p, arg1, 0);
9305         return ret;
9306 #ifdef TARGET_NR_dup2
9307     case TARGET_NR_dup2:
9308         ret = get_errno(dup2(arg1, arg2));
9309         if (ret >= 0) {
9310             fd_trans_dup(arg1, arg2);
9311         }
9312         return ret;
9313 #endif
9314 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9315     case TARGET_NR_dup3:
9316     {
9317         int host_flags;
9318 
9319         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9320             return -TARGET_EINVAL;
9321         }
9322         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9323         ret = get_errno(dup3(arg1, arg2, host_flags));
9324         if (ret >= 0) {
9325             fd_trans_dup(arg1, arg2);
9326         }
9327         return ret;
9328     }
9329 #endif
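    /*
     * Note: dup3 only accepts TARGET_O_CLOEXEC in arg3; any other bit is
     * rejected with -TARGET_EINVAL before the host dup3() is attempted, and
     * the fd translation table is updated on success just as for dup2.
     */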
9330 #ifdef TARGET_NR_getppid /* not on alpha */
9331     case TARGET_NR_getppid:
9332         return get_errno(getppid());
9333 #endif
9334 #ifdef TARGET_NR_getpgrp
9335     case TARGET_NR_getpgrp:
9336         return get_errno(getpgrp());
9337 #endif
9338     case TARGET_NR_setsid:
9339         return get_errno(setsid());
9340 #ifdef TARGET_NR_sigaction
9341     case TARGET_NR_sigaction:
9342         {
9343 #if defined(TARGET_MIPS)
9344             struct target_sigaction act, oact, *pact, *old_act;
9345 
9346             if (arg2) {
9347                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9348                     return -TARGET_EFAULT;
9349                 act._sa_handler = old_act->_sa_handler;
9350                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9351                 act.sa_flags = old_act->sa_flags;
9352                 unlock_user_struct(old_act, arg2, 0);
9353                 pact = &act;
9354             } else {
9355                 pact = NULL;
9356             }
9357 
9358             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9359 
9360             if (!is_error(ret) && arg3) {
9361                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9362                     return -TARGET_EFAULT;
9363                 old_act->_sa_handler = oact._sa_handler;
9364                 old_act->sa_flags = oact.sa_flags;
9365                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9366                 old_act->sa_mask.sig[1] = 0;
9367                 old_act->sa_mask.sig[2] = 0;
9368                 old_act->sa_mask.sig[3] = 0;
9369                 unlock_user_struct(old_act, arg3, 1);
9370             }
9371 #else
9372             struct target_old_sigaction *old_act;
9373             struct target_sigaction act, oact, *pact;
9374             if (arg2) {
9375                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9376                     return -TARGET_EFAULT;
9377                 act._sa_handler = old_act->_sa_handler;
9378                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9379                 act.sa_flags = old_act->sa_flags;
9380 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9381                 act.sa_restorer = old_act->sa_restorer;
9382 #endif
9383                 unlock_user_struct(old_act, arg2, 0);
9384                 pact = &act;
9385             } else {
9386                 pact = NULL;
9387             }
9388             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9389             if (!is_error(ret) && arg3) {
9390                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9391                     return -TARGET_EFAULT;
9392                 old_act->_sa_handler = oact._sa_handler;
9393                 old_act->sa_mask = oact.sa_mask.sig[0];
9394                 old_act->sa_flags = oact.sa_flags;
9395 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9396                 old_act->sa_restorer = oact.sa_restorer;
9397 #endif
9398                 unlock_user_struct(old_act, arg3, 1);
9399             }
9400 #endif
9401         }
9402         return ret;
9403 #endif
9404     case TARGET_NR_rt_sigaction:
9405         {
9406             /*
9407              * For Alpha and SPARC this is a 5 argument syscall, with
9408              * a 'restorer' parameter which must be copied into the
9409              * sa_restorer field of the sigaction struct.
9410              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9411              * and arg5 is the sigsetsize.
9412              */
9413 #if defined(TARGET_ALPHA)
9414             target_ulong sigsetsize = arg4;
9415             target_ulong restorer = arg5;
9416 #elif defined(TARGET_SPARC)
9417             target_ulong restorer = arg4;
9418             target_ulong sigsetsize = arg5;
9419 #else
9420             target_ulong sigsetsize = arg4;
9421             target_ulong restorer = 0;
9422 #endif
9423             struct target_sigaction *act = NULL;
9424             struct target_sigaction *oact = NULL;
9425 
9426             if (sigsetsize != sizeof(target_sigset_t)) {
9427                 return -TARGET_EINVAL;
9428             }
9429             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9430                 return -TARGET_EFAULT;
9431             }
9432             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9433                 ret = -TARGET_EFAULT;
9434             } else {
9435                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9436                 if (oact) {
9437                     unlock_user_struct(oact, arg3, 1);
9438                 }
9439             }
9440             if (act) {
9441                 unlock_user_struct(act, arg2, 0);
9442             }
9443         }
9444         return ret;
9445 #ifdef TARGET_NR_sgetmask /* not on alpha */
9446     case TARGET_NR_sgetmask:
9447         {
9448             sigset_t cur_set;
9449             abi_ulong target_set;
9450             ret = do_sigprocmask(0, NULL, &cur_set);
9451             if (!ret) {
9452                 host_to_target_old_sigset(&target_set, &cur_set);
9453                 ret = target_set;
9454             }
9455         }
9456         return ret;
9457 #endif
9458 #ifdef TARGET_NR_ssetmask /* not on alpha */
9459     case TARGET_NR_ssetmask:
9460         {
9461             sigset_t set, oset;
9462             abi_ulong target_set = arg1;
9463             target_to_host_old_sigset(&set, &target_set);
9464             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9465             if (!ret) {
9466                 host_to_target_old_sigset(&target_set, &oset);
9467                 ret = target_set;
9468             }
9469         }
9470         return ret;
9471 #endif
9472 #ifdef TARGET_NR_sigprocmask
9473     case TARGET_NR_sigprocmask:
9474         {
9475 #if defined(TARGET_ALPHA)
9476             sigset_t set, oldset;
9477             abi_ulong mask;
9478             int how;
9479 
9480             switch (arg1) {
9481             case TARGET_SIG_BLOCK:
9482                 how = SIG_BLOCK;
9483                 break;
9484             case TARGET_SIG_UNBLOCK:
9485                 how = SIG_UNBLOCK;
9486                 break;
9487             case TARGET_SIG_SETMASK:
9488                 how = SIG_SETMASK;
9489                 break;
9490             default:
9491                 return -TARGET_EINVAL;
9492             }
9493             mask = arg2;
9494             target_to_host_old_sigset(&set, &mask);
9495 
9496             ret = do_sigprocmask(how, &set, &oldset);
9497             if (!is_error(ret)) {
9498                 host_to_target_old_sigset(&mask, &oldset);
9499                 ret = mask;
9500                 cpu_env->ir[IR_V0] = 0; /* force no error */
9501             }
9502 #else
9503             sigset_t set, oldset, *set_ptr;
9504             int how;
9505 
9506             if (arg2) {
9507                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9508                 if (!p) {
9509                     return -TARGET_EFAULT;
9510                 }
9511                 target_to_host_old_sigset(&set, p);
9512                 unlock_user(p, arg2, 0);
9513                 set_ptr = &set;
9514                 switch (arg1) {
9515                 case TARGET_SIG_BLOCK:
9516                     how = SIG_BLOCK;
9517                     break;
9518                 case TARGET_SIG_UNBLOCK:
9519                     how = SIG_UNBLOCK;
9520                     break;
9521                 case TARGET_SIG_SETMASK:
9522                     how = SIG_SETMASK;
9523                     break;
9524                 default:
9525                     return -TARGET_EINVAL;
9526                 }
9527             } else {
9528                 how = 0;
9529                 set_ptr = NULL;
9530             }
9531             ret = do_sigprocmask(how, set_ptr, &oldset);
9532             if (!is_error(ret) && arg3) {
9533                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9534                     return -TARGET_EFAULT;
9535                 host_to_target_old_sigset(p, &oldset);
9536                 unlock_user(p, arg3, sizeof(target_sigset_t));
9537             }
9538 #endif
9539         }
9540         return ret;
9541 #endif
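    /*
     * Note: on Alpha, sigprocmask returns the old mask directly in the syscall
     * result and clears IR_V0 to signal success, whereas the generic path
     * copies the old mask out through the pointer in arg3.
     */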
9542     case TARGET_NR_rt_sigprocmask:
9543         {
9544             int how = arg1;
9545             sigset_t set, oldset, *set_ptr;
9546 
9547             if (arg4 != sizeof(target_sigset_t)) {
9548                 return -TARGET_EINVAL;
9549             }
9550 
9551             if (arg2) {
9552                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9553                 if (!p) {
9554                     return -TARGET_EFAULT;
9555                 }
9556                 target_to_host_sigset(&set, p);
9557                 unlock_user(p, arg2, 0);
9558                 set_ptr = &set;
9559                 switch(how) {
9560                 case TARGET_SIG_BLOCK:
9561                     how = SIG_BLOCK;
9562                     break;
9563                 case TARGET_SIG_UNBLOCK:
9564                     how = SIG_UNBLOCK;
9565                     break;
9566                 case TARGET_SIG_SETMASK:
9567                     how = SIG_SETMASK;
9568                     break;
9569                 default:
9570                     return -TARGET_EINVAL;
9571                 }
9572             } else {
9573                 how = 0;
9574                 set_ptr = NULL;
9575             }
9576             ret = do_sigprocmask(how, set_ptr, &oldset);
9577             if (!is_error(ret) && arg3) {
9578                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9579                     return -TARGET_EFAULT;
9580                 host_to_target_sigset(p, &oldset);
9581                 unlock_user(p, arg3, sizeof(target_sigset_t));
9582             }
9583         }
9584         return ret;
9585 #ifdef TARGET_NR_sigpending
9586     case TARGET_NR_sigpending:
9587         {
9588             sigset_t set;
9589             ret = get_errno(sigpending(&set));
9590             if (!is_error(ret)) {
9591                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9592                     return -TARGET_EFAULT;
9593                 host_to_target_old_sigset(p, &set);
9594                 unlock_user(p, arg1, sizeof(target_sigset_t));
9595             }
9596         }
9597         return ret;
9598 #endif
9599     case TARGET_NR_rt_sigpending:
9600         {
9601             sigset_t set;
9602 
9603             /* Yes, this check is >, not != like most. We follow the
9604              * kernel's logic here: it implements NR_sigpending through
9605              * the same code path, and in that case the old_sigset_t is
9606              * smaller in size.
9607              */
9608             if (arg2 > sizeof(target_sigset_t)) {
9609                 return -TARGET_EINVAL;
9610             }
9611 
9612             ret = get_errno(sigpending(&set));
9613             if (!is_error(ret)) {
9614                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9615                     return -TARGET_EFAULT;
9616                 host_to_target_sigset(p, &set);
9617                 unlock_user(p, arg1, sizeof(target_sigset_t));
9618             }
9619         }
9620         return ret;
9621 #ifdef TARGET_NR_sigsuspend
9622     case TARGET_NR_sigsuspend:
9623         {
9624             sigset_t *set;
9625 
9626 #if defined(TARGET_ALPHA)
9627             TaskState *ts = cpu->opaque;
9628             /* target_to_host_old_sigset will bswap back */
9629             abi_ulong mask = tswapal(arg1);
9630             set = &ts->sigsuspend_mask;
9631             target_to_host_old_sigset(set, &mask);
9632 #else
9633             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9634             if (ret != 0) {
9635                 return ret;
9636             }
9637 #endif
9638             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9639             finish_sigsuspend_mask(ret);
9640         }
9641         return ret;
9642 #endif
9643     case TARGET_NR_rt_sigsuspend:
9644         {
9645             sigset_t *set;
9646 
9647             ret = process_sigsuspend_mask(&set, arg1, arg2);
9648             if (ret != 0) {
9649                 return ret;
9650             }
9651             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9652             finish_sigsuspend_mask(ret);
9653         }
9654         return ret;
9655 #ifdef TARGET_NR_rt_sigtimedwait
9656     case TARGET_NR_rt_sigtimedwait:
9657         {
9658             sigset_t set;
9659             struct timespec uts, *puts;
9660             siginfo_t uinfo;
9661 
9662             if (arg4 != sizeof(target_sigset_t)) {
9663                 return -TARGET_EINVAL;
9664             }
9665 
9666             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9667                 return -TARGET_EFAULT;
9668             target_to_host_sigset(&set, p);
9669             unlock_user(p, arg1, 0);
9670             if (arg3) {
9671                 puts = &uts;
9672                 if (target_to_host_timespec(puts, arg3)) {
9673                     return -TARGET_EFAULT;
9674                 }
9675             } else {
9676                 puts = NULL;
9677             }
9678             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9679                                                  SIGSET_T_SIZE));
9680             if (!is_error(ret)) {
9681                 if (arg2) {
9682                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9683                                   0);
9684                     if (!p) {
9685                         return -TARGET_EFAULT;
9686                     }
9687                     host_to_target_siginfo(p, &uinfo);
9688                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9689                 }
9690                 ret = host_to_target_signal(ret);
9691             }
9692         }
9693         return ret;
9694 #endif
9695 #ifdef TARGET_NR_rt_sigtimedwait_time64
9696     case TARGET_NR_rt_sigtimedwait_time64:
9697         {
9698             sigset_t set;
9699             struct timespec uts, *puts;
9700             siginfo_t uinfo;
9701 
9702             if (arg4 != sizeof(target_sigset_t)) {
9703                 return -TARGET_EINVAL;
9704             }
9705 
9706             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9707             if (!p) {
9708                 return -TARGET_EFAULT;
9709             }
9710             target_to_host_sigset(&set, p);
9711             unlock_user(p, arg1, 0);
9712             if (arg3) {
9713                 puts = &uts;
9714                 if (target_to_host_timespec64(puts, arg3)) {
9715                     return -TARGET_EFAULT;
9716                 }
9717             } else {
9718                 puts = NULL;
9719             }
9720             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9721                                                  SIGSET_T_SIZE));
9722             if (!is_error(ret)) {
9723                 if (arg2) {
9724                     p = lock_user(VERIFY_WRITE, arg2,
9725                                   sizeof(target_siginfo_t), 0);
9726                     if (!p) {
9727                         return -TARGET_EFAULT;
9728                     }
9729                     host_to_target_siginfo(p, &uinfo);
9730                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9731                 }
9732                 ret = host_to_target_signal(ret);
9733             }
9734         }
9735         return ret;
9736 #endif
9737     case TARGET_NR_rt_sigqueueinfo:
9738         {
9739             siginfo_t uinfo;
9740 
9741             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9742             if (!p) {
9743                 return -TARGET_EFAULT;
9744             }
9745             target_to_host_siginfo(&uinfo, p);
9746             unlock_user(p, arg3, 0);
9747             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9748         }
9749         return ret;
9750     case TARGET_NR_rt_tgsigqueueinfo:
9751         {
9752             siginfo_t uinfo;
9753 
9754             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9755             if (!p) {
9756                 return -TARGET_EFAULT;
9757             }
9758             target_to_host_siginfo(&uinfo, p);
9759             unlock_user(p, arg4, 0);
9760             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9761         }
9762         return ret;
9763 #ifdef TARGET_NR_sigreturn
9764     case TARGET_NR_sigreturn:
9765         if (block_signals()) {
9766             return -QEMU_ERESTARTSYS;
9767         }
9768         return do_sigreturn(cpu_env);
9769 #endif
9770     case TARGET_NR_rt_sigreturn:
9771         if (block_signals()) {
9772             return -QEMU_ERESTARTSYS;
9773         }
9774         return do_rt_sigreturn(cpu_env);
9775     case TARGET_NR_sethostname:
9776         if (!(p = lock_user_string(arg1)))
9777             return -TARGET_EFAULT;
9778         ret = get_errno(sethostname(p, arg2));
9779         unlock_user(p, arg1, 0);
9780         return ret;
9781 #ifdef TARGET_NR_setrlimit
9782     case TARGET_NR_setrlimit:
9783         {
9784             int resource = target_to_host_resource(arg1);
9785             struct target_rlimit *target_rlim;
9786             struct rlimit rlim;
9787             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9788                 return -TARGET_EFAULT;
9789             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9790             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9791             unlock_user_struct(target_rlim, arg2, 0);
9792             /*
9793              * If we just passed through resource limit settings for memory then
9794              * they would also apply to QEMU's own allocations, and QEMU will
9795              * crash or hang or die if its allocations fail. Ideally we would
9796              * track the guest allocations in QEMU and apply the limits ourselves.
9797              * For now, just tell the guest the call succeeded but don't actually
9798              * limit anything.
9799              */
9800             if (resource != RLIMIT_AS &&
9801                 resource != RLIMIT_DATA &&
9802                 resource != RLIMIT_STACK) {
9803                 return get_errno(setrlimit(resource, &rlim));
9804             } else {
9805                 return 0;
9806             }
9807         }
9808 #endif
9809 #ifdef TARGET_NR_getrlimit
9810     case TARGET_NR_getrlimit:
9811         {
9812             int resource = target_to_host_resource(arg1);
9813             struct target_rlimit *target_rlim;
9814             struct rlimit rlim;
9815 
9816             ret = get_errno(getrlimit(resource, &rlim));
9817             if (!is_error(ret)) {
9818                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9819                     return -TARGET_EFAULT;
9820                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9821                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9822                 unlock_user_struct(target_rlim, arg2, 1);
9823             }
9824         }
9825         return ret;
9826 #endif
9827     case TARGET_NR_getrusage:
9828         {
9829             struct rusage rusage;
9830             ret = get_errno(getrusage(arg1, &rusage));
9831             if (!is_error(ret)) {
9832                 ret = host_to_target_rusage(arg2, &rusage);
9833             }
9834         }
9835         return ret;
9836 #if defined(TARGET_NR_gettimeofday)
9837     case TARGET_NR_gettimeofday:
9838         {
9839             struct timeval tv;
9840             struct timezone tz;
9841 
9842             ret = get_errno(gettimeofday(&tv, &tz));
9843             if (!is_error(ret)) {
9844                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9845                     return -TARGET_EFAULT;
9846                 }
9847                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9848                     return -TARGET_EFAULT;
9849                 }
9850             }
9851         }
9852         return ret;
9853 #endif
9854 #if defined(TARGET_NR_settimeofday)
9855     case TARGET_NR_settimeofday:
9856         {
9857             struct timeval tv, *ptv = NULL;
9858             struct timezone tz, *ptz = NULL;
9859 
9860             if (arg1) {
9861                 if (copy_from_user_timeval(&tv, arg1)) {
9862                     return -TARGET_EFAULT;
9863                 }
9864                 ptv = &tv;
9865             }
9866 
9867             if (arg2) {
9868                 if (copy_from_user_timezone(&tz, arg2)) {
9869                     return -TARGET_EFAULT;
9870                 }
9871                 ptz = &tz;
9872             }
9873 
9874             return get_errno(settimeofday(ptv, ptz));
9875         }
9876 #endif
9877 #if defined(TARGET_NR_select)
9878     case TARGET_NR_select:
9879 #if defined(TARGET_WANT_NI_OLD_SELECT)
9880         /* Some architectures used to implement old_select here
9881          * but now return ENOSYS for it.
9882          */
9883         ret = -TARGET_ENOSYS;
9884 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9885         ret = do_old_select(arg1);
9886 #else
9887         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9888 #endif
9889         return ret;
9890 #endif
9891 #ifdef TARGET_NR_pselect6
9892     case TARGET_NR_pselect6:
9893         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9894 #endif
9895 #ifdef TARGET_NR_pselect6_time64
9896     case TARGET_NR_pselect6_time64:
9897         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9898 #endif
9899 #ifdef TARGET_NR_symlink
9900     case TARGET_NR_symlink:
9901         {
9902             void *p2;
9903             p = lock_user_string(arg1);
9904             p2 = lock_user_string(arg2);
9905             if (!p || !p2)
9906                 ret = -TARGET_EFAULT;
9907             else
9908                 ret = get_errno(symlink(p, p2));
9909             unlock_user(p2, arg2, 0);
9910             unlock_user(p, arg1, 0);
9911         }
9912         return ret;
9913 #endif
9914 #if defined(TARGET_NR_symlinkat)
9915     case TARGET_NR_symlinkat:
9916         {
9917             void *p2;
9918             p  = lock_user_string(arg1);
9919             p2 = lock_user_string(arg3);
9920             if (!p || !p2)
9921                 ret = -TARGET_EFAULT;
9922             else
9923                 ret = get_errno(symlinkat(p, arg2, p2));
9924             unlock_user(p2, arg3, 0);
9925             unlock_user(p, arg1, 0);
9926         }
9927         return ret;
9928 #endif
9929 #ifdef TARGET_NR_readlink
9930     case TARGET_NR_readlink:
9931         {
9932             void *p2;
9933             p = lock_user_string(arg1);
9934             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9935             if (!p || !p2) {
9936                 ret = -TARGET_EFAULT;
9937             } else if (!arg3) {
9938                 /* Short circuit this for the magic exe check. */
9939                 ret = -TARGET_EINVAL;
9940             } else if (is_proc_myself((const char *)p, "exe")) {
9941                 char real[PATH_MAX], *temp;
9942                 temp = realpath(exec_path, real);
9943                 /* Return value is # of bytes that we wrote to the buffer. */
9944                 if (temp == NULL) {
9945                     ret = get_errno(-1);
9946                 } else {
9947                     /* Don't worry about sign mismatch as earlier mapping
9948                      * logic would have thrown a bad address error. */
9949                     ret = MIN(strlen(real), arg3);
9950                     /* We cannot NUL terminate the string. */
9951                     memcpy(p2, real, ret);
9952                 }
9953             } else {
9954                 ret = get_errno(readlink(path(p), p2, arg3));
9955             }
9956             unlock_user(p2, arg2, ret);
9957             unlock_user(p, arg1, 0);
9958         }
9959         return ret;
9960 #endif
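    /*
     * The readlink/readlinkat pair special-case "/proc/self/exe": instead of
     * asking the host, they resolve QEMU's exec_path with realpath() and copy
     * at most arg3/arg4 bytes into the guest buffer, without NUL termination
     * (readlink(2) itself never NUL-terminates).
     */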
9961 #if defined(TARGET_NR_readlinkat)
9962     case TARGET_NR_readlinkat:
9963         {
9964             void *p2;
9965             p  = lock_user_string(arg2);
9966             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9967             if (!p || !p2) {
9968                 ret = -TARGET_EFAULT;
9969             } else if (!arg4) {
9970                 /* Short circuit this for the magic exe check. */
9971                 ret = -TARGET_EINVAL;
9972             } else if (is_proc_myself((const char *)p, "exe")) {
9973                 char real[PATH_MAX], *temp;
9974                 temp = realpath(exec_path, real);
9975                 /* Return value is # of bytes that we wrote to the buffer. */
9976                 if (temp == NULL) {
9977                     ret = get_errno(-1);
9978                 } else {
9979                     /* Don't worry about sign mismatch as earlier mapping
9980                      * logic would have thrown a bad address error. */
9981                     ret = MIN(strlen(real), arg4);
9982                     /* We cannot NUL terminate the string. */
9983                     memcpy(p2, real, ret);
9984                 }
9985             } else {
9986                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9987             }
9988             unlock_user(p2, arg3, ret);
9989             unlock_user(p, arg2, 0);
9990         }
9991         return ret;
9992 #endif
9993 #ifdef TARGET_NR_swapon
9994     case TARGET_NR_swapon:
9995         if (!(p = lock_user_string(arg1)))
9996             return -TARGET_EFAULT;
9997         ret = get_errno(swapon(p, arg2));
9998         unlock_user(p, arg1, 0);
9999         return ret;
10000 #endif
10001     case TARGET_NR_reboot:
10002         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10003             /* arg4 must be ignored in all other cases */
10004             p = lock_user_string(arg4);
10005             if (!p) {
10006                 return -TARGET_EFAULT;
10007             }
10008             ret = get_errno(reboot(arg1, arg2, arg3, p));
10009             unlock_user(p, arg4, 0);
10010         } else {
10011             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10012         }
10013         return ret;
10014 #ifdef TARGET_NR_mmap
10015     case TARGET_NR_mmap:
10016 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10017     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10018     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10019     || defined(TARGET_S390X)
10020         {
10021             abi_ulong *v;
10022             abi_ulong v1, v2, v3, v4, v5, v6;
10023             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10024                 return -TARGET_EFAULT;
10025             v1 = tswapal(v[0]);
10026             v2 = tswapal(v[1]);
10027             v3 = tswapal(v[2]);
10028             v4 = tswapal(v[3]);
10029             v5 = tswapal(v[4]);
10030             v6 = tswapal(v[5]);
10031             unlock_user(v, arg1, 0);
10032             ret = get_errno(target_mmap(v1, v2, v3,
10033                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10034                                         v5, v6));
10035         }
10036 #else
10037         /* mmap pointers are always untagged */
10038         ret = get_errno(target_mmap(arg1, arg2, arg3,
10039                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10040                                     arg5,
10041                                     arg6));
10042 #endif
10043         return ret;
10044 #endif
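    /*
     * Illustrative guest-side view of the old-style mmap above: on the targets
     * listed in the #if, the guest passes a single pointer to a block of six
     * abi_ulong values { addr, len, prot, flags, fd, offset }, which is
     * unpacked and byte-swapped with tswapal() before calling target_mmap().
     */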
10045 #ifdef TARGET_NR_mmap2
10046     case TARGET_NR_mmap2:
10047 #ifndef MMAP_SHIFT
10048 #define MMAP_SHIFT 12
10049 #endif
10050         ret = target_mmap(arg1, arg2, arg3,
10051                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10052                           arg5, arg6 << MMAP_SHIFT);
10053         return get_errno(ret);
10054 #endif
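    /*
     * Note: for mmap2 the guest supplies the file offset in units of
     * 1 << MMAP_SHIFT bytes (4096 by default), so arg6 is shifted up before
     * the call; this is what lets 32-bit guests map offsets beyond 4 GiB.
     */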
10055     case TARGET_NR_munmap:
10056         arg1 = cpu_untagged_addr(cpu, arg1);
10057         return get_errno(target_munmap(arg1, arg2));
10058     case TARGET_NR_mprotect:
10059         arg1 = cpu_untagged_addr(cpu, arg1);
10060         {
10061             TaskState *ts = cpu->opaque;
10062             /* Special hack to detect libc making the stack executable.  */
10063             if ((arg3 & PROT_GROWSDOWN)
10064                 && arg1 >= ts->info->stack_limit
10065                 && arg1 <= ts->info->start_stack) {
10066                 arg3 &= ~PROT_GROWSDOWN;
10067                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10068                 arg1 = ts->info->stack_limit;
10069             }
10070         }
10071         return get_errno(target_mprotect(arg1, arg2, arg3));
10072 #ifdef TARGET_NR_mremap
10073     case TARGET_NR_mremap:
10074         arg1 = cpu_untagged_addr(cpu, arg1);
10075         /* mremap new_addr (arg5) is always untagged */
10076         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10077 #endif
10078         /* ??? msync/mlock/munlock are broken for softmmu.  */
10079 #ifdef TARGET_NR_msync
10080     case TARGET_NR_msync:
10081         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10082 #endif
10083 #ifdef TARGET_NR_mlock
10084     case TARGET_NR_mlock:
10085         return get_errno(mlock(g2h(cpu, arg1), arg2));
10086 #endif
10087 #ifdef TARGET_NR_munlock
10088     case TARGET_NR_munlock:
10089         return get_errno(munlock(g2h(cpu, arg1), arg2));
10090 #endif
10091 #ifdef TARGET_NR_mlockall
10092     case TARGET_NR_mlockall:
10093         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10094 #endif
10095 #ifdef TARGET_NR_munlockall
10096     case TARGET_NR_munlockall:
10097         return get_errno(munlockall());
10098 #endif
10099 #ifdef TARGET_NR_truncate
10100     case TARGET_NR_truncate:
10101         if (!(p = lock_user_string(arg1)))
10102             return -TARGET_EFAULT;
10103         ret = get_errno(truncate(p, arg2));
10104         unlock_user(p, arg1, 0);
10105         return ret;
10106 #endif
10107 #ifdef TARGET_NR_ftruncate
10108     case TARGET_NR_ftruncate:
10109         return get_errno(ftruncate(arg1, arg2));
10110 #endif
10111     case TARGET_NR_fchmod:
10112         return get_errno(fchmod(arg1, arg2));
10113 #if defined(TARGET_NR_fchmodat)
10114     case TARGET_NR_fchmodat:
10115         if (!(p = lock_user_string(arg2)))
10116             return -TARGET_EFAULT;
10117         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10118         unlock_user(p, arg2, 0);
10119         return ret;
10120 #endif
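    /*
     * Note: the fchmodat syscall itself carries no flags argument, so the host
     * libc fchmodat() is simply invoked with flags == 0 here.
     */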
10121     case TARGET_NR_getpriority:
10122         /* Note that negative values are valid for getpriority, so we must
10123            differentiate based on errno settings.  */
10124         errno = 0;
10125         ret = getpriority(arg1, arg2);
10126         if (ret == -1 && errno != 0) {
10127             return -host_to_target_errno(errno);
10128         }
10129 #ifdef TARGET_ALPHA
10130         /* Return value is the unbiased priority.  Signal no error.  */
10131         cpu_env->ir[IR_V0] = 0;
10132 #else
10133         /* Return value is a biased priority to avoid negative numbers.  */
10134         ret = 20 - ret;
10135 #endif
10136         return ret;
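    /*
     * Illustrative example for the biasing above: if the host getpriority()
     * reports a nice value of -5, a non-Alpha guest receives 25 (20 - (-5)),
     * keeping the syscall return non-negative; Alpha instead gets the raw
     * value with the error flag cleared via IR_V0.
     */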
10137     case TARGET_NR_setpriority:
10138         return get_errno(setpriority(arg1, arg2, arg3));
10139 #ifdef TARGET_NR_statfs
10140     case TARGET_NR_statfs:
10141         if (!(p = lock_user_string(arg1))) {
10142             return -TARGET_EFAULT;
10143         }
10144         ret = get_errno(statfs(path(p), &stfs));
10145         unlock_user(p, arg1, 0);
10146     convert_statfs:
10147         if (!is_error(ret)) {
10148             struct target_statfs *target_stfs;
10149 
10150             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10151                 return -TARGET_EFAULT;
10152             __put_user(stfs.f_type, &target_stfs->f_type);
10153             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10154             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10155             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10156             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10157             __put_user(stfs.f_files, &target_stfs->f_files);
10158             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10159             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10160             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10161             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10162             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10163 #ifdef _STATFS_F_FLAGS
10164             __put_user(stfs.f_flags, &target_stfs->f_flags);
10165 #else
10166             __put_user(0, &target_stfs->f_flags);
10167 #endif
10168             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10169             unlock_user_struct(target_stfs, arg2, 1);
10170         }
10171         return ret;
10172 #endif
10173 #ifdef TARGET_NR_fstatfs
10174     case TARGET_NR_fstatfs:
10175         ret = get_errno(fstatfs(arg1, &stfs));
10176         goto convert_statfs;
10177 #endif
10178 #ifdef TARGET_NR_statfs64
10179     case TARGET_NR_statfs64:
10180         if (!(p = lock_user_string(arg1))) {
10181             return -TARGET_EFAULT;
10182         }
10183         ret = get_errno(statfs(path(p), &stfs));
10184         unlock_user(p, arg1, 0);
10185     convert_statfs64:
10186         if (!is_error(ret)) {
10187             struct target_statfs64 *target_stfs;
10188 
10189             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10190                 return -TARGET_EFAULT;
10191             __put_user(stfs.f_type, &target_stfs->f_type);
10192             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10193             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10194             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10195             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10196             __put_user(stfs.f_files, &target_stfs->f_files);
10197             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10198             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10199             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10200             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10201             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10202 #ifdef _STATFS_F_FLAGS
10203             __put_user(stfs.f_flags, &target_stfs->f_flags);
10204 #else
10205             __put_user(0, &target_stfs->f_flags);
10206 #endif
10207             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10208             unlock_user_struct(target_stfs, arg3, 1);
10209         }
10210         return ret;
10211     case TARGET_NR_fstatfs64:
10212         ret = get_errno(fstatfs(arg1, &stfs));
10213         goto convert_statfs64;
10214 #endif
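    /*
     * Note: statfs/fstatfs and statfs64/fstatfs64 funnel through the
     * convert_statfs / convert_statfs64 labels above, so the host struct
     * statfs is marshalled to the guest layout in exactly one place per
     * variant.
     */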
10215 #ifdef TARGET_NR_socketcall
10216     case TARGET_NR_socketcall:
10217         return do_socketcall(arg1, arg2);
10218 #endif
10219 #ifdef TARGET_NR_accept
10220     case TARGET_NR_accept:
10221         return do_accept4(arg1, arg2, arg3, 0);
10222 #endif
10223 #ifdef TARGET_NR_accept4
10224     case TARGET_NR_accept4:
10225         return do_accept4(arg1, arg2, arg3, arg4);
10226 #endif
10227 #ifdef TARGET_NR_bind
10228     case TARGET_NR_bind:
10229         return do_bind(arg1, arg2, arg3);
10230 #endif
10231 #ifdef TARGET_NR_connect
10232     case TARGET_NR_connect:
10233         return do_connect(arg1, arg2, arg3);
10234 #endif
10235 #ifdef TARGET_NR_getpeername
10236     case TARGET_NR_getpeername:
10237         return do_getpeername(arg1, arg2, arg3);
10238 #endif
10239 #ifdef TARGET_NR_getsockname
10240     case TARGET_NR_getsockname:
10241         return do_getsockname(arg1, arg2, arg3);
10242 #endif
10243 #ifdef TARGET_NR_getsockopt
10244     case TARGET_NR_getsockopt:
10245         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10246 #endif
10247 #ifdef TARGET_NR_listen
10248     case TARGET_NR_listen:
10249         return get_errno(listen(arg1, arg2));
10250 #endif
10251 #ifdef TARGET_NR_recv
10252     case TARGET_NR_recv:
10253         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10254 #endif
10255 #ifdef TARGET_NR_recvfrom
10256     case TARGET_NR_recvfrom:
10257         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10258 #endif
10259 #ifdef TARGET_NR_recvmsg
10260     case TARGET_NR_recvmsg:
10261         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10262 #endif
10263 #ifdef TARGET_NR_send
10264     case TARGET_NR_send:
10265         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10266 #endif
10267 #ifdef TARGET_NR_sendmsg
10268     case TARGET_NR_sendmsg:
10269         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10270 #endif
10271 #ifdef TARGET_NR_sendmmsg
10272     case TARGET_NR_sendmmsg:
10273         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10274 #endif
10275 #ifdef TARGET_NR_recvmmsg
10276     case TARGET_NR_recvmmsg:
10277         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10278 #endif
10279 #ifdef TARGET_NR_sendto
10280     case TARGET_NR_sendto:
10281         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10282 #endif
10283 #ifdef TARGET_NR_shutdown
10284     case TARGET_NR_shutdown:
10285         return get_errno(shutdown(arg1, arg2));
10286 #endif
10287 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10288     case TARGET_NR_getrandom:
10289         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10290         if (!p) {
10291             return -TARGET_EFAULT;
10292         }
10293         ret = get_errno(getrandom(p, arg2, arg3));
10294         unlock_user(p, arg1, ret);
10295         return ret;
10296 #endif
10297 #ifdef TARGET_NR_socket
10298     case TARGET_NR_socket:
10299         return do_socket(arg1, arg2, arg3);
10300 #endif
10301 #ifdef TARGET_NR_socketpair
10302     case TARGET_NR_socketpair:
10303         return do_socketpair(arg1, arg2, arg3, arg4);
10304 #endif
10305 #ifdef TARGET_NR_setsockopt
10306     case TARGET_NR_setsockopt:
10307         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10308 #endif
10309 #if defined(TARGET_NR_syslog)
10310     case TARGET_NR_syslog:
10311         {
10312             int len = arg3;
10313 
10314             switch (arg1) {
10315             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10316             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10317             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10318             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10319             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10320             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10321             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10322             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10323                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10324             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10325             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10326             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10327                 {
10328                     if (len < 0) {
10329                         return -TARGET_EINVAL;
10330                     }
10331                     if (len == 0) {
10332                         return 0;
10333                     }
10334                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10335                     if (!p) {
10336                         return -TARGET_EFAULT;
10337                     }
10338                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10339                     unlock_user(p, arg2, arg3);
10340                 }
10341                 return ret;
10342             default:
10343                 return -TARGET_EINVAL;
10344             }
10345         }
10346         break;
10347 #endif
10348     case TARGET_NR_setitimer:
10349         {
10350             struct itimerval value, ovalue, *pvalue;
10351 
10352             if (arg2) {
10353                 pvalue = &value;
10354                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10355                     || copy_from_user_timeval(&pvalue->it_value,
10356                                               arg2 + sizeof(struct target_timeval)))
10357                     return -TARGET_EFAULT;
10358             } else {
10359                 pvalue = NULL;
10360             }
10361             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10362             if (!is_error(ret) && arg3) {
10363                 if (copy_to_user_timeval(arg3,
10364                                          &ovalue.it_interval)
10365                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10366                                             &ovalue.it_value))
10367                     return -TARGET_EFAULT;
10368             }
10369         }
10370         return ret;
10371     case TARGET_NR_getitimer:
10372         {
10373             struct itimerval value;
10374 
10375             ret = get_errno(getitimer(arg1, &value));
10376             if (!is_error(ret) && arg2) {
10377                 if (copy_to_user_timeval(arg2,
10378                                          &value.it_interval)
10379                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10380                                             &value.it_value))
10381                     return -TARGET_EFAULT;
10382             }
10383         }
10384         return ret;
10385 #ifdef TARGET_NR_stat
10386     case TARGET_NR_stat:
10387         if (!(p = lock_user_string(arg1))) {
10388             return -TARGET_EFAULT;
10389         }
10390         ret = get_errno(stat(path(p), &st));
10391         unlock_user(p, arg1, 0);
10392         goto do_stat;
10393 #endif
10394 #ifdef TARGET_NR_lstat
10395     case TARGET_NR_lstat:
10396         if (!(p = lock_user_string(arg1))) {
10397             return -TARGET_EFAULT;
10398         }
10399         ret = get_errno(lstat(path(p), &st));
10400         unlock_user(p, arg1, 0);
10401         goto do_stat;
10402 #endif
10403 #ifdef TARGET_NR_fstat
10404     case TARGET_NR_fstat:
10405         {
10406             ret = get_errno(fstat(arg1, &st));
10407 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10408         do_stat:
10409 #endif
10410             if (!is_error(ret)) {
10411                 struct target_stat *target_st;
10412 
10413                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10414                     return -TARGET_EFAULT;
10415                 memset(target_st, 0, sizeof(*target_st));
10416                 __put_user(st.st_dev, &target_st->st_dev);
10417                 __put_user(st.st_ino, &target_st->st_ino);
10418                 __put_user(st.st_mode, &target_st->st_mode);
10419                 __put_user(st.st_uid, &target_st->st_uid);
10420                 __put_user(st.st_gid, &target_st->st_gid);
10421                 __put_user(st.st_nlink, &target_st->st_nlink);
10422                 __put_user(st.st_rdev, &target_st->st_rdev);
10423                 __put_user(st.st_size, &target_st->st_size);
10424                 __put_user(st.st_blksize, &target_st->st_blksize);
10425                 __put_user(st.st_blocks, &target_st->st_blocks);
10426                 __put_user(st.st_atime, &target_st->target_st_atime);
10427                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10428                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10429 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10430                 __put_user(st.st_atim.tv_nsec,
10431                            &target_st->target_st_atime_nsec);
10432                 __put_user(st.st_mtim.tv_nsec,
10433                            &target_st->target_st_mtime_nsec);
10434                 __put_user(st.st_ctim.tv_nsec,
10435                            &target_st->target_st_ctime_nsec);
10436 #endif
10437                 unlock_user_struct(target_st, arg2, 1);
10438             }
10439         }
10440         return ret;
10441 #endif
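    /*
     * Note: stat, lstat and fstat all jump to the shared do_stat label above,
     * so the host-to-guest struct stat conversion (including the optional
     * nanosecond fields) lives in a single block.
     */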
10442     case TARGET_NR_vhangup:
10443         return get_errno(vhangup());
10444 #ifdef TARGET_NR_syscall
10445     case TARGET_NR_syscall:
10446         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10447                           arg6, arg7, arg8, 0);
10448 #endif
10449 #if defined(TARGET_NR_wait4)
10450     case TARGET_NR_wait4:
10451         {
10452             int status;
10453             abi_long status_ptr = arg2;
10454             struct rusage rusage, *rusage_ptr;
10455             abi_ulong target_rusage = arg4;
10456             abi_long rusage_err;
10457             if (target_rusage)
10458                 rusage_ptr = &rusage;
10459             else
10460                 rusage_ptr = NULL;
10461             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10462             if (!is_error(ret)) {
10463                 if (status_ptr && ret) {
10464                     status = host_to_target_waitstatus(status);
10465                     if (put_user_s32(status, status_ptr))
10466                         return -TARGET_EFAULT;
10467                 }
10468                 if (target_rusage) {
10469                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10470                     if (rusage_err) {
10471                         ret = rusage_err;
10472                     }
10473                 }
10474             }
10475         }
10476         return ret;
10477 #endif
10478 #ifdef TARGET_NR_swapoff
10479     case TARGET_NR_swapoff:
10480         if (!(p = lock_user_string(arg1)))
10481             return -TARGET_EFAULT;
10482         ret = get_errno(swapoff(p));
10483         unlock_user(p, arg1, 0);
10484         return ret;
10485 #endif
10486     case TARGET_NR_sysinfo:
10487         {
10488             struct target_sysinfo *target_value;
10489             struct sysinfo value;
10490             ret = get_errno(sysinfo(&value));
10491             if (!is_error(ret) && arg1)
10492             {
10493                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10494                     return -TARGET_EFAULT;
10495                 __put_user(value.uptime, &target_value->uptime);
10496                 __put_user(value.loads[0], &target_value->loads[0]);
10497                 __put_user(value.loads[1], &target_value->loads[1]);
10498                 __put_user(value.loads[2], &target_value->loads[2]);
10499                 __put_user(value.totalram, &target_value->totalram);
10500                 __put_user(value.freeram, &target_value->freeram);
10501                 __put_user(value.sharedram, &target_value->sharedram);
10502                 __put_user(value.bufferram, &target_value->bufferram);
10503                 __put_user(value.totalswap, &target_value->totalswap);
10504                 __put_user(value.freeswap, &target_value->freeswap);
10505                 __put_user(value.procs, &target_value->procs);
10506                 __put_user(value.totalhigh, &target_value->totalhigh);
10507                 __put_user(value.freehigh, &target_value->freehigh);
10508                 __put_user(value.mem_unit, &target_value->mem_unit);
10509                 unlock_user_struct(target_value, arg1, 1);
10510             }
10511         }
10512         return ret;
10513 #ifdef TARGET_NR_ipc
10514     case TARGET_NR_ipc:
10515         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10516 #endif
10517 #ifdef TARGET_NR_semget
10518     case TARGET_NR_semget:
10519         return get_errno(semget(arg1, arg2, arg3));
10520 #endif
10521 #ifdef TARGET_NR_semop
10522     case TARGET_NR_semop:
10523         return do_semtimedop(arg1, arg2, arg3, 0, false);
10524 #endif
10525 #ifdef TARGET_NR_semtimedop
10526     case TARGET_NR_semtimedop:
10527         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10528 #endif
10529 #ifdef TARGET_NR_semtimedop_time64
10530     case TARGET_NR_semtimedop_time64:
10531         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10532 #endif
10533 #ifdef TARGET_NR_semctl
10534     case TARGET_NR_semctl:
10535         return do_semctl(arg1, arg2, arg3, arg4);
10536 #endif
10537 #ifdef TARGET_NR_msgctl
10538     case TARGET_NR_msgctl:
10539         return do_msgctl(arg1, arg2, arg3);
10540 #endif
10541 #ifdef TARGET_NR_msgget
10542     case TARGET_NR_msgget:
10543         return get_errno(msgget(arg1, arg2));
10544 #endif
10545 #ifdef TARGET_NR_msgrcv
10546     case TARGET_NR_msgrcv:
10547         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10548 #endif
10549 #ifdef TARGET_NR_msgsnd
10550     case TARGET_NR_msgsnd:
10551         return do_msgsnd(arg1, arg2, arg3, arg4);
10552 #endif
10553 #ifdef TARGET_NR_shmget
10554     case TARGET_NR_shmget:
10555         return get_errno(shmget(arg1, arg2, arg3));
10556 #endif
10557 #ifdef TARGET_NR_shmctl
10558     case TARGET_NR_shmctl:
10559         return do_shmctl(arg1, arg2, arg3);
10560 #endif
10561 #ifdef TARGET_NR_shmat
10562     case TARGET_NR_shmat:
10563         return do_shmat(cpu_env, arg1, arg2, arg3);
10564 #endif
10565 #ifdef TARGET_NR_shmdt
10566     case TARGET_NR_shmdt:
10567         return do_shmdt(arg1);
10568 #endif
10569     case TARGET_NR_fsync:
10570         return get_errno(fsync(arg1));
10571     case TARGET_NR_clone:
10572         /* Linux manages to have three different orderings for its
10573          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10574          * match the kernel's CONFIG_CLONE_* settings.
10575          * Microblaze is further special in that it uses a sixth
10576          * implicit argument to clone for the TLS pointer.
10577          */
10578 #if defined(TARGET_MICROBLAZE)
10579         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10580 #elif defined(TARGET_CLONE_BACKWARDS)
10581         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10582 #elif defined(TARGET_CLONE_BACKWARDS2)
10583         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10584 #else
10585         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10586 #endif
10587         return ret;
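    /*
     * Rough guide to the orderings above (mirroring the kernel's
     * CONFIG_CLONE_BACKWARDS* variants): the first two do_fork() arguments
     * (presumably flags and the new stack pointer) come from arg1/arg2, or
     * arg2/arg1 for BACKWARDS2, while the remaining args shuffle the
     * parent-tid, child-tid and TLS pointers; the exact do_fork() parameter
     * order is defined elsewhere in this file.
     */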
10588 #ifdef __NR_exit_group
10589         /* new thread calls */
10590     case TARGET_NR_exit_group:
10591         preexit_cleanup(cpu_env, arg1);
10592         return get_errno(exit_group(arg1));
10593 #endif
10594     case TARGET_NR_setdomainname:
10595         if (!(p = lock_user_string(arg1)))
10596             return -TARGET_EFAULT;
10597         ret = get_errno(setdomainname(p, arg2));
10598         unlock_user(p, arg1, 0);
10599         return ret;
10600     case TARGET_NR_uname:
10601         /* no need to transcode because we use the linux syscall */
10602         {
10603             struct new_utsname * buf;
10604 
10605             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10606                 return -TARGET_EFAULT;
10607             ret = get_errno(sys_uname(buf));
10608             if (!is_error(ret)) {
10609                 /* Overwrite the native machine name with whatever is being
10610                    emulated. */
10611                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10612                           sizeof(buf->machine));
10613                 /* Allow the user to override the reported release.  */
10614                 if (qemu_uname_release && *qemu_uname_release) {
10615                     g_strlcpy(buf->release, qemu_uname_release,
10616                               sizeof(buf->release));
10617                 }
10618             }
10619             unlock_user_struct(buf, arg1, 1);
10620         }
10621         return ret;
10622 #ifdef TARGET_I386
10623     case TARGET_NR_modify_ldt:
10624         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10625 #if !defined(TARGET_X86_64)
10626     case TARGET_NR_vm86:
10627         return do_vm86(cpu_env, arg1, arg2);
10628 #endif
10629 #endif
10630 #if defined(TARGET_NR_adjtimex)
10631     case TARGET_NR_adjtimex:
10632         {
10633             struct timex host_buf;
10634 
10635             if (target_to_host_timex(&host_buf, arg1) != 0) {
10636                 return -TARGET_EFAULT;
10637             }
10638             ret = get_errno(adjtimex(&host_buf));
10639             if (!is_error(ret)) {
10640                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10641                     return -TARGET_EFAULT;
10642                 }
10643             }
10644         }
10645         return ret;
10646 #endif
10647 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10648     case TARGET_NR_clock_adjtime:
10649         {
10650             struct timex htx, *phtx = &htx;
10651 
10652             if (target_to_host_timex(phtx, arg2) != 0) {
10653                 return -TARGET_EFAULT;
10654             }
10655             ret = get_errno(clock_adjtime(arg1, phtx));
10656             if (!is_error(ret) && phtx) {
10657                 if (host_to_target_timex(arg2, phtx) != 0) {
10658                     return -TARGET_EFAULT;
10659                 }
10660             }
10661         }
10662         return ret;
10663 #endif
10664 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10665     case TARGET_NR_clock_adjtime64:
10666         {
10667             struct timex htx;
10668 
10669             if (target_to_host_timex64(&htx, arg2) != 0) {
10670                 return -TARGET_EFAULT;
10671             }
10672             ret = get_errno(clock_adjtime(arg1, &htx));
10673             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10674                 return -TARGET_EFAULT;
10675             }
10676         }
10677         return ret;
10678 #endif
10679     case TARGET_NR_getpgid:
10680         return get_errno(getpgid(arg1));
10681     case TARGET_NR_fchdir:
10682         return get_errno(fchdir(arg1));
10683     case TARGET_NR_personality:
10684         return get_errno(personality(arg1));
10685 #ifdef TARGET_NR__llseek /* Not on alpha */
10686     case TARGET_NR__llseek:
10687         {
10688             int64_t res;
10689 #if !defined(__NR_llseek)
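                  /* No host _llseek: rebuild the 64-bit offset and use plain lseek. */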
10690             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10691             if (res == -1) {
10692                 ret = get_errno(res);
10693             } else {
10694                 ret = 0;
10695             }
10696 #else
10697             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10698 #endif
10699             if ((ret == 0) && put_user_s64(res, arg4)) {
10700                 return -TARGET_EFAULT;
10701             }
10702         }
10703         return ret;
10704 #endif
10705 #ifdef TARGET_NR_getdents
10706     case TARGET_NR_getdents:
10707         return do_getdents(arg1, arg2, arg3);
10708 #endif /* TARGET_NR_getdents */
10709 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10710     case TARGET_NR_getdents64:
10711         return do_getdents64(arg1, arg2, arg3);
10712 #endif /* TARGET_NR_getdents64 */
10713 #if defined(TARGET_NR__newselect)
10714     case TARGET_NR__newselect:
10715         return do_select(arg1, arg2, arg3, arg4, arg5);
10716 #endif
10717 #ifdef TARGET_NR_poll
10718     case TARGET_NR_poll:
10719         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10720 #endif
10721 #ifdef TARGET_NR_ppoll
10722     case TARGET_NR_ppoll:
10723         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10724 #endif
10725 #ifdef TARGET_NR_ppoll_time64
10726     case TARGET_NR_ppoll_time64:
10727         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10728 #endif
10729     case TARGET_NR_flock:
10730         /* NOTE: the flock constant seems to be the same for every
10731            Linux platform */
10732         return get_errno(safe_flock(arg1, arg2));
10733     case TARGET_NR_readv:
10734         {
10735             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10736             if (vec != NULL) {
10737                 ret = get_errno(safe_readv(arg1, vec, arg3));
10738                 unlock_iovec(vec, arg2, arg3, 1);
10739             } else {
10740                 ret = -host_to_target_errno(errno);
10741             }
10742         }
10743         return ret;
10744     case TARGET_NR_writev:
10745         {
10746             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10747             if (vec != NULL) {
10748                 ret = get_errno(safe_writev(arg1, vec, arg3));
10749                 unlock_iovec(vec, arg2, arg3, 0);
10750             } else {
10751                 ret = -host_to_target_errno(errno);
10752             }
10753         }
10754         return ret;
10755 #if defined(TARGET_NR_preadv)
10756     case TARGET_NR_preadv:
10757         {
10758             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10759             if (vec != NULL) {
10760                 unsigned long low, high;
10761 
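                      /* Split the target offset pair into the low/high words safe_preadv expects. */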
10762                 target_to_host_low_high(arg4, arg5, &low, &high);
10763                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10764                 unlock_iovec(vec, arg2, arg3, 1);
10765             } else {
10766                 ret = -host_to_target_errno(errno);
10767             }
10768         }
10769         return ret;
10770 #endif
10771 #if defined(TARGET_NR_pwritev)
10772     case TARGET_NR_pwritev:
10773         {
10774             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10775             if (vec != NULL) {
10776                 unsigned long low, high;
10777 
10778                 target_to_host_low_high(arg4, arg5, &low, &high);
10779                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10780                 unlock_iovec(vec, arg2, arg3, 0);
10781             } else {
10782                 ret = -host_to_target_errno(errno);
10783             }
10784         }
10785         return ret;
10786 #endif
10787     case TARGET_NR_getsid:
10788         return get_errno(getsid(arg1));
10789 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10790     case TARGET_NR_fdatasync:
10791         return get_errno(fdatasync(arg1));
10792 #endif
10793     case TARGET_NR_sched_getaffinity:
10794         {
10795             unsigned int mask_size;
10796             unsigned long *mask;
10797 
10798             /*
10799              * sched_getaffinity needs multiples of ulong, so need to take
10800              * care of mismatches between target ulong and host ulong sizes.
10801              */
10802             if (arg2 & (sizeof(abi_ulong) - 1)) {
10803                 return -TARGET_EINVAL;
10804             }
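                  /* Round the buffer size up to a whole number of host ulongs. */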
10805             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10806 
10807             mask = alloca(mask_size);
10808             memset(mask, 0, mask_size);
10809             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10810 
10811             if (!is_error(ret)) {
10812                 if (ret > arg2) {
10813                     /* More data returned than the caller's buffer will fit.
10814                      * This only happens if sizeof(abi_long) < sizeof(long)
10815                      * and the caller passed us a buffer holding an odd number
10816                      * of abi_longs. If the host kernel is actually using the
10817                      * extra 4 bytes then fail EINVAL; otherwise we can just
10818                      * ignore them and only copy the interesting part.
10819                      */
10820                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10821                     if (numcpus > arg2 * 8) {
10822                         return -TARGET_EINVAL;
10823                     }
10824                     ret = arg2;
10825                 }
10826 
10827                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10828                     return -TARGET_EFAULT;
10829                 }
10830             }
10831         }
10832         return ret;
10833     case TARGET_NR_sched_setaffinity:
10834         {
10835             unsigned int mask_size;
10836             unsigned long *mask;
10837 
10838             /*
10839              * sched_setaffinity needs multiples of ulong, so need to take
10840              * care of mismatches between target ulong and host ulong sizes.
10841              */
10842             if (arg2 & (sizeof(abi_ulong) - 1)) {
10843                 return -TARGET_EINVAL;
10844             }
10845             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10846             mask = alloca(mask_size);
10847 
10848             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10849             if (ret) {
10850                 return ret;
10851             }
10852 
10853             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10854         }
10855     case TARGET_NR_getcpu:
10856         {
10857             unsigned cpu, node;
10858             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10859                                        arg2 ? &node : NULL,
10860                                        NULL));
10861             if (is_error(ret)) {
10862                 return ret;
10863             }
10864             if (arg1 && put_user_u32(cpu, arg1)) {
10865                 return -TARGET_EFAULT;
10866             }
10867             if (arg2 && put_user_u32(node, arg2)) {
10868                 return -TARGET_EFAULT;
10869             }
10870         }
10871         return ret;
10872     case TARGET_NR_sched_setparam:
10873         {
10874             struct target_sched_param *target_schp;
10875             struct sched_param schp;
10876 
10877             if (arg2 == 0) {
10878                 return -TARGET_EINVAL;
10879             }
10880             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10881                 return -TARGET_EFAULT;
10882             }
10883             schp.sched_priority = tswap32(target_schp->sched_priority);
10884             unlock_user_struct(target_schp, arg2, 0);
10885             return get_errno(sys_sched_setparam(arg1, &schp));
10886         }
10887     case TARGET_NR_sched_getparam:
10888         {
10889             struct target_sched_param *target_schp;
10890             struct sched_param schp;
10891 
10892             if (arg2 == 0) {
10893                 return -TARGET_EINVAL;
10894             }
10895             ret = get_errno(sys_sched_getparam(arg1, &schp));
10896             if (!is_error(ret)) {
10897                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10898                     return -TARGET_EFAULT;
10899                 }
10900                 target_schp->sched_priority = tswap32(schp.sched_priority);
10901                 unlock_user_struct(target_schp, arg2, 1);
10902             }
10903         }
10904         return ret;
10905     case TARGET_NR_sched_setscheduler:
10906         {
10907             struct target_sched_param *target_schp;
10908             struct sched_param schp;
10909             if (arg3 == 0) {
10910                 return -TARGET_EINVAL;
10911             }
10912             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10913                 return -TARGET_EFAULT;
10914             }
10915             schp.sched_priority = tswap32(target_schp->sched_priority);
10916             unlock_user_struct(target_schp, arg3, 0);
10917             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10918         }
10919     case TARGET_NR_sched_getscheduler:
10920         return get_errno(sys_sched_getscheduler(arg1));
10921     case TARGET_NR_sched_getattr:
10922         {
10923             struct target_sched_attr *target_scha;
10924             struct sched_attr scha;
10925             if (arg2 == 0) {
10926                 return -TARGET_EINVAL;
10927             }
10928             if (arg3 > sizeof(scha)) {
10929                 arg3 = sizeof(scha);
10930             }
10931             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10932             if (!is_error(ret)) {
10933                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10934                 if (!target_scha) {
10935                     return -TARGET_EFAULT;
10936                 }
10937                 target_scha->size = tswap32(scha.size);
10938                 target_scha->sched_policy = tswap32(scha.sched_policy);
10939                 target_scha->sched_flags = tswap64(scha.sched_flags);
10940                 target_scha->sched_nice = tswap32(scha.sched_nice);
10941                 target_scha->sched_priority = tswap32(scha.sched_priority);
10942                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10943                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10944                 target_scha->sched_period = tswap64(scha.sched_period);
10945                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10946                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10947                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10948                 }
10949                 unlock_user(target_scha, arg2, arg3);
10950             }
10951             return ret;
10952         }
10953     case TARGET_NR_sched_setattr:
10954         {
10955             struct target_sched_attr *target_scha;
10956             struct sched_attr scha;
10957             uint32_t size;
10958             int zeroed;
10959             if (arg2 == 0) {
10960                 return -TARGET_EINVAL;
10961             }
10962             if (get_user_u32(size, arg2)) {
10963                 return -TARGET_EFAULT;
10964             }
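                  /* Size zero means the original layout without the utilization clamp fields. */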
10965             if (!size) {
10966                 size = offsetof(struct target_sched_attr, sched_util_min);
10967             }
10968             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10969                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10970                     return -TARGET_EFAULT;
10971                 }
10972                 return -TARGET_E2BIG;
10973             }
10974 
10975             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
10976             if (zeroed < 0) {
10977                 return zeroed;
10978             } else if (zeroed == 0) {
10979                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10980                     return -TARGET_EFAULT;
10981                 }
10982                 return -TARGET_E2BIG;
10983             }
10984             if (size > sizeof(struct target_sched_attr)) {
10985                 size = sizeof(struct target_sched_attr);
10986             }
10987 
10988             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
10989             if (!target_scha) {
10990                 return -TARGET_EFAULT;
10991             }
10992             scha.size = size;
10993             scha.sched_policy = tswap32(target_scha->sched_policy);
10994             scha.sched_flags = tswap64(target_scha->sched_flags);
10995             scha.sched_nice = tswap32(target_scha->sched_nice);
10996             scha.sched_priority = tswap32(target_scha->sched_priority);
10997             scha.sched_runtime = tswap64(target_scha->sched_runtime);
10998             scha.sched_deadline = tswap64(target_scha->sched_deadline);
10999             scha.sched_period = tswap64(target_scha->sched_period);
11000             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11001                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11002                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11003             }
11004             unlock_user(target_scha, arg2, 0);
11005             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11006         }
11007     case TARGET_NR_sched_yield:
11008         return get_errno(sched_yield());
11009     case TARGET_NR_sched_get_priority_max:
11010         return get_errno(sched_get_priority_max(arg1));
11011     case TARGET_NR_sched_get_priority_min:
11012         return get_errno(sched_get_priority_min(arg1));
11013 #ifdef TARGET_NR_sched_rr_get_interval
11014     case TARGET_NR_sched_rr_get_interval:
11015         {
11016             struct timespec ts;
11017             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11018             if (!is_error(ret)) {
11019                 ret = host_to_target_timespec(arg2, &ts);
11020             }
11021         }
11022         return ret;
11023 #endif
11024 #ifdef TARGET_NR_sched_rr_get_interval_time64
11025     case TARGET_NR_sched_rr_get_interval_time64:
11026         {
11027             struct timespec ts;
11028             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11029             if (!is_error(ret)) {
11030                 ret = host_to_target_timespec64(arg2, &ts);
11031             }
11032         }
11033         return ret;
11034 #endif
11035 #if defined(TARGET_NR_nanosleep)
11036     case TARGET_NR_nanosleep:
11037         {
11038             struct timespec req, rem;
11039             target_to_host_timespec(&req, arg1);
11040             ret = get_errno(safe_nanosleep(&req, &rem));
11041             if (is_error(ret) && arg2) {
11042                 host_to_target_timespec(arg2, &rem);
11043             }
11044         }
11045         return ret;
11046 #endif
11047     case TARGET_NR_prctl:
11048         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11050 #ifdef TARGET_NR_arch_prctl
11051     case TARGET_NR_arch_prctl:
11052         return do_arch_prctl(cpu_env, arg1, arg2);
11053 #endif
11054 #ifdef TARGET_NR_pread64
11055     case TARGET_NR_pread64:
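              /* Some 32-bit ABIs pass the 64-bit offset in an aligned register pair, one slot later. */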
11056         if (regpairs_aligned(cpu_env, num)) {
11057             arg4 = arg5;
11058             arg5 = arg6;
11059         }
11060         if (arg2 == 0 && arg3 == 0) {
11061             /* Special-case NULL buffer and zero length, which should succeed */
11062             p = 0;
11063         } else {
11064             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11065             if (!p) {
11066                 return -TARGET_EFAULT;
11067             }
11068         }
11069         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11070         unlock_user(p, arg2, ret);
11071         return ret;
11072     case TARGET_NR_pwrite64:
11073         if (regpairs_aligned(cpu_env, num)) {
11074             arg4 = arg5;
11075             arg5 = arg6;
11076         }
11077         if (arg2 == 0 && arg3 == 0) {
11078             /* Special-case NULL buffer and zero length, which should succeed */
11079             p = 0;
11080         } else {
11081             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11082             if (!p) {
11083                 return -TARGET_EFAULT;
11084             }
11085         }
11086         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11087         unlock_user(p, arg2, 0);
11088         return ret;
11089 #endif
11090     case TARGET_NR_getcwd:
11091         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11092             return -TARGET_EFAULT;
11093         ret = get_errno(sys_getcwd1(p, arg2));
11094         unlock_user(p, arg1, ret);
11095         return ret;
11096     case TARGET_NR_capget:
11097     case TARGET_NR_capset:
11098     {
11099         struct target_user_cap_header *target_header;
11100         struct target_user_cap_data *target_data = NULL;
11101         struct __user_cap_header_struct header;
11102         struct __user_cap_data_struct data[2];
11103         struct __user_cap_data_struct *dataptr = NULL;
11104         int i, target_datalen;
11105         int data_items = 1;
11106 
11107         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11108             return -TARGET_EFAULT;
11109         }
11110         header.version = tswap32(target_header->version);
11111         header.pid = tswap32(target_header->pid);
11112 
11113         if (header.version != _LINUX_CAPABILITY_VERSION) {
11114             /* Versions 2 and up take a pointer to two user_data structs */
11115             data_items = 2;
11116         }
11117 
11118         target_datalen = sizeof(*target_data) * data_items;
11119 
11120         if (arg2) {
11121             if (num == TARGET_NR_capget) {
11122                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11123             } else {
11124                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11125             }
11126             if (!target_data) {
11127                 unlock_user_struct(target_header, arg1, 0);
11128                 return -TARGET_EFAULT;
11129             }
11130 
11131             if (num == TARGET_NR_capset) {
11132                 for (i = 0; i < data_items; i++) {
11133                     data[i].effective = tswap32(target_data[i].effective);
11134                     data[i].permitted = tswap32(target_data[i].permitted);
11135                     data[i].inheritable = tswap32(target_data[i].inheritable);
11136                 }
11137             }
11138 
11139             dataptr = data;
11140         }
11141 
11142         if (num == TARGET_NR_capget) {
11143             ret = get_errno(capget(&header, dataptr));
11144         } else {
11145             ret = get_errno(capset(&header, dataptr));
11146         }
11147 
11148         /* The kernel always updates version for both capget and capset */
11149         target_header->version = tswap32(header.version);
11150         unlock_user_struct(target_header, arg1, 1);
11151 
11152         if (arg2) {
11153             if (num == TARGET_NR_capget) {
11154                 for (i = 0; i < data_items; i++) {
11155                     target_data[i].effective = tswap32(data[i].effective);
11156                     target_data[i].permitted = tswap32(data[i].permitted);
11157                     target_data[i].inheritable = tswap32(data[i].inheritable);
11158                 }
11159                 unlock_user(target_data, arg2, target_datalen);
11160             } else {
11161                 unlock_user(target_data, arg2, 0);
11162             }
11163         }
11164         return ret;
11165     }
11166     case TARGET_NR_sigaltstack:
11167         return do_sigaltstack(arg1, arg2, cpu_env);
11168 
11169 #ifdef CONFIG_SENDFILE
11170 #ifdef TARGET_NR_sendfile
11171     case TARGET_NR_sendfile:
11172     {
11173         off_t *offp = NULL;
11174         off_t off;
11175         if (arg3) {
11176             ret = get_user_sal(off, arg3);
11177             if (is_error(ret)) {
11178                 return ret;
11179             }
11180             offp = &off;
11181         }
11182         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11183         if (!is_error(ret) && arg3) {
11184             abi_long ret2 = put_user_sal(off, arg3);
11185             if (is_error(ret2)) {
11186                 ret = ret2;
11187             }
11188         }
11189         return ret;
11190     }
11191 #endif
11192 #ifdef TARGET_NR_sendfile64
11193     case TARGET_NR_sendfile64:
11194     {
11195         off_t *offp = NULL;
11196         off_t off;
11197         if (arg3) {
11198             ret = get_user_s64(off, arg3);
11199             if (is_error(ret)) {
11200                 return ret;
11201             }
11202             offp = &off;
11203         }
11204         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11205         if (!is_error(ret) && arg3) {
11206             abi_long ret2 = put_user_s64(off, arg3);
11207             if (is_error(ret2)) {
11208                 ret = ret2;
11209             }
11210         }
11211         return ret;
11212     }
11213 #endif
11214 #endif
11215 #ifdef TARGET_NR_vfork
11216     case TARGET_NR_vfork:
11217         return get_errno(do_fork(cpu_env,
11218                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11219                          0, 0, 0, 0));
11220 #endif
11221 #ifdef TARGET_NR_ugetrlimit
11222     case TARGET_NR_ugetrlimit:
11223     {
11224         struct rlimit rlim;
11225         int resource = target_to_host_resource(arg1);
11226         ret = get_errno(getrlimit(resource, &rlim));
11227         if (!is_error(ret)) {
11228             struct target_rlimit *target_rlim;
11229             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11230                 return -TARGET_EFAULT;
11231             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11232             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11233             unlock_user_struct(target_rlim, arg2, 1);
11234         }
11235         return ret;
11236     }
11237 #endif
11238 #ifdef TARGET_NR_truncate64
11239     case TARGET_NR_truncate64:
11240         if (!(p = lock_user_string(arg1)))
11241             return -TARGET_EFAULT;
11242         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11243         unlock_user(p, arg1, 0);
11244         return ret;
11245 #endif
11246 #ifdef TARGET_NR_ftruncate64
11247     case TARGET_NR_ftruncate64:
11248         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11249 #endif
11250 #ifdef TARGET_NR_stat64
11251     case TARGET_NR_stat64:
11252         if (!(p = lock_user_string(arg1))) {
11253             return -TARGET_EFAULT;
11254         }
11255         ret = get_errno(stat(path(p), &st));
11256         unlock_user(p, arg1, 0);
11257         if (!is_error(ret))
11258             ret = host_to_target_stat64(cpu_env, arg2, &st);
11259         return ret;
11260 #endif
11261 #ifdef TARGET_NR_lstat64
11262     case TARGET_NR_lstat64:
11263         if (!(p = lock_user_string(arg1))) {
11264             return -TARGET_EFAULT;
11265         }
11266         ret = get_errno(lstat(path(p), &st));
11267         unlock_user(p, arg1, 0);
11268         if (!is_error(ret))
11269             ret = host_to_target_stat64(cpu_env, arg2, &st);
11270         return ret;
11271 #endif
11272 #ifdef TARGET_NR_fstat64
11273     case TARGET_NR_fstat64:
11274         ret = get_errno(fstat(arg1, &st));
11275         if (!is_error(ret))
11276             ret = host_to_target_stat64(cpu_env, arg2, &st);
11277         return ret;
11278 #endif
11279 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11280 #ifdef TARGET_NR_fstatat64
11281     case TARGET_NR_fstatat64:
11282 #endif
11283 #ifdef TARGET_NR_newfstatat
11284     case TARGET_NR_newfstatat:
11285 #endif
11286         if (!(p = lock_user_string(arg2))) {
11287             return -TARGET_EFAULT;
11288         }
11289         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11290         unlock_user(p, arg2, 0);
11291         if (!is_error(ret))
11292             ret = host_to_target_stat64(cpu_env, arg3, &st);
11293         return ret;
11294 #endif
11295 #if defined(TARGET_NR_statx)
11296     case TARGET_NR_statx:
11297         {
11298             struct target_statx *target_stx;
11299             int dirfd = arg1;
11300             int flags = arg3;
11301 
11302             p = lock_user_string(arg2);
11303             if (p == NULL) {
11304                 return -TARGET_EFAULT;
11305             }
11306 #if defined(__NR_statx)
11307             {
11308                 /*
11309                  * It is assumed that struct statx is architecture independent.
11310                  */
11311                 struct target_statx host_stx;
11312                 int mask = arg4;
11313 
11314                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11315                 if (!is_error(ret)) {
11316                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11317                         unlock_user(p, arg2, 0);
11318                         return -TARGET_EFAULT;
11319                     }
11320                 }
11321 
11322                 if (ret != -TARGET_ENOSYS) {
11323                     unlock_user(p, arg2, 0);
11324                     return ret;
11325                 }
11326             }
11327 #endif
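                  /* No usable host statx: emulate it with fstatat and the fields struct stat provides. */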
11328             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11329             unlock_user(p, arg2, 0);
11330 
11331             if (!is_error(ret)) {
11332                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11333                     return -TARGET_EFAULT;
11334                 }
11335                 memset(target_stx, 0, sizeof(*target_stx));
11336                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11337                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11338                 __put_user(st.st_ino, &target_stx->stx_ino);
11339                 __put_user(st.st_mode, &target_stx->stx_mode);
11340                 __put_user(st.st_uid, &target_stx->stx_uid);
11341                 __put_user(st.st_gid, &target_stx->stx_gid);
11342                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11343                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11344                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11345                 __put_user(st.st_size, &target_stx->stx_size);
11346                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11347                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11348                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11349                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11350                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11351                 unlock_user_struct(target_stx, arg5, 1);
11352             }
11353         }
11354         return ret;
11355 #endif
11356 #ifdef TARGET_NR_lchown
11357     case TARGET_NR_lchown:
11358         if (!(p = lock_user_string(arg1)))
11359             return -TARGET_EFAULT;
11360         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11361         unlock_user(p, arg1, 0);
11362         return ret;
11363 #endif
11364 #ifdef TARGET_NR_getuid
11365     case TARGET_NR_getuid:
11366         return get_errno(high2lowuid(getuid()));
11367 #endif
11368 #ifdef TARGET_NR_getgid
11369     case TARGET_NR_getgid:
11370         return get_errno(high2lowgid(getgid()));
11371 #endif
11372 #ifdef TARGET_NR_geteuid
11373     case TARGET_NR_geteuid:
11374         return get_errno(high2lowuid(geteuid()));
11375 #endif
11376 #ifdef TARGET_NR_getegid
11377     case TARGET_NR_getegid:
11378         return get_errno(high2lowgid(getegid()));
11379 #endif
11380     case TARGET_NR_setreuid:
11381         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11382     case TARGET_NR_setregid:
11383         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11384     case TARGET_NR_getgroups:
11385         {
11386             int gidsetsize = arg1;
11387             target_id *target_grouplist;
11388             gid_t *grouplist;
11389             int i;
11390 
11391             grouplist = alloca(gidsetsize * sizeof(gid_t));
11392             ret = get_errno(getgroups(gidsetsize, grouplist));
11393             if (gidsetsize == 0)
11394                 return ret;
11395             if (!is_error(ret)) {
11396                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11397                 if (!target_grouplist)
11398                     return -TARGET_EFAULT;
11399                 for (i = 0; i < ret; i++)
11400                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11401                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11402             }
11403         }
11404         return ret;
11405     case TARGET_NR_setgroups:
11406         {
11407             int gidsetsize = arg1;
11408             target_id *target_grouplist;
11409             gid_t *grouplist = NULL;
11410             int i;
11411             if (gidsetsize) {
11412                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11413                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11414                 if (!target_grouplist) {
11415                     return -TARGET_EFAULT;
11416                 }
11417                 for (i = 0; i < gidsetsize; i++) {
11418                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11419                 }
11420                 unlock_user(target_grouplist, arg2, 0);
11421             }
11422             return get_errno(setgroups(gidsetsize, grouplist));
11423         }
11424     case TARGET_NR_fchown:
11425         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11426 #if defined(TARGET_NR_fchownat)
11427     case TARGET_NR_fchownat:
11428         if (!(p = lock_user_string(arg2)))
11429             return -TARGET_EFAULT;
11430         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11431                                  low2highgid(arg4), arg5));
11432         unlock_user(p, arg2, 0);
11433         return ret;
11434 #endif
11435 #ifdef TARGET_NR_setresuid
11436     case TARGET_NR_setresuid:
11437         return get_errno(sys_setresuid(low2highuid(arg1),
11438                                        low2highuid(arg2),
11439                                        low2highuid(arg3)));
11440 #endif
11441 #ifdef TARGET_NR_getresuid
11442     case TARGET_NR_getresuid:
11443         {
11444             uid_t ruid, euid, suid;
11445             ret = get_errno(getresuid(&ruid, &euid, &suid));
11446             if (!is_error(ret)) {
11447                 if (put_user_id(high2lowuid(ruid), arg1)
11448                     || put_user_id(high2lowuid(euid), arg2)
11449                     || put_user_id(high2lowuid(suid), arg3))
11450                     return -TARGET_EFAULT;
11451             }
11452         }
11453         return ret;
11454 #endif
11455 #ifdef TARGET_NR_setresgid
11456     case TARGET_NR_setresgid:
11457         return get_errno(sys_setresgid(low2highgid(arg1),
11458                                        low2highgid(arg2),
11459                                        low2highgid(arg3)));
11460 #endif
11461 #ifdef TARGET_NR_getresgid
11462     case TARGET_NR_getresgid:
11463         {
11464             gid_t rgid, egid, sgid;
11465             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11466             if (!is_error(ret)) {
11467                 if (put_user_id(high2lowgid(rgid), arg1)
11468                     || put_user_id(high2lowgid(egid), arg2)
11469                     || put_user_id(high2lowgid(sgid), arg3))
11470                     return -TARGET_EFAULT;
11471             }
11472         }
11473         return ret;
11474 #endif
11475 #ifdef TARGET_NR_chown
11476     case TARGET_NR_chown:
11477         if (!(p = lock_user_string(arg1)))
11478             return -TARGET_EFAULT;
11479         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11480         unlock_user(p, arg1, 0);
11481         return ret;
11482 #endif
11483     case TARGET_NR_setuid:
11484         return get_errno(sys_setuid(low2highuid(arg1)));
11485     case TARGET_NR_setgid:
11486         return get_errno(sys_setgid(low2highgid(arg1)));
11487     case TARGET_NR_setfsuid:
11488         return get_errno(setfsuid(arg1));
11489     case TARGET_NR_setfsgid:
11490         return get_errno(setfsgid(arg1));
11491 
11492 #ifdef TARGET_NR_lchown32
11493     case TARGET_NR_lchown32:
11494         if (!(p = lock_user_string(arg1)))
11495             return -TARGET_EFAULT;
11496         ret = get_errno(lchown(p, arg2, arg3));
11497         unlock_user(p, arg1, 0);
11498         return ret;
11499 #endif
11500 #ifdef TARGET_NR_getuid32
11501     case TARGET_NR_getuid32:
11502         return get_errno(getuid());
11503 #endif
11504 
11505 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11506     /* Alpha specific */
11507     case TARGET_NR_getxuid:
11508         {
11509             uid_t euid;
11510             euid = geteuid();
11511             cpu_env->ir[IR_A4] = euid;
11512         }
11513         return get_errno(getuid());
11514 #endif
11515 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11516     /* Alpha specific */
11517     case TARGET_NR_getxgid:
11518         {
11519             gid_t egid;
11520             egid = getegid();
11521             cpu_env->ir[IR_A4] = egid;
11522         }
11523         return get_errno(getgid());
11524 #endif
11525 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11526     /* Alpha specific */
11527     case TARGET_NR_osf_getsysinfo:
11528         ret = -TARGET_EOPNOTSUPP;
11529         switch (arg1) {
11530           case TARGET_GSI_IEEE_FP_CONTROL:
11531             {
11532                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11533                 uint64_t swcr = cpu_env->swcr;
11534 
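                      /* Merge the status bits, which live in the FPCR, into the reported swcr. */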
11535                 swcr &= ~SWCR_STATUS_MASK;
11536                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11537 
11538                 if (put_user_u64(swcr, arg2))
11539                     return -TARGET_EFAULT;
11540                 ret = 0;
11541             }
11542             break;
11543 
11544           /* case GSI_IEEE_STATE_AT_SIGNAL:
11545              -- Not implemented in linux kernel.
11546              case GSI_UACPROC:
11547              -- Retrieves current unaligned access state; not much used.
11548              case GSI_PROC_TYPE:
11549              -- Retrieves implver information; surely not used.
11550              case GSI_GET_HWRPB:
11551              -- Grabs a copy of the HWRPB; surely not used.
11552           */
11553         }
11554         return ret;
11555 #endif
11556 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11557     /* Alpha specific */
11558     case TARGET_NR_osf_setsysinfo:
11559         ret = -TARGET_EOPNOTSUPP;
11560         switch (arg1) {
11561           case TARGET_SSI_IEEE_FP_CONTROL:
11562             {
11563                 uint64_t swcr, fpcr;
11564 
11565                 if (get_user_u64(swcr, arg2)) {
11566                     return -TARGET_EFAULT;
11567                 }
11568 
11569                 /*
11570                  * The kernel calls swcr_update_status to update the
11571                  * status bits from the fpcr at every point that it
11572                  * could be queried.  Therefore, we store the status
11573                  * bits only in FPCR.
11574                  */
11575                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11576 
11577                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11578                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11579                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11580                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11581                 ret = 0;
11582             }
11583             break;
11584 
11585           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11586             {
11587                 uint64_t exc, fpcr, fex;
11588 
11589                 if (get_user_u64(exc, arg2)) {
11590                     return -TARGET_EFAULT;
11591                 }
11592                 exc &= SWCR_STATUS_MASK;
11593                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11594 
11595                 /* Old exceptions are not signaled.  */
11596                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11597                 fex = exc & ~fex;
11598                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11599                 fex &= (cpu_env)->swcr;
11600 
11601                 /* Update the hardware fpcr.  */
11602                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11603                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11604 
11605                 if (fex) {
11606                     int si_code = TARGET_FPE_FLTUNK;
11607                     target_siginfo_t info;
11608 
11609                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11610                         si_code = TARGET_FPE_FLTUND;
11611                     }
11612                     if (fex & SWCR_TRAP_ENABLE_INE) {
11613                         si_code = TARGET_FPE_FLTRES;
11614                     }
11615                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11616                         si_code = TARGET_FPE_FLTUND;
11617                     }
11618                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11619                         si_code = TARGET_FPE_FLTOVF;
11620                     }
11621                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11622                         si_code = TARGET_FPE_FLTDIV;
11623                     }
11624                     if (fex & SWCR_TRAP_ENABLE_INV) {
11625                         si_code = TARGET_FPE_FLTINV;
11626                     }
11627 
11628                     info.si_signo = SIGFPE;
11629                     info.si_errno = 0;
11630                     info.si_code = si_code;
11631                     info._sifields._sigfault._addr = (cpu_env)->pc;
11632                     queue_signal(cpu_env, info.si_signo,
11633                                  QEMU_SI_FAULT, &info);
11634                 }
11635                 ret = 0;
11636             }
11637             break;
11638 
11639           /* case SSI_NVPAIRS:
11640              -- Used with SSIN_UACPROC to enable unaligned accesses.
11641              case SSI_IEEE_STATE_AT_SIGNAL:
11642              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11643              -- Not implemented in linux kernel
11644           */
11645         }
11646         return ret;
11647 #endif
11648 #ifdef TARGET_NR_osf_sigprocmask
11649     /* Alpha specific.  */
11650     case TARGET_NR_osf_sigprocmask:
11651         {
11652             abi_ulong mask;
11653             int how;
11654             sigset_t set, oldset;
11655 
11656             switch (arg1) {
11657             case TARGET_SIG_BLOCK:
11658                 how = SIG_BLOCK;
11659                 break;
11660             case TARGET_SIG_UNBLOCK:
11661                 how = SIG_UNBLOCK;
11662                 break;
11663             case TARGET_SIG_SETMASK:
11664                 how = SIG_SETMASK;
11665                 break;
11666             default:
11667                 return -TARGET_EINVAL;
11668             }
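                  /* The old-style sigset fits in a register: convert, apply, return the old mask. */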
11669             mask = arg2;
11670             target_to_host_old_sigset(&set, &mask);
11671             ret = do_sigprocmask(how, &set, &oldset);
11672             if (!ret) {
11673                 host_to_target_old_sigset(&mask, &oldset);
11674                 ret = mask;
11675             }
11676         }
11677         return ret;
11678 #endif
11679 
11680 #ifdef TARGET_NR_getgid32
11681     case TARGET_NR_getgid32:
11682         return get_errno(getgid());
11683 #endif
11684 #ifdef TARGET_NR_geteuid32
11685     case TARGET_NR_geteuid32:
11686         return get_errno(geteuid());
11687 #endif
11688 #ifdef TARGET_NR_getegid32
11689     case TARGET_NR_getegid32:
11690         return get_errno(getegid());
11691 #endif
11692 #ifdef TARGET_NR_setreuid32
11693     case TARGET_NR_setreuid32:
11694         return get_errno(setreuid(arg1, arg2));
11695 #endif
11696 #ifdef TARGET_NR_setregid32
11697     case TARGET_NR_setregid32:
11698         return get_errno(setregid(arg1, arg2));
11699 #endif
11700 #ifdef TARGET_NR_getgroups32
11701     case TARGET_NR_getgroups32:
11702         {
11703             int gidsetsize = arg1;
11704             uint32_t *target_grouplist;
11705             gid_t *grouplist;
11706             int i;
11707 
11708             grouplist = alloca(gidsetsize * sizeof(gid_t));
11709             ret = get_errno(getgroups(gidsetsize, grouplist));
11710             if (gidsetsize == 0)
11711                 return ret;
11712             if (!is_error(ret)) {
11713                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11714                 if (!target_grouplist) {
11715                     return -TARGET_EFAULT;
11716                 }
11717                 for (i = 0; i < ret; i++)
11718                     target_grouplist[i] = tswap32(grouplist[i]);
11719                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11720             }
11721         }
11722         return ret;
11723 #endif
11724 #ifdef TARGET_NR_setgroups32
11725     case TARGET_NR_setgroups32:
11726         {
11727             int gidsetsize = arg1;
11728             uint32_t *target_grouplist;
11729             gid_t *grouplist;
11730             int i;
11731 
11732             grouplist = alloca(gidsetsize * sizeof(gid_t));
11733             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11734             if (!target_grouplist) {
11735                 return -TARGET_EFAULT;
11736             }
11737             for (i = 0; i < gidsetsize; i++)
11738                 grouplist[i] = tswap32(target_grouplist[i]);
11739             unlock_user(target_grouplist, arg2, 0);
11740             return get_errno(setgroups(gidsetsize, grouplist));
11741         }
11742 #endif
11743 #ifdef TARGET_NR_fchown32
11744     case TARGET_NR_fchown32:
11745         return get_errno(fchown(arg1, arg2, arg3));
11746 #endif
11747 #ifdef TARGET_NR_setresuid32
11748     case TARGET_NR_setresuid32:
11749         return get_errno(sys_setresuid(arg1, arg2, arg3));
11750 #endif
11751 #ifdef TARGET_NR_getresuid32
11752     case TARGET_NR_getresuid32:
11753         {
11754             uid_t ruid, euid, suid;
11755             ret = get_errno(getresuid(&ruid, &euid, &suid));
11756             if (!is_error(ret)) {
11757                 if (put_user_u32(ruid, arg1)
11758                     || put_user_u32(euid, arg2)
11759                     || put_user_u32(suid, arg3))
11760                     return -TARGET_EFAULT;
11761             }
11762         }
11763         return ret;
11764 #endif
11765 #ifdef TARGET_NR_setresgid32
11766     case TARGET_NR_setresgid32:
11767         return get_errno(sys_setresgid(arg1, arg2, arg3));
11768 #endif
11769 #ifdef TARGET_NR_getresgid32
11770     case TARGET_NR_getresgid32:
11771         {
11772             gid_t rgid, egid, sgid;
11773             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11774             if (!is_error(ret)) {
11775                 if (put_user_u32(rgid, arg1)
11776                     || put_user_u32(egid, arg2)
11777                     || put_user_u32(sgid, arg3))
11778                     return -TARGET_EFAULT;
11779             }
11780         }
11781         return ret;
11782 #endif
11783 #ifdef TARGET_NR_chown32
11784     case TARGET_NR_chown32:
11785         if (!(p = lock_user_string(arg1)))
11786             return -TARGET_EFAULT;
11787         ret = get_errno(chown(p, arg2, arg3));
11788         unlock_user(p, arg1, 0);
11789         return ret;
11790 #endif
11791 #ifdef TARGET_NR_setuid32
11792     case TARGET_NR_setuid32:
11793         return get_errno(sys_setuid(arg1));
11794 #endif
11795 #ifdef TARGET_NR_setgid32
11796     case TARGET_NR_setgid32:
11797         return get_errno(sys_setgid(arg1));
11798 #endif
11799 #ifdef TARGET_NR_setfsuid32
11800     case TARGET_NR_setfsuid32:
11801         return get_errno(setfsuid(arg1));
11802 #endif
11803 #ifdef TARGET_NR_setfsgid32
11804     case TARGET_NR_setfsgid32:
11805         return get_errno(setfsgid(arg1));
11806 #endif
11807 #ifdef TARGET_NR_mincore
11808     case TARGET_NR_mincore:
11809         {
11810             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11811             if (!a) {
11812                 return -TARGET_ENOMEM;
11813             }
11814             p = lock_user_string(arg3);
11815             if (!p) {
11816                 ret = -TARGET_EFAULT;
11817             } else {
11818                 ret = get_errno(mincore(a, arg2, p));
11819                 unlock_user(p, arg3, ret);
11820             }
11821             unlock_user(a, arg1, 0);
11822         }
11823         return ret;
11824 #endif
11825 #ifdef TARGET_NR_arm_fadvise64_64
11826     case TARGET_NR_arm_fadvise64_64:
11827         /* arm_fadvise64_64 looks like fadvise64_64 but
11828          * with different argument order: fd, advice, offset, len
11829          * rather than the usual fd, offset, len, advice.
11830          * Note that offset and len are both 64-bit so appear as
11831          * pairs of 32-bit registers.
11832          */
11833         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11834                             target_offset64(arg5, arg6), arg2);
11835         return -host_to_target_errno(ret);
11836 #endif
11837 
11838 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11839 
11840 #ifdef TARGET_NR_fadvise64_64
11841     case TARGET_NR_fadvise64_64:
11842 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11843         /* 6 args: fd, advice, offset (high, low), len (high, low) */
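              /* Rotate the arguments so offset, len and advice land in the slots used below. */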
11844         ret = arg2;
11845         arg2 = arg3;
11846         arg3 = arg4;
11847         arg4 = arg5;
11848         arg5 = arg6;
11849         arg6 = ret;
11850 #else
11851         /* 6 args: fd, offset (high, low), len (high, low), advice */
11852         if (regpairs_aligned(cpu_env, num)) {
11853             /* offset is in (3,4), len in (5,6) and advice in 7 */
11854             arg2 = arg3;
11855             arg3 = arg4;
11856             arg4 = arg5;
11857             arg5 = arg6;
11858             arg6 = arg7;
11859         }
11860 #endif
11861         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11862                             target_offset64(arg4, arg5), arg6);
11863         return -host_to_target_errno(ret);
11864 #endif
11865 
11866 #ifdef TARGET_NR_fadvise64
11867     case TARGET_NR_fadvise64:
11868         /* 5 args: fd, offset (high, low), len, advice */
11869         if (regpairs_aligned(cpu_env, num)) {
11870             /* offset is in (3,4), len in 5 and advice in 6 */
11871             arg2 = arg3;
11872             arg3 = arg4;
11873             arg4 = arg5;
11874             arg5 = arg6;
11875         }
11876         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11877         return -host_to_target_errno(ret);
11878 #endif
11879 
11880 #else /* not a 32-bit ABI */
11881 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11882 #ifdef TARGET_NR_fadvise64_64
11883     case TARGET_NR_fadvise64_64:
11884 #endif
11885 #ifdef TARGET_NR_fadvise64
11886     case TARGET_NR_fadvise64:
11887 #endif
11888 #ifdef TARGET_S390X
11889         switch (arg4) {
11890         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11891         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11892         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11893         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11894         default: break;
11895         }
11896 #endif
11897         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11898 #endif
11899 #endif /* end of 64-bit ABI fadvise handling */
11900 
11901 #ifdef TARGET_NR_madvise
11902     case TARGET_NR_madvise:
11903         return target_madvise(arg1, arg2, arg3);
11904 #endif
11905 #ifdef TARGET_NR_fcntl64
11906     case TARGET_NR_fcntl64:
11907     {
11908         int cmd;
11909         struct flock64 fl;
11910         from_flock64_fn *copyfrom = copy_from_user_flock64;
11911         to_flock64_fn *copyto = copy_to_user_flock64;
11912 
11913 #ifdef TARGET_ARM
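              /* Old-ABI ARM pads struct flock64 differently, so use the OABI copy helpers. */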
11914         if (!cpu_env->eabi) {
11915             copyfrom = copy_from_user_oabi_flock64;
11916             copyto = copy_to_user_oabi_flock64;
11917         }
11918 #endif
11919 
11920         cmd = target_to_host_fcntl_cmd(arg2);
11921         if (cmd == -TARGET_EINVAL) {
11922             return cmd;
11923         }
11924 
11925         switch (arg2) {
11926         case TARGET_F_GETLK64:
11927             ret = copyfrom(&fl, arg3);
11928             if (ret) {
11929                 break;
11930             }
11931             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11932             if (ret == 0) {
11933                 ret = copyto(arg3, &fl);
11934             }
11935             break;
11936 
11937         case TARGET_F_SETLK64:
11938         case TARGET_F_SETLKW64:
11939             ret = copyfrom(&fl, arg3);
11940             if (ret) {
11941                 break;
11942             }
11943             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11944             break;
11945         default:
11946             ret = do_fcntl(arg1, arg2, arg3);
11947             break;
11948         }
11949         return ret;
11950     }
11951 #endif
11952 #ifdef TARGET_NR_cacheflush
11953     case TARGET_NR_cacheflush:
11954         /* self-modifying code is handled automatically, so nothing needed */
11955         return 0;
11956 #endif
11957 #ifdef TARGET_NR_getpagesize
11958     case TARGET_NR_getpagesize:
11959         return TARGET_PAGE_SIZE;
11960 #endif
11961     case TARGET_NR_gettid:
11962         return get_errno(sys_gettid());
11963 #ifdef TARGET_NR_readahead
11964     case TARGET_NR_readahead:
11965 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
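              /* On 32-bit ABIs the 64-bit offset arrives as a register pair, possibly aligned. */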
11966         if (regpairs_aligned(cpu_env, num)) {
11967             arg2 = arg3;
11968             arg3 = arg4;
11969             arg4 = arg5;
11970         }
11971         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11972 #else
11973         ret = get_errno(readahead(arg1, arg2, arg3));
11974 #endif
11975         return ret;
11976 #endif
11977 #ifdef CONFIG_ATTR
11978 #ifdef TARGET_NR_setxattr
11979     case TARGET_NR_listxattr:
11980     case TARGET_NR_llistxattr:
11981     {
11982         void *p, *b = 0;
11983         if (arg2) {
11984             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11985             if (!b) {
11986                 return -TARGET_EFAULT;
11987             }
11988         }
11989         p = lock_user_string(arg1);
11990         if (p) {
11991             if (num == TARGET_NR_listxattr) {
11992                 ret = get_errno(listxattr(p, b, arg3));
11993             } else {
11994                 ret = get_errno(llistxattr(p, b, arg3));
11995             }
11996         } else {
11997             ret = -TARGET_EFAULT;
11998         }
11999         unlock_user(p, arg1, 0);
12000         unlock_user(b, arg2, arg3);
12001         return ret;
12002     }
12003     case TARGET_NR_flistxattr:
12004     {
12005         void *b = 0;
12006         if (arg2) {
12007             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12008             if (!b) {
12009                 return -TARGET_EFAULT;
12010             }
12011         }
12012         ret = get_errno(flistxattr(arg1, b, arg3));
12013         unlock_user(b, arg2, arg3);
12014         return ret;
12015     }
12016     case TARGET_NR_setxattr:
12017     case TARGET_NR_lsetxattr:
12018         {
12019             void *p, *n, *v = 0;
12020             if (arg3) {
12021                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12022                 if (!v) {
12023                     return -TARGET_EFAULT;
12024                 }
12025             }
12026             p = lock_user_string(arg1);
12027             n = lock_user_string(arg2);
12028             if (p && n) {
12029                 if (num == TARGET_NR_setxattr) {
12030                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12031                 } else {
12032                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12033                 }
12034             } else {
12035                 ret = -TARGET_EFAULT;
12036             }
12037             unlock_user(p, arg1, 0);
12038             unlock_user(n, arg2, 0);
12039             unlock_user(v, arg3, 0);
12040         }
12041         return ret;
12042     case TARGET_NR_fsetxattr:
12043         {
12044             void *n, *v = 0;
12045             if (arg3) {
12046                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12047                 if (!v) {
12048                     return -TARGET_EFAULT;
12049                 }
12050             }
12051             n = lock_user_string(arg2);
12052             if (n) {
12053                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12054             } else {
12055                 ret = -TARGET_EFAULT;
12056             }
12057             unlock_user(n, arg2, 0);
12058             unlock_user(v, arg3, 0);
12059         }
12060         return ret;
12061     case TARGET_NR_getxattr:
12062     case TARGET_NR_lgetxattr:
12063         {
12064             void *p, *n, *v = 0;
12065             if (arg3) {
12066                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12067                 if (!v) {
12068                     return -TARGET_EFAULT;
12069                 }
12070             }
12071             p = lock_user_string(arg1);
12072             n = lock_user_string(arg2);
12073             if (p && n) {
12074                 if (num == TARGET_NR_getxattr) {
12075                     ret = get_errno(getxattr(p, n, v, arg4));
12076                 } else {
12077                     ret = get_errno(lgetxattr(p, n, v, arg4));
12078                 }
12079             } else {
12080                 ret = -TARGET_EFAULT;
12081             }
12082             unlock_user(p, arg1, 0);
12083             unlock_user(n, arg2, 0);
12084             unlock_user(v, arg3, arg4);
12085         }
12086         return ret;
12087     case TARGET_NR_fgetxattr:
12088         {
12089             void *n, *v = 0;
12090             if (arg3) {
12091                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12092                 if (!v) {
12093                     return -TARGET_EFAULT;
12094                 }
12095             }
12096             n = lock_user_string(arg2);
12097             if (n) {
12098                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12099             } else {
12100                 ret = -TARGET_EFAULT;
12101             }
12102             unlock_user(n, arg2, 0);
12103             unlock_user(v, arg3, arg4);
12104         }
12105         return ret;
12106     case TARGET_NR_removexattr:
12107     case TARGET_NR_lremovexattr:
12108         {
12109             void *p, *n;
12110             p = lock_user_string(arg1);
12111             n = lock_user_string(arg2);
12112             if (p && n) {
12113                 if (num == TARGET_NR_removexattr) {
12114                     ret = get_errno(removexattr(p, n));
12115                 } else {
12116                     ret = get_errno(lremovexattr(p, n));
12117                 }
12118             } else {
12119                 ret = -TARGET_EFAULT;
12120             }
12121             unlock_user(p, arg1, 0);
12122             unlock_user(n, arg2, 0);
12123         }
12124         return ret;
12125     case TARGET_NR_fremovexattr:
12126         {
12127             void *n;
12128             n = lock_user_string(arg2);
12129             if (n) {
12130                 ret = get_errno(fremovexattr(arg1, n));
12131             } else {
12132                 ret = -TARGET_EFAULT;
12133             }
12134             unlock_user(n, arg2, 0);
12135         }
12136         return ret;
12137 #endif
12138 #endif /* CONFIG_ATTR */
12139 #ifdef TARGET_NR_set_thread_area
12140     case TARGET_NR_set_thread_area:
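        /*
         * set_thread_area is emulated per target: MIPS stores the value in
         * CP0_UserLocal, CRIS in the PR_PID register, 32-bit x86 goes
         * through do_set_thread_area(), and m68k keeps it in the per-thread
         * TaskState; all other targets report ENOSYS.
         */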
12141 #if defined(TARGET_MIPS)
12142         cpu_env->active_tc.CP0_UserLocal = arg1;
12143         return 0;
12144 #elif defined(TARGET_CRIS)
12145         if (arg1 & 0xff) {
12146             ret = -TARGET_EINVAL;
12147         } else {
12148             cpu_env->pregs[PR_PID] = arg1;
12149             ret = 0;
12150         }
12151         return ret;
12152 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12153         return do_set_thread_area(cpu_env, arg1);
12154 #elif defined(TARGET_M68K)
12155         {
12156             TaskState *ts = cpu->opaque;
12157             ts->tp_value = arg1;
12158             return 0;
12159         }
12160 #else
12161         return -TARGET_ENOSYS;
12162 #endif
12163 #endif
12164 #ifdef TARGET_NR_get_thread_area
12165     case TARGET_NR_get_thread_area:
12166 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12167         return do_get_thread_area(cpu_env, arg1);
12168 #elif defined(TARGET_M68K)
12169         {
12170             TaskState *ts = cpu->opaque;
12171             return ts->tp_value;
12172         }
12173 #else
12174         return -TARGET_ENOSYS;
12175 #endif
12176 #endif
12177 #ifdef TARGET_NR_getdomainname
12178     case TARGET_NR_getdomainname:
12179         return -TARGET_ENOSYS;
12180 #endif
12181 
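    /*
     * The clock_* cases below convert struct timespec between the target
     * and host layouts; the *_time64 variants use the 64-bit layout
     * (struct target__kernel_timespec) so that 32-bit guests get a full
     * 64-bit time_t.
     */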
12182 #ifdef TARGET_NR_clock_settime
12183     case TARGET_NR_clock_settime:
12184     {
12185         struct timespec ts;
12186 
12187         ret = target_to_host_timespec(&ts, arg2);
12188         if (!is_error(ret)) {
12189             ret = get_errno(clock_settime(arg1, &ts));
12190         }
12191         return ret;
12192     }
12193 #endif
12194 #ifdef TARGET_NR_clock_settime64
12195     case TARGET_NR_clock_settime64:
12196     {
12197         struct timespec ts;
12198 
12199         ret = target_to_host_timespec64(&ts, arg2);
12200         if (!is_error(ret)) {
12201             ret = get_errno(clock_settime(arg1, &ts));
12202         }
12203         return ret;
12204     }
12205 #endif
12206 #ifdef TARGET_NR_clock_gettime
12207     case TARGET_NR_clock_gettime:
12208     {
12209         struct timespec ts;
12210         ret = get_errno(clock_gettime(arg1, &ts));
12211         if (!is_error(ret)) {
12212             ret = host_to_target_timespec(arg2, &ts);
12213         }
12214         return ret;
12215     }
12216 #endif
12217 #ifdef TARGET_NR_clock_gettime64
12218     case TARGET_NR_clock_gettime64:
12219     {
12220         struct timespec ts;
12221         ret = get_errno(clock_gettime(arg1, &ts));
12222         if (!is_error(ret)) {
12223             ret = host_to_target_timespec64(arg2, &ts);
12224         }
12225         return ret;
12226     }
12227 #endif
12228 #ifdef TARGET_NR_clock_getres
12229     case TARGET_NR_clock_getres:
12230     {
12231         struct timespec ts;
12232         ret = get_errno(clock_getres(arg1, &ts));
12233         if (!is_error(ret) && host_to_target_timespec(arg2, &ts)) {
12234             return -TARGET_EFAULT;
12235         }
12236         return ret;
12237     }
12238 #endif
12239 #ifdef TARGET_NR_clock_getres_time64
12240     case TARGET_NR_clock_getres_time64:
12241     {
12242         struct timespec ts;
12243         ret = get_errno(clock_getres(arg1, &ts));
12244         if (!is_error(ret) && host_to_target_timespec64(arg2, &ts)) {
12245             return -TARGET_EFAULT;
12246         }
12247         return ret;
12248     }
12249 #endif
12250 #ifdef TARGET_NR_clock_nanosleep
12251     case TARGET_NR_clock_nanosleep:
12252     {
12253         struct timespec ts;
12254         if (target_to_host_timespec(&ts, arg3)) {
12255             return -TARGET_EFAULT;
12256         }
12257         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12258                                              &ts, arg4 ? &ts : NULL));
12259         /*
12260          * If the call is interrupted by a signal handler, it fails with
12261          * -TARGET_EINTR; in that case, if arg4 is not NULL and arg2 is not
12262          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12263          */
12264         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12265             host_to_target_timespec(arg4, &ts)) {
12266               return -TARGET_EFAULT;
12267         }
12268 
12269         return ret;
12270     }
12271 #endif
12272 #ifdef TARGET_NR_clock_nanosleep_time64
12273     case TARGET_NR_clock_nanosleep_time64:
12274     {
12275         struct timespec ts;
12276 
12277         if (target_to_host_timespec64(&ts, arg3)) {
12278             return -TARGET_EFAULT;
12279         }
12280 
12281         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12282                                              &ts, arg4 ? &ts : NULL));
12283 
12284         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12285             host_to_target_timespec64(arg4, &ts)) {
12286             return -TARGET_EFAULT;
12287         }
12288         return ret;
12289     }
12290 #endif
12291 
12292 #if defined(TARGET_NR_set_tid_address)
12293     case TARGET_NR_set_tid_address:
12294     {
12295         TaskState *ts = cpu->opaque;
12296         ts->child_tidptr = arg1;
12297         /* Do not call host set_tid_address(); just return the caller's tid. */
12298         return get_errno(sys_gettid());
12299     }
12300 #endif
12301 
12302     case TARGET_NR_tkill:
12303         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12304 
12305     case TARGET_NR_tgkill:
12306         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12307                          target_to_host_signal(arg3)));
12308 
12309 #ifdef TARGET_NR_set_robust_list
12310     case TARGET_NR_set_robust_list:
12311     case TARGET_NR_get_robust_list:
12312         /* The ABI for supporting robust futexes has userspace pass
12313          * the kernel a pointer to a linked list which is updated by
12314          * userspace after the syscall; the list is walked by the kernel
12315          * when the thread exits. Since the linked list in QEMU guest
12316          * memory isn't a valid linked list for the host and we have
12317          * no way to reliably intercept the thread-death event, we can't
12318          * support these. Silently return ENOSYS so that guest userspace
12319          * falls back to a non-robust futex implementation (which should
12320          * be OK except in the corner case of the guest crashing while
12321          * holding a mutex that is shared with another process via
12322          * shared memory).
12323          */
12324         return -TARGET_ENOSYS;
12325 #endif
12326 
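    /*
     * utimensat takes an array of two timespecs (access and modification
     * time); both entries are converted from the guest layout before the
     * host syscall is made, and when arg2 is 0 a NULL path is passed
     * straight through to the host.
     */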
12327 #if defined(TARGET_NR_utimensat)
12328     case TARGET_NR_utimensat:
12329         {
12330             struct timespec *tsp, ts[2];
12331             if (!arg3) {
12332                 tsp = NULL;
12333             } else {
12334                 if (target_to_host_timespec(ts, arg3)) {
12335                     return -TARGET_EFAULT;
12336                 }
12337                 if (target_to_host_timespec(ts + 1, arg3 +
12338                                             sizeof(struct target_timespec))) {
12339                     return -TARGET_EFAULT;
12340                 }
12341                 tsp = ts;
12342             }
12343             if (!arg2)
12344                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12345             else {
12346                 if (!(p = lock_user_string(arg2))) {
12347                     return -TARGET_EFAULT;
12348                 }
12349                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12350                 unlock_user(p, arg2, 0);
12351             }
12352         }
12353         return ret;
12354 #endif
12355 #ifdef TARGET_NR_utimensat_time64
12356     case TARGET_NR_utimensat_time64:
12357         {
12358             struct timespec *tsp, ts[2];
12359             if (!arg3) {
12360                 tsp = NULL;
12361             } else {
12362                 if (target_to_host_timespec64(ts, arg3)) {
12363                     return -TARGET_EFAULT;
12364                 }
12365                 if (target_to_host_timespec64(ts + 1, arg3 +
12366                                      sizeof(struct target__kernel_timespec))) {
12367                     return -TARGET_EFAULT;
12368                 }
12369                 tsp = ts;
12370             }
12371             if (!arg2)
12372                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12373             else {
12374                 p = lock_user_string(arg2);
12375                 if (!p) {
12376                     return -TARGET_EFAULT;
12377                 }
12378                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12379                 unlock_user(p, arg2, 0);
12380             }
12381         }
12382         return ret;
12383 #endif
12384 #ifdef TARGET_NR_futex
12385     case TARGET_NR_futex:
12386         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12387 #endif
12388 #ifdef TARGET_NR_futex_time64
12389     case TARGET_NR_futex_time64:
12390         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12391 #endif
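    /*
     * inotify descriptors are registered with target_inotify_trans,
     * presumably so that the inotify_event records read back from the fd
     * can be converted to the guest layout.
     */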
12392 #ifdef CONFIG_INOTIFY
12393 #if defined(TARGET_NR_inotify_init)
12394     case TARGET_NR_inotify_init:
12395         ret = get_errno(inotify_init());
12396         if (ret >= 0) {
12397             fd_trans_register(ret, &target_inotify_trans);
12398         }
12399         return ret;
12400 #endif
12401 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12402     case TARGET_NR_inotify_init1:
12403         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12404                                           fcntl_flags_tbl)));
12405         if (ret >= 0) {
12406             fd_trans_register(ret, &target_inotify_trans);
12407         }
12408         return ret;
12409 #endif
12410 #if defined(TARGET_NR_inotify_add_watch)
12411     case TARGET_NR_inotify_add_watch:
12412         p = lock_user_string(arg2);
12413         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12414         unlock_user(p, arg2, 0);
12415         return ret;
12416 #endif
12417 #if defined(TARGET_NR_inotify_rm_watch)
12418     case TARGET_NR_inotify_rm_watch:
12419         return get_errno(inotify_rm_watch(arg1, arg2));
12420 #endif
12421 #endif
12422 
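    /*
     * POSIX message queues: the open flags are translated through
     * fcntl_flags_tbl and the optional struct mq_attr is copied in from
     * guest memory before the host mq_* functions are called.
     */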
12423 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12424     case TARGET_NR_mq_open:
12425         {
12426             struct mq_attr posix_mq_attr;
12427             struct mq_attr *pposix_mq_attr;
12428             int host_flags;
12429 
12430             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12431             pposix_mq_attr = NULL;
12432             if (arg4) {
12433                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12434                     return -TARGET_EFAULT;
12435                 }
12436                 pposix_mq_attr = &posix_mq_attr;
12437             }
12438             p = lock_user_string(arg1 - 1);
12439             if (!p) {
12440                 return -TARGET_EFAULT;
12441             }
12442             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12443             unlock_user(p, arg1, 0);
12444         }
12445         return ret;
12446 
12447     case TARGET_NR_mq_unlink:
12448         p = lock_user_string(arg1 - 1);
12449         if (!p) {
12450             return -TARGET_EFAULT;
12451         }
12452         ret = get_errno(mq_unlink(p));
12453         unlock_user(p, arg1, 0);
12454         return ret;
12455 
12456 #ifdef TARGET_NR_mq_timedsend
12457     case TARGET_NR_mq_timedsend:
12458         {
12459             struct timespec ts;
12460 
12461             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12462             if (arg5 != 0) {
12463                 if (target_to_host_timespec(&ts, arg5)) {
12464                     return -TARGET_EFAULT;
12465                 }
12466                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12467                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12468                     return -TARGET_EFAULT;
12469                 }
12470             } else {
12471                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12472             }
12473             unlock_user(p, arg2, arg3);
12474         }
12475         return ret;
12476 #endif
12477 #ifdef TARGET_NR_mq_timedsend_time64
12478     case TARGET_NR_mq_timedsend_time64:
12479         {
12480             struct timespec ts;
12481 
12482             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12483             if (arg5 != 0) {
12484                 if (target_to_host_timespec64(&ts, arg5)) {
12485                     return -TARGET_EFAULT;
12486                 }
12487                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12488                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12489                     return -TARGET_EFAULT;
12490                 }
12491             } else {
12492                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12493             }
12494             unlock_user(p, arg2, arg3);
12495         }
12496         return ret;
12497 #endif
12498 
12499 #ifdef TARGET_NR_mq_timedreceive
12500     case TARGET_NR_mq_timedreceive:
12501         {
12502             struct timespec ts;
12503             unsigned int prio;
12504 
12505             p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
12506             if (arg5 != 0) {
12507                 if (target_to_host_timespec(&ts, arg5)) {
12508                     return -TARGET_EFAULT;
12509                 }
12510                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12511                                                      &prio, &ts));
12512                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12513                     return -TARGET_EFAULT;
12514                 }
12515             } else {
12516                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12517                                                      &prio, NULL));
12518             }
12519             unlock_user(p, arg2, arg3);
12520             if (arg4 != 0)
12521                 put_user_u32(prio, arg4);
12522         }
12523         return ret;
12524 #endif
12525 #ifdef TARGET_NR_mq_timedreceive_time64
12526     case TARGET_NR_mq_timedreceive_time64:
12527         {
12528             struct timespec ts;
12529             unsigned int prio;
12530 
12531             p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
12532             if (arg5 != 0) {
12533                 if (target_to_host_timespec64(&ts, arg5)) {
12534                     return -TARGET_EFAULT;
12535                 }
12536                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12537                                                      &prio, &ts));
12538                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12539                     return -TARGET_EFAULT;
12540                 }
12541             } else {
12542                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12543                                                      &prio, NULL));
12544             }
12545             unlock_user(p, arg2, arg3);
12546             if (arg4 != 0) {
12547                 put_user_u32(prio, arg4);
12548             }
12549         }
12550         return ret;
12551 #endif
12552 
12553     /* Not implemented for now: */
12554     /*     case TARGET_NR_mq_notify: */
12555     /*         break; */
12556 
12557     case TARGET_NR_mq_getsetattr:
12558         {
12559             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12560             ret = 0;
12561             if (arg2 != 0) {
12562                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12563                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12564                                            &posix_mq_attr_out));
12565             } else if (arg3 != 0) {
12566                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12567             }
12568             if (ret == 0 && arg3 != 0) {
12569                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12570             }
12571         }
12572         return ret;
12573 #endif
12574 
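    /*
     * splice()/tee()/vmsplice(): the optional 64-bit offsets are read from
     * and written back to guest memory with get_user_u64()/put_user_u64(),
     * and vmsplice's iovec array is converted with lock_iovec().
     */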
12575 #ifdef CONFIG_SPLICE
12576 #ifdef TARGET_NR_tee
12577     case TARGET_NR_tee:
12578         {
12579             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12580         }
12581         return ret;
12582 #endif
12583 #ifdef TARGET_NR_splice
12584     case TARGET_NR_splice:
12585         {
12586             loff_t loff_in, loff_out;
12587             loff_t *ploff_in = NULL, *ploff_out = NULL;
12588             if (arg2) {
12589                 if (get_user_u64(loff_in, arg2)) {
12590                     return -TARGET_EFAULT;
12591                 }
12592                 ploff_in = &loff_in;
12593             }
12594             if (arg4) {
12595                 if (get_user_u64(loff_out, arg4)) {
12596                     return -TARGET_EFAULT;
12597                 }
12598                 ploff_out = &loff_out;
12599             }
12600             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12601             if (arg2) {
12602                 if (put_user_u64(loff_in, arg2)) {
12603                     return -TARGET_EFAULT;
12604                 }
12605             }
12606             if (arg4) {
12607                 if (put_user_u64(loff_out, arg4)) {
12608                     return -TARGET_EFAULT;
12609                 }
12610             }
12611         }
12612         return ret;
12613 #endif
12614 #ifdef TARGET_NR_vmsplice
12615     case TARGET_NR_vmsplice:
12616         {
12617             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12618             if (vec != NULL) {
12619                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12620                 unlock_iovec(vec, arg2, arg3, 0);
12621             } else {
12622                 ret = -host_to_target_errno(errno);
12623             }
12624         }
12625         return ret;
12626 #endif
12627 #endif /* CONFIG_SPLICE */
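    /*
     * eventfd descriptors are registered with target_eventfd_trans,
     * presumably so that reads and writes of the 64-bit counter can be
     * byte-swapped for cross-endian guests.
     */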
12628 #ifdef CONFIG_EVENTFD
12629 #if defined(TARGET_NR_eventfd)
12630     case TARGET_NR_eventfd:
12631         ret = get_errno(eventfd(arg1, 0));
12632         if (ret >= 0) {
12633             fd_trans_register(ret, &target_eventfd_trans);
12634         }
12635         return ret;
12636 #endif
12637 #if defined(TARGET_NR_eventfd2)
12638     case TARGET_NR_eventfd2:
12639     {
12640         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12641         if (arg2 & TARGET_O_NONBLOCK) {
12642             host_flags |= O_NONBLOCK;
12643         }
12644         if (arg2 & TARGET_O_CLOEXEC) {
12645             host_flags |= O_CLOEXEC;
12646         }
12647         ret = get_errno(eventfd(arg1, host_flags));
12648         if (ret >= 0) {
12649             fd_trans_register(ret, &target_eventfd_trans);
12650         }
12651         return ret;
12652     }
12653 #endif
12654 #endif /* CONFIG_EVENTFD  */
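    /*
     * For 32-bit ABIs (other than MIPS N32) the 64-bit offset and length
     * arguments of fallocate()/sync_file_range() arrive split across two
     * registers and are reassembled with target_offset64(); the MIPS o32
     * variant starts at arg3, apparently because of argument-pair
     * alignment (note the use of arg7 for the flags).
     */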
12655 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12656     case TARGET_NR_fallocate:
12657 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12658         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12659                                   target_offset64(arg5, arg6)));
12660 #else
12661         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12662 #endif
12663         return ret;
12664 #endif
12665 #if defined(CONFIG_SYNC_FILE_RANGE)
12666 #if defined(TARGET_NR_sync_file_range)
12667     case TARGET_NR_sync_file_range:
12668 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12669 #if defined(TARGET_MIPS)
12670         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12671                                         target_offset64(arg5, arg6), arg7));
12672 #else
12673         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12674                                         target_offset64(arg4, arg5), arg6));
12675 #endif /* !TARGET_MIPS */
12676 #else
12677         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12678 #endif
12679         return ret;
12680 #endif
12681 #if defined(TARGET_NR_sync_file_range2) || \
12682     defined(TARGET_NR_arm_sync_file_range)
12683 #if defined(TARGET_NR_sync_file_range2)
12684     case TARGET_NR_sync_file_range2:
12685 #endif
12686 #if defined(TARGET_NR_arm_sync_file_range)
12687     case TARGET_NR_arm_sync_file_range:
12688 #endif
12689         /* This is like sync_file_range but the arguments are reordered */
12690 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12691         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12692                                         target_offset64(arg5, arg6), arg2));
12693 #else
12694         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12695 #endif
12696         return ret;
12697 #endif
12698 #endif
12699 #if defined(TARGET_NR_signalfd4)
12700     case TARGET_NR_signalfd4:
12701         return do_signalfd4(arg1, arg2, arg4);
12702 #endif
12703 #if defined(TARGET_NR_signalfd)
12704     case TARGET_NR_signalfd:
12705         return do_signalfd4(arg1, arg2, 0);
12706 #endif
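    /*
     * epoll emulation: struct epoll_event is converted field by field
     * (events is a 32-bit swap, data is carried as an opaque 64-bit
     * value), and epoll_wait/epoll_pwait reject maxevents values above
     * TARGET_EP_MAX_EVENTS before allocating a host-side buffer.
     */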
12707 #if defined(CONFIG_EPOLL)
12708 #if defined(TARGET_NR_epoll_create)
12709     case TARGET_NR_epoll_create:
12710         return get_errno(epoll_create(arg1));
12711 #endif
12712 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12713     case TARGET_NR_epoll_create1:
12714         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12715 #endif
12716 #if defined(TARGET_NR_epoll_ctl)
12717     case TARGET_NR_epoll_ctl:
12718     {
12719         struct epoll_event ep;
12720         struct epoll_event *epp = 0;
12721         if (arg4) {
12722             if (arg2 != EPOLL_CTL_DEL) {
12723                 struct target_epoll_event *target_ep;
12724                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12725                     return -TARGET_EFAULT;
12726                 }
12727                 ep.events = tswap32(target_ep->events);
12728                 /*
12729                  * The epoll_data_t union is just opaque data to the kernel,
12730                  * so we transfer all 64 bits across and need not worry what
12731                  * actual data type it is.
12732                  */
12733                 ep.data.u64 = tswap64(target_ep->data.u64);
12734                 unlock_user_struct(target_ep, arg4, 0);
12735             }
12736             /*
12737              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12738              * non-NULL pointer, even though this argument is ignored.
12740              */
12741             epp = &ep;
12742         }
12743         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12744     }
12745 #endif
12746 
12747 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12748 #if defined(TARGET_NR_epoll_wait)
12749     case TARGET_NR_epoll_wait:
12750 #endif
12751 #if defined(TARGET_NR_epoll_pwait)
12752     case TARGET_NR_epoll_pwait:
12753 #endif
12754     {
12755         struct target_epoll_event *target_ep;
12756         struct epoll_event *ep;
12757         int epfd = arg1;
12758         int maxevents = arg3;
12759         int timeout = arg4;
12760 
12761         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12762             return -TARGET_EINVAL;
12763         }
12764 
12765         target_ep = lock_user(VERIFY_WRITE, arg2,
12766                               maxevents * sizeof(struct target_epoll_event), 1);
12767         if (!target_ep) {
12768             return -TARGET_EFAULT;
12769         }
12770 
12771         ep = g_try_new(struct epoll_event, maxevents);
12772         if (!ep) {
12773             unlock_user(target_ep, arg2, 0);
12774             return -TARGET_ENOMEM;
12775         }
12776 
12777         switch (num) {
12778 #if defined(TARGET_NR_epoll_pwait)
12779         case TARGET_NR_epoll_pwait:
12780         {
12781             sigset_t *set = NULL;
12782 
12783             if (arg5) {
12784                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12785                 if (ret != 0) {
12786                     break;
12787                 }
12788             }
12789 
12790             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12791                                              set, SIGSET_T_SIZE));
12792 
12793             if (set) {
12794                 finish_sigsuspend_mask(ret);
12795             }
12796             break;
12797         }
12798 #endif
12799 #if defined(TARGET_NR_epoll_wait)
12800         case TARGET_NR_epoll_wait:
12801             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12802                                              NULL, 0));
12803             break;
12804 #endif
12805         default:
12806             ret = -TARGET_ENOSYS;
12807         }
12808         if (!is_error(ret)) {
12809             int i;
12810             for (i = 0; i < ret; i++) {
12811                 target_ep[i].events = tswap32(ep[i].events);
12812                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12813             }
12814             unlock_user(target_ep, arg2,
12815                         ret * sizeof(struct target_epoll_event));
12816         } else {
12817             unlock_user(target_ep, arg2, 0);
12818         }
12819         g_free(ep);
12820         return ret;
12821     }
12822 #endif
12823 #endif
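    /*
     * prlimit64: new limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK
     * are deliberately not forwarded to the host, presumably because
     * shrinking them would also constrain QEMU's own memory and stack
     * usage; the old limits are still copied back to the guest.
     */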
12824 #ifdef TARGET_NR_prlimit64
12825     case TARGET_NR_prlimit64:
12826     {
12827         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12828         struct target_rlimit64 *target_rnew, *target_rold;
12829         struct host_rlimit64 rnew, rold, *rnewp = 0;
12830         int resource = target_to_host_resource(arg2);
12831 
12832         if (arg3 && (resource != RLIMIT_AS &&
12833                      resource != RLIMIT_DATA &&
12834                      resource != RLIMIT_STACK)) {
12835             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12836                 return -TARGET_EFAULT;
12837             }
12838             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12839             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12840             unlock_user_struct(target_rnew, arg3, 0);
12841             rnewp = &rnew;
12842         }
12843 
12844         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12845         if (!is_error(ret) && arg4) {
12846             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12847                 return -TARGET_EFAULT;
12848             }
12849             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12850             target_rold->rlim_max = tswap64(rold.rlim_max);
12851             unlock_user_struct(target_rold, arg4, 1);
12852         }
12853         return ret;
12854     }
12855 #endif
12856 #ifdef TARGET_NR_gethostname
12857     case TARGET_NR_gethostname:
12858     {
12859         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12860         if (name) {
12861             ret = get_errno(gethostname(name, arg2));
12862             unlock_user(name, arg1, arg2);
12863         } else {
12864             ret = -TARGET_EFAULT;
12865         }
12866         return ret;
12867     }
12868 #endif
12869 #ifdef TARGET_NR_atomic_cmpxchg_32
12870     case TARGET_NR_atomic_cmpxchg_32:
12871     {
12872         /* should use start_exclusive from main.c */
12873         abi_ulong mem_value;
12874         if (get_user_u32(mem_value, arg6)) {
12875             target_siginfo_t info;
12876             info.si_signo = SIGSEGV;
12877             info.si_errno = 0;
12878             info.si_code = TARGET_SEGV_MAPERR;
12879             info._sifields._sigfault._addr = arg6;
12880             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12881             ret = 0xdeadbeef;
12882 
12883         }
12884         if (mem_value == arg2)
12885             put_user_u32(arg1, arg6);
12886         return mem_value;
12887     }
12888 #endif
12889 #ifdef TARGET_NR_atomic_barrier
12890     case TARGET_NR_atomic_barrier:
12891         /* Like the kernel implementation and the QEMU Arm barrier,
12892            this is implemented as a no-op. */
12893         return 0;
12894 #endif
12895 
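    /*
     * POSIX timers: host timer_t handles live in the g_posix_timers[]
     * table, and the id handed back to the guest is TIMER_MAGIC ORed with
     * the table index; get_timer_id() maps it back to a table index for
     * the other timer_* syscalls below.
     */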
12896 #ifdef TARGET_NR_timer_create
12897     case TARGET_NR_timer_create:
12898     {
12899         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12900 
12901         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12902 
12903         int clkid = arg1;
12904         int timer_index = next_free_host_timer();
12905 
12906         if (timer_index < 0) {
12907             ret = -TARGET_EAGAIN;
12908         } else {
12909             timer_t *phtimer = g_posix_timers + timer_index;
12910 
12911             if (arg2) {
12912                 phost_sevp = &host_sevp;
12913                 ret = target_to_host_sigevent(phost_sevp, arg2);
12914                 if (ret != 0) {
12915                     free_host_timer_slot(timer_index);
12916                     return ret;
12917                 }
12918             }
12919 
12920             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12921             if (ret) {
12922                 free_host_timer_slot(timer_index);
12923             } else {
12924                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12925                     timer_delete(*phtimer);
12926                     free_host_timer_slot(timer_index);
12927                     return -TARGET_EFAULT;
12928                 }
12929             }
12930         }
12931         return ret;
12932     }
12933 #endif
12934 
12935 #ifdef TARGET_NR_timer_settime
12936     case TARGET_NR_timer_settime:
12937     {
12938         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12939          * struct itimerspec * old_value */
12940         target_timer_t timerid = get_timer_id(arg1);
12941 
12942         if (timerid < 0) {
12943             ret = timerid;
12944         } else if (arg3 == 0) {
12945             ret = -TARGET_EINVAL;
12946         } else {
12947             timer_t htimer = g_posix_timers[timerid];
12948             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12949 
12950             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12951                 return -TARGET_EFAULT;
12952             }
12953             ret = get_errno(
12954                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12955             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12956                 return -TARGET_EFAULT;
12957             }
12958         }
12959         return ret;
12960     }
12961 #endif
12962 
12963 #ifdef TARGET_NR_timer_settime64
12964     case TARGET_NR_timer_settime64:
12965     {
12966         target_timer_t timerid = get_timer_id(arg1);
12967 
12968         if (timerid < 0) {
12969             ret = timerid;
12970         } else if (arg3 == 0) {
12971             ret = -TARGET_EINVAL;
12972         } else {
12973             timer_t htimer = g_posix_timers[timerid];
12974             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12975 
12976             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12977                 return -TARGET_EFAULT;
12978             }
12979             ret = get_errno(
12980                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12981             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12982                 return -TARGET_EFAULT;
12983             }
12984         }
12985         return ret;
12986     }
12987 #endif
12988 
12989 #ifdef TARGET_NR_timer_gettime
12990     case TARGET_NR_timer_gettime:
12991     {
12992         /* args: timer_t timerid, struct itimerspec *curr_value */
12993         target_timer_t timerid = get_timer_id(arg1);
12994 
12995         if (timerid < 0) {
12996             ret = timerid;
12997         } else if (!arg2) {
12998             ret = -TARGET_EFAULT;
12999         } else {
13000             timer_t htimer = g_posix_timers[timerid];
13001             struct itimerspec hspec;
13002             ret = get_errno(timer_gettime(htimer, &hspec));
13003 
13004             if (host_to_target_itimerspec(arg2, &hspec)) {
13005                 ret = -TARGET_EFAULT;
13006             }
13007         }
13008         return ret;
13009     }
13010 #endif
13011 
13012 #ifdef TARGET_NR_timer_gettime64
13013     case TARGET_NR_timer_gettime64:
13014     {
13015         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13016         target_timer_t timerid = get_timer_id(arg1);
13017 
13018         if (timerid < 0) {
13019             ret = timerid;
13020         } else if (!arg2) {
13021             ret = -TARGET_EFAULT;
13022         } else {
13023             timer_t htimer = g_posix_timers[timerid];
13024             struct itimerspec hspec;
13025             ret = get_errno(timer_gettime(htimer, &hspec));
13026 
13027             if (host_to_target_itimerspec64(arg2, &hspec)) {
13028                 ret = -TARGET_EFAULT;
13029             }
13030         }
13031         return ret;
13032     }
13033 #endif
13034 
13035 #ifdef TARGET_NR_timer_getoverrun
13036     case TARGET_NR_timer_getoverrun:
13037     {
13038         /* args: timer_t timerid */
13039         target_timer_t timerid = get_timer_id(arg1);
13040 
13041         if (timerid < 0) {
13042             ret = timerid;
13043         } else {
13044             timer_t htimer = g_posix_timers[timerid];
13045             ret = get_errno(timer_getoverrun(htimer));
13046         }
13047         return ret;
13048     }
13049 #endif
13050 
13051 #ifdef TARGET_NR_timer_delete
13052     case TARGET_NR_timer_delete:
13053     {
13054         /* args: timer_t timerid */
13055         target_timer_t timerid = get_timer_id(arg1);
13056 
13057         if (timerid < 0) {
13058             ret = timerid;
13059         } else {
13060             timer_t htimer = g_posix_timers[timerid];
13061             ret = get_errno(timer_delete(htimer));
13062             free_host_timer_slot(timerid);
13063         }
13064         return ret;
13065     }
13066 #endif
13067 
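    /*
     * timerfd: creation flags are translated through fcntl_flags_tbl and
     * the itimerspec values are converted with the host_to_target /
     * target_to_host helpers (64-bit variants for the *_time64 calls).
     */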
13068 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13069     case TARGET_NR_timerfd_create:
13070         return get_errno(timerfd_create(arg1,
13071                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13072 #endif
13073 
13074 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13075     case TARGET_NR_timerfd_gettime:
13076         {
13077             struct itimerspec its_curr;
13078 
13079             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13080 
13081             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13082                 return -TARGET_EFAULT;
13083             }
13084         }
13085         return ret;
13086 #endif
13087 
13088 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13089     case TARGET_NR_timerfd_gettime64:
13090         {
13091             struct itimerspec its_curr;
13092 
13093             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13094 
13095             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13096                 return -TARGET_EFAULT;
13097             }
13098         }
13099         return ret;
13100 #endif
13101 
13102 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13103     case TARGET_NR_timerfd_settime:
13104         {
13105             struct itimerspec its_new, its_old, *p_new;
13106 
13107             if (arg3) {
13108                 if (target_to_host_itimerspec(&its_new, arg3)) {
13109                     return -TARGET_EFAULT;
13110                 }
13111                 p_new = &its_new;
13112             } else {
13113                 p_new = NULL;
13114             }
13115 
13116             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13117 
13118             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13119                 return -TARGET_EFAULT;
13120             }
13121         }
13122         return ret;
13123 #endif
13124 
13125 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13126     case TARGET_NR_timerfd_settime64:
13127         {
13128             struct itimerspec its_new, its_old, *p_new;
13129 
13130             if (arg3) {
13131                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13132                     return -TARGET_EFAULT;
13133                 }
13134                 p_new = &its_new;
13135             } else {
13136                 p_new = NULL;
13137             }
13138 
13139             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13140 
13141             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13142                 return -TARGET_EFAULT;
13143             }
13144         }
13145         return ret;
13146 #endif
13147 
13148 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13149     case TARGET_NR_ioprio_get:
13150         return get_errno(ioprio_get(arg1, arg2));
13151 #endif
13152 
13153 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13154     case TARGET_NR_ioprio_set:
13155         return get_errno(ioprio_set(arg1, arg2, arg3));
13156 #endif
13157 
13158 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13159     case TARGET_NR_setns:
13160         return get_errno(setns(arg1, arg2));
13161 #endif
13162 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13163     case TARGET_NR_unshare:
13164         return get_errno(unshare(arg1));
13165 #endif
13166 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13167     case TARGET_NR_kcmp:
13168         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13169 #endif
13170 #ifdef TARGET_NR_swapcontext
13171     case TARGET_NR_swapcontext:
13172         /* PowerPC specific.  */
13173         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13174 #endif
13175 #ifdef TARGET_NR_memfd_create
13176     case TARGET_NR_memfd_create:
13177         p = lock_user_string(arg1);
13178         if (!p) {
13179             return -TARGET_EFAULT;
13180         }
13181         ret = get_errno(memfd_create(p, arg2));
13182         fd_trans_unregister(ret);
13183         unlock_user(p, arg1, 0);
13184         return ret;
13185 #endif
13186 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13187     case TARGET_NR_membarrier:
13188         return get_errno(membarrier(arg1, arg2));
13189 #endif
13190 
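    /*
     * copy_file_range: as with splice(), the optional 64-bit file offsets
     * are copied in from and back out to guest memory; they are only
     * written back when the call actually copied some bytes.
     */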
13191 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13192     case TARGET_NR_copy_file_range:
13193         {
13194             loff_t inoff, outoff;
13195             loff_t *pinoff = NULL, *poutoff = NULL;
13196 
13197             if (arg2) {
13198                 if (get_user_u64(inoff, arg2)) {
13199                     return -TARGET_EFAULT;
13200                 }
13201                 pinoff = &inoff;
13202             }
13203             if (arg4) {
13204                 if (get_user_u64(outoff, arg4)) {
13205                     return -TARGET_EFAULT;
13206                 }
13207                 poutoff = &outoff;
13208             }
13209             /* Do not sign-extend the count parameter. */
13210             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13211                                                  (abi_ulong)arg5, arg6));
13212             if (!is_error(ret) && ret > 0) {
13213                 if (arg2) {
13214                     if (put_user_u64(inoff, arg2)) {
13215                         return -TARGET_EFAULT;
13216                     }
13217                 }
13218                 if (arg4) {
13219                     if (put_user_u64(outoff, arg4)) {
13220                         return -TARGET_EFAULT;
13221                     }
13222                 }
13223             }
13224         }
13225         return ret;
13226 #endif
13227 
13228 #if defined(TARGET_NR_pivot_root)
13229     case TARGET_NR_pivot_root:
13230         {
13231             void *p2;
13232             p = lock_user_string(arg1); /* new_root */
13233             p2 = lock_user_string(arg2); /* put_old */
13234             if (!p || !p2) {
13235                 ret = -TARGET_EFAULT;
13236             } else {
13237                 ret = get_errno(pivot_root(p, p2));
13238             }
13239             unlock_user(p2, arg2, 0);
13240             unlock_user(p, arg1, 0);
13241         }
13242         return ret;
13243 #endif
13244 
13245     default:
13246         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13247         return -TARGET_ENOSYS;
13248     }
13249     return ret;
13250 }
13251 
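/*
 * do_syscall() is the entry point used by the per-target cpu loops: it
 * records the syscall for tracing (record_syscall_start/return), prints
 * strace-style logging when LOG_STRACE is enabled, dispatches to
 * do_syscall1() above, and then logs and records the return value.
 */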
13252 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13253                     abi_long arg2, abi_long arg3, abi_long arg4,
13254                     abi_long arg5, abi_long arg6, abi_long arg7,
13255                     abi_long arg8)
13256 {
13257     CPUState *cpu = env_cpu(cpu_env);
13258     abi_long ret;
13259 
13260 #ifdef DEBUG_ERESTARTSYS
13261     /* Debug-only code for exercising the syscall-restart code paths
13262      * in the per-architecture cpu main loops: restart every syscall
13263      * the guest makes once before letting it through.
13264      */
13265     {
13266         static bool flag;
13267         flag = !flag;
13268         if (flag) {
13269             return -QEMU_ERESTARTSYS;
13270         }
13271     }
13272 #endif
13273 
13274     record_syscall_start(cpu, num, arg1,
13275                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13276 
13277     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13278         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13279     }
13280 
13281     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13282                       arg5, arg6, arg7, arg8);
13283 
13284     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13285         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13286                           arg3, arg4, arg5, arg6);
13287     }
13288 
13289     record_syscall_return(cpu, num, ret);
13290     return ret;
13291 }
13292