xref: /openbmc/qemu/linux-user/syscall.c (revision 3ea85609)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "qemu/guest-random.h"
131 #include "qemu/selfmap.h"
132 #include "user/syscall-trace.h"
133 #include "qapi/error.h"
134 #include "fd-trans.h"
135 #include "tcg/tcg.h"
136 
137 #ifndef CLONE_IO
138 #define CLONE_IO                0x80000000      /* Clone io context */
139 #endif
140 
141 /* We can't directly call the host clone syscall, because this will
142  * badly confuse libc (breaking mutexes, for example). So we must
143  * divide clone flags into:
144  *  * flag combinations that look like pthread_create()
145  *  * flag combinations that look like fork()
146  *  * flags we can implement within QEMU itself
147  *  * flags we can't support and will return an error for
148  */
149 /* For thread creation, all these flags must be present; for
150  * fork, none must be present.
151  */
152 #define CLONE_THREAD_FLAGS                              \
153     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
154      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
155 
156 /* These flags are ignored:
157  * CLONE_DETACHED is now ignored by the kernel;
158  * CLONE_IO is just an optimisation hint to the I/O scheduler
159  */
160 #define CLONE_IGNORED_FLAGS                     \
161     (CLONE_DETACHED | CLONE_IO)
162 
163 /* Flags for fork which we can implement within QEMU itself */
164 #define CLONE_OPTIONAL_FORK_FLAGS               \
165     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
166      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
167 
168 /* Flags for thread creation which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
170     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
171      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
172 
173 #define CLONE_INVALID_FORK_FLAGS                                        \
174     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
175 
176 #define CLONE_INVALID_THREAD_FLAGS                                      \
177     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
178        CLONE_IGNORED_FLAGS))
179 
180 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181  * have almost all been allocated. We cannot support any of
182  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184  * The checks against the invalid thread masks above will catch these.
185  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
186  */
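/* For illustration (not exhaustive): glibc's pthread_create() typically
 * passes CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 * CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID,
 * which satisfies CLONE_THREAD_FLAGS with the remaining bits falling inside
 * CLONE_OPTIONAL_THREAD_FLAGS, so it is accepted by the thread path.
 */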
187 
188 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189  * once. This exercises the codepaths for restart.
190  */
191 //#define DEBUG_ERESTARTSYS
192 
193 //#include <linux/msdos_fs.h>
194 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
195 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
196 
197 #undef _syscall0
198 #undef _syscall1
199 #undef _syscall2
200 #undef _syscall3
201 #undef _syscall4
202 #undef _syscall5
203 #undef _syscall6
204 
205 #define _syscall0(type,name)		\
206 static type name (void)			\
207 {					\
208 	return syscall(__NR_##name);	\
209 }
210 
211 #define _syscall1(type,name,type1,arg1)		\
212 static type name (type1 arg1)			\
213 {						\
214 	return syscall(__NR_##name, arg1);	\
215 }
216 
217 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
218 static type name (type1 arg1,type2 arg2)		\
219 {							\
220 	return syscall(__NR_##name, arg1, arg2);	\
221 }
222 
223 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
224 static type name (type1 arg1,type2 arg2,type3 arg3)		\
225 {								\
226 	return syscall(__NR_##name, arg1, arg2, arg3);		\
227 }
228 
229 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
231 {										\
232 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
233 }
234 
235 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236 		  type5,arg5)							\
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
238 {										\
239 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
240 }
241 
242 
243 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
244 		  type5,arg5,type6,arg6)					\
245 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
246                   type6 arg6)							\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
249 }
250 
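/*
 * Each _syscallN(type, name, ...) invocation below defines a static wrapper
 * "name" that forwards its N arguments to the raw host syscall() with the
 * matching __NR_##name number.  For example,
 *     _syscall1(int, exit_group, int, error_code)
 * expands to roughly:
 *     static int exit_group(int error_code)
 *     { return syscall(__NR_exit_group, error_code); }
 */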
251 
252 #define __NR_sys_uname __NR_uname
253 #define __NR_sys_getcwd1 __NR_getcwd
254 #define __NR_sys_getdents __NR_getdents
255 #define __NR_sys_getdents64 __NR_getdents64
256 #define __NR_sys_getpriority __NR_getpriority
257 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259 #define __NR_sys_syslog __NR_syslog
260 #if defined(__NR_futex)
261 # define __NR_sys_futex __NR_futex
262 #endif
263 #if defined(__NR_futex_time64)
264 # define __NR_sys_futex_time64 __NR_futex_time64
265 #endif
266 #define __NR_sys_inotify_init __NR_inotify_init
267 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269 #define __NR_sys_statx __NR_statx
270 
271 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272 #define __NR__llseek __NR_lseek
273 #endif
274 
275 /* Newer kernel ports have llseek() instead of _llseek() */
276 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277 #define TARGET_NR__llseek TARGET_NR_llseek
278 #endif
279 
280 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
281 #ifndef TARGET_O_NONBLOCK_MASK
282 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
283 #endif
284 
285 #define __NR_sys_gettid __NR_gettid
286 _syscall0(int, sys_gettid)
287 
288 /* For the 64-bit guest on 32-bit host case we must emulate
289  * getdents using getdents64, because otherwise the host
290  * might hand us back more dirent records than we can fit
291  * into the guest buffer after structure format conversion.
292  * Otherwise we implement the guest getdents using the host's, if available.
293  */
294 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
295 #define EMULATE_GETDENTS_WITH_GETDENTS
296 #endif
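/* e.g. a 64-bit guest on a 32-bit host (TARGET_ABI_BITS > HOST_LONG_BITS)
 * fails the check above, so its getdents is emulated via getdents64. */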
297 
298 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
299 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
300 #endif
301 #if (defined(TARGET_NR_getdents) && \
302       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
303     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
304 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
305 #endif
306 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
307 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
308           loff_t *, res, uint, wh);
309 #endif
310 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
311 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
312           siginfo_t *, uinfo)
313 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
314 #ifdef __NR_exit_group
315 _syscall1(int,exit_group,int,error_code)
316 #endif
317 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
318 _syscall1(int,set_tid_address,int *,tidptr)
319 #endif
320 #if defined(__NR_futex)
321 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
322           const struct timespec *,timeout,int *,uaddr2,int,val3)
323 #endif
324 #if defined(__NR_futex_time64)
325 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
326           const struct timespec *,timeout,int *,uaddr2,int,val3)
327 #endif
328 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
329 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
330           unsigned long *, user_mask_ptr);
331 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
332 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
333           unsigned long *, user_mask_ptr);
334 #define __NR_sys_getcpu __NR_getcpu
335 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
336 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
337           void *, arg);
338 _syscall2(int, capget, struct __user_cap_header_struct *, header,
339           struct __user_cap_data_struct *, data);
340 _syscall2(int, capset, struct __user_cap_header_struct *, header,
341           struct __user_cap_data_struct *, data);
342 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
343 _syscall2(int, ioprio_get, int, which, int, who)
344 #endif
345 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
346 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
347 #endif
348 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
349 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
350 #endif
351 
352 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
353 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
354           unsigned long, idx1, unsigned long, idx2)
355 #endif
356 
357 /*
358  * It is assumed that struct statx is architecture independent.
359  */
360 #if defined(TARGET_NR_statx) && defined(__NR_statx)
361 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
362           unsigned int, mask, struct target_statx *, statxbuf)
363 #endif
364 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
365 _syscall2(int, membarrier, int, cmd, int, flags)
366 #endif
367 
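/* Each entry maps one open(2) flag between guest and host encodings:
 * { guest mask, guest value, host mask, host value }.  The table is walked
 * by the target_to_host_bitmask()/host_to_target_bitmask() helpers. */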
368 static bitmask_transtbl fcntl_flags_tbl[] = {
369   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
370   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
371   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
372   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
373   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
374   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
375   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
376   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
377   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
378   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
379   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
380   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
381   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
382 #if defined(O_DIRECT)
383   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
384 #endif
385 #if defined(O_NOATIME)
386   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
387 #endif
388 #if defined(O_CLOEXEC)
389   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
390 #endif
391 #if defined(O_PATH)
392   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
393 #endif
394 #if defined(O_TMPFILE)
395   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
396 #endif
397   /* Don't terminate the list prematurely on 64-bit host+guest.  */
398 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
399   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
400 #endif
401   { 0, 0, 0, 0 }
402 };
403 
404 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
405 
406 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
407 #if defined(__NR_utimensat)
408 #define __NR_sys_utimensat __NR_utimensat
409 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
410           const struct timespec *,tsp,int,flags)
411 #else
412 static int sys_utimensat(int dirfd, const char *pathname,
413                          const struct timespec times[2], int flags)
414 {
415     errno = ENOSYS;
416     return -1;
417 }
418 #endif
419 #endif /* TARGET_NR_utimensat */
420 
421 #ifdef TARGET_NR_renameat2
422 #if defined(__NR_renameat2)
423 #define __NR_sys_renameat2 __NR_renameat2
424 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
425           const char *, new, unsigned int, flags)
426 #else
427 static int sys_renameat2(int oldfd, const char *old,
428                          int newfd, const char *new, int flags)
429 {
430     if (flags == 0) {
431         return renameat(oldfd, old, newfd, new);
432     }
433     errno = ENOSYS;
434     return -1;
435 }
436 #endif
437 #endif /* TARGET_NR_renameat2 */
438 
439 #ifdef CONFIG_INOTIFY
440 #include <sys/inotify.h>
441 
442 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
443 static int sys_inotify_init(void)
444 {
445   return (inotify_init());
446 }
447 #endif
448 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
449 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
450 {
451   return (inotify_add_watch(fd, pathname, mask));
452 }
453 #endif
454 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
455 static int sys_inotify_rm_watch(int fd, int32_t wd)
456 {
457   return (inotify_rm_watch(fd, wd));
458 }
459 #endif
460 #ifdef CONFIG_INOTIFY1
461 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
462 static int sys_inotify_init1(int flags)
463 {
464   return (inotify_init1(flags));
465 }
466 #endif
467 #endif
468 #else
469 /* Userspace can usually survive at runtime without inotify */
470 #undef TARGET_NR_inotify_init
471 #undef TARGET_NR_inotify_init1
472 #undef TARGET_NR_inotify_add_watch
473 #undef TARGET_NR_inotify_rm_watch
474 #endif /* CONFIG_INOTIFY  */
475 
476 #if defined(TARGET_NR_prlimit64)
477 #ifndef __NR_prlimit64
478 # define __NR_prlimit64 -1
479 #endif
480 #define __NR_sys_prlimit64 __NR_prlimit64
481 /* The glibc rlimit structure may not match the one used by the raw syscall */
482 struct host_rlimit64 {
483     uint64_t rlim_cur;
484     uint64_t rlim_max;
485 };
486 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
487           const struct host_rlimit64 *, new_limit,
488           struct host_rlimit64 *, old_limit)
489 #endif
490 
491 
492 #if defined(TARGET_NR_timer_create)
493 /* Maximum of 32 active POSIX timers allowed at any one time. */
494 static timer_t g_posix_timers[32] = { 0, } ;
495 
496 static inline int next_free_host_timer(void)
497 {
498     int k ;
499     /* FIXME: Does finding the next free slot require a lock? */
500     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
501         if (g_posix_timers[k] == 0) {
502             g_posix_timers[k] = (timer_t) 1;
503             return k;
504         }
505     }
506     return -1;
507 }
508 #endif
509 
510 #define ERRNO_TABLE_SIZE 1200
511 
512 /* target_to_host_errno_table[] is initialized from
513  * host_to_target_errno_table[] in syscall_init(). */
514 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
515 };
516 
517 /*
518  * This list is the union of errno values overridden in asm-<arch>/errno.h
519  * minus the errnos that are not actually generic to all archs.
520  */
521 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
522     [EAGAIN]		= TARGET_EAGAIN,
523     [EIDRM]		= TARGET_EIDRM,
524     [ECHRNG]		= TARGET_ECHRNG,
525     [EL2NSYNC]		= TARGET_EL2NSYNC,
526     [EL3HLT]		= TARGET_EL3HLT,
527     [EL3RST]		= TARGET_EL3RST,
528     [ELNRNG]		= TARGET_ELNRNG,
529     [EUNATCH]		= TARGET_EUNATCH,
530     [ENOCSI]		= TARGET_ENOCSI,
531     [EL2HLT]		= TARGET_EL2HLT,
532     [EDEADLK]		= TARGET_EDEADLK,
533     [ENOLCK]		= TARGET_ENOLCK,
534     [EBADE]		= TARGET_EBADE,
535     [EBADR]		= TARGET_EBADR,
536     [EXFULL]		= TARGET_EXFULL,
537     [ENOANO]		= TARGET_ENOANO,
538     [EBADRQC]		= TARGET_EBADRQC,
539     [EBADSLT]		= TARGET_EBADSLT,
540     [EBFONT]		= TARGET_EBFONT,
541     [ENOSTR]		= TARGET_ENOSTR,
542     [ENODATA]		= TARGET_ENODATA,
543     [ETIME]		= TARGET_ETIME,
544     [ENOSR]		= TARGET_ENOSR,
545     [ENONET]		= TARGET_ENONET,
546     [ENOPKG]		= TARGET_ENOPKG,
547     [EREMOTE]		= TARGET_EREMOTE,
548     [ENOLINK]		= TARGET_ENOLINK,
549     [EADV]		= TARGET_EADV,
550     [ESRMNT]		= TARGET_ESRMNT,
551     [ECOMM]		= TARGET_ECOMM,
552     [EPROTO]		= TARGET_EPROTO,
553     [EDOTDOT]		= TARGET_EDOTDOT,
554     [EMULTIHOP]		= TARGET_EMULTIHOP,
555     [EBADMSG]		= TARGET_EBADMSG,
556     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
557     [EOVERFLOW]		= TARGET_EOVERFLOW,
558     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
559     [EBADFD]		= TARGET_EBADFD,
560     [EREMCHG]		= TARGET_EREMCHG,
561     [ELIBACC]		= TARGET_ELIBACC,
562     [ELIBBAD]		= TARGET_ELIBBAD,
563     [ELIBSCN]		= TARGET_ELIBSCN,
564     [ELIBMAX]		= TARGET_ELIBMAX,
565     [ELIBEXEC]		= TARGET_ELIBEXEC,
566     [EILSEQ]		= TARGET_EILSEQ,
567     [ENOSYS]		= TARGET_ENOSYS,
568     [ELOOP]		= TARGET_ELOOP,
569     [ERESTART]		= TARGET_ERESTART,
570     [ESTRPIPE]		= TARGET_ESTRPIPE,
571     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
572     [EUSERS]		= TARGET_EUSERS,
573     [ENOTSOCK]		= TARGET_ENOTSOCK,
574     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
575     [EMSGSIZE]		= TARGET_EMSGSIZE,
576     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
577     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
578     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
579     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
580     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
581     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
582     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
583     [EADDRINUSE]	= TARGET_EADDRINUSE,
584     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
585     [ENETDOWN]		= TARGET_ENETDOWN,
586     [ENETUNREACH]	= TARGET_ENETUNREACH,
587     [ENETRESET]		= TARGET_ENETRESET,
588     [ECONNABORTED]	= TARGET_ECONNABORTED,
589     [ECONNRESET]	= TARGET_ECONNRESET,
590     [ENOBUFS]		= TARGET_ENOBUFS,
591     [EISCONN]		= TARGET_EISCONN,
592     [ENOTCONN]		= TARGET_ENOTCONN,
593     [EUCLEAN]		= TARGET_EUCLEAN,
594     [ENOTNAM]		= TARGET_ENOTNAM,
595     [ENAVAIL]		= TARGET_ENAVAIL,
596     [EISNAM]		= TARGET_EISNAM,
597     [EREMOTEIO]		= TARGET_EREMOTEIO,
598     [EDQUOT]            = TARGET_EDQUOT,
599     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
600     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
601     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
602     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
603     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
604     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
605     [EALREADY]		= TARGET_EALREADY,
606     [EINPROGRESS]	= TARGET_EINPROGRESS,
607     [ESTALE]		= TARGET_ESTALE,
608     [ECANCELED]		= TARGET_ECANCELED,
609     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
610     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
611 #ifdef ENOKEY
612     [ENOKEY]		= TARGET_ENOKEY,
613 #endif
614 #ifdef EKEYEXPIRED
615     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
616 #endif
617 #ifdef EKEYREVOKED
618     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
619 #endif
620 #ifdef EKEYREJECTED
621     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
622 #endif
623 #ifdef EOWNERDEAD
624     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
625 #endif
626 #ifdef ENOTRECOVERABLE
627     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
628 #endif
629 #ifdef ENOMSG
630     [ENOMSG]            = TARGET_ENOMSG,
631 #endif
632 #ifdef ERFKILL
633     [ERFKILL]           = TARGET_ERFKILL,
634 #endif
635 #ifdef EHWPOISON
636     [EHWPOISON]         = TARGET_EHWPOISON,
637 #endif
638 };
639 
640 static inline int host_to_target_errno(int err)
641 {
642     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
643         host_to_target_errno_table[err]) {
644         return host_to_target_errno_table[err];
645     }
646     return err;
647 }
648 
649 static inline int target_to_host_errno(int err)
650 {
651     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
652         target_to_host_errno_table[err]) {
653         return target_to_host_errno_table[err];
654     }
655     return err;
656 }
657 
658 static inline abi_long get_errno(abi_long ret)
659 {
660     if (ret == -1)
661         return -host_to_target_errno(errno);
662     else
663         return ret;
664 }
665 
666 const char *target_strerror(int err)
667 {
668     if (err == TARGET_ERESTARTSYS) {
669         return "To be restarted";
670     }
671     if (err == TARGET_QEMU_ESIGRETURN) {
672         return "Successful exit from sigreturn";
673     }
674 
675     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
676         return NULL;
677     }
678     return strerror(target_to_host_errno(err));
679 }
680 
681 #define safe_syscall0(type, name) \
682 static type safe_##name(void) \
683 { \
684     return safe_syscall(__NR_##name); \
685 }
686 
687 #define safe_syscall1(type, name, type1, arg1) \
688 static type safe_##name(type1 arg1) \
689 { \
690     return safe_syscall(__NR_##name, arg1); \
691 }
692 
693 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
694 static type safe_##name(type1 arg1, type2 arg2) \
695 { \
696     return safe_syscall(__NR_##name, arg1, arg2); \
697 }
698 
699 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
700 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
701 { \
702     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
703 }
704 
705 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
706     type4, arg4) \
707 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
708 { \
709     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
710 }
711 
712 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
713     type4, arg4, type5, arg5) \
714 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
715     type5 arg5) \
716 { \
717     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
718 }
719 
720 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
721     type4, arg4, type5, arg5, type6, arg6) \
722 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
723     type5 arg5, type6 arg6) \
724 { \
725     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
726 }
727 
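/*
 * The wrappers below (safe_read(), safe_write(), safe_openat(), ...) route
 * blocking host syscalls through safe_syscall() instead of calling libc, so
 * that a guest signal arriving around the syscall can interrupt it cleanly
 * and the guest syscall can be restarted (see the safe_syscall()
 * implementation for the exact contract).
 */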
728 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
729 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
730 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
731               int, flags, mode_t, mode)
732 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
733 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
734               struct rusage *, rusage)
735 #endif
736 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
737               int, options, struct rusage *, rusage)
738 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
739 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
740     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
741 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
742               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
743 #endif
744 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
745 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
746               struct timespec *, tsp, const sigset_t *, sigmask,
747               size_t, sigsetsize)
748 #endif
749 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
750               int, maxevents, int, timeout, const sigset_t *, sigmask,
751               size_t, sigsetsize)
752 #if defined(__NR_futex)
753 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
754               const struct timespec *,timeout,int *,uaddr2,int,val3)
755 #endif
756 #if defined(__NR_futex_time64)
757 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
758               const struct timespec *,timeout,int *,uaddr2,int,val3)
759 #endif
760 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
761 safe_syscall2(int, kill, pid_t, pid, int, sig)
762 safe_syscall2(int, tkill, int, tid, int, sig)
763 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
764 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
765 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
766 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
767               unsigned long, pos_l, unsigned long, pos_h)
768 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
769               unsigned long, pos_l, unsigned long, pos_h)
770 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
771               socklen_t, addrlen)
772 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
773               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
774 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
775               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
776 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
777 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
778 safe_syscall2(int, flock, int, fd, int, operation)
779 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
780 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
781               const struct timespec *, uts, size_t, sigsetsize)
782 #endif
783 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
784               int, flags)
785 #if defined(TARGET_NR_nanosleep)
786 safe_syscall2(int, nanosleep, const struct timespec *, req,
787               struct timespec *, rem)
788 #endif
789 #if defined(TARGET_NR_clock_nanosleep) || \
790     defined(TARGET_NR_clock_nanosleep_time64)
791 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
792               const struct timespec *, req, struct timespec *, rem)
793 #endif
794 #ifdef __NR_ipc
795 #ifdef __s390x__
796 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
797               void *, ptr)
798 #else
799 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
800               void *, ptr, long, fifth)
801 #endif
802 #endif
803 #ifdef __NR_msgsnd
804 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
805               int, flags)
806 #endif
807 #ifdef __NR_msgrcv
808 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
809               long, msgtype, int, flags)
810 #endif
811 #ifdef __NR_semtimedop
812 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
813               unsigned, nsops, const struct timespec *, timeout)
814 #endif
815 #if defined(TARGET_NR_mq_timedsend) || \
816     defined(TARGET_NR_mq_timedsend_time64)
817 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
818               size_t, len, unsigned, prio, const struct timespec *, timeout)
819 #endif
820 #if defined(TARGET_NR_mq_timedreceive) || \
821     defined(TARGET_NR_mq_timedreceive_time64)
822 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
823               size_t, len, unsigned *, prio, const struct timespec *, timeout)
824 #endif
825 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
826 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
827               int, outfd, loff_t *, poutoff, size_t, length,
828               unsigned int, flags)
829 #endif
830 
831 /* We do ioctl like this rather than via safe_syscall3 to preserve the
832  * "third argument might be integer or pointer or not present" behaviour of
833  * the libc function.
834  */
835 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
836 /* Similarly for fcntl. Note that callers must always:
837  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
838  *  use the flock64 struct rather than unsuffixed flock
839  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
840  */
841 #ifdef __NR_fcntl64
842 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
843 #else
844 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
845 #endif
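/* Illustrative use per the note above (values hypothetical):
 *     struct flock64 fl64 = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *     safe_fcntl(fd, F_SETLK64, &fl64);
 * i.e. always the 64-suffixed command constants and struct flock64. */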
846 
847 static inline int host_to_target_sock_type(int host_type)
848 {
849     int target_type;
850 
851     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
852     case SOCK_DGRAM:
853         target_type = TARGET_SOCK_DGRAM;
854         break;
855     case SOCK_STREAM:
856         target_type = TARGET_SOCK_STREAM;
857         break;
858     default:
859         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
860         break;
861     }
862 
863 #if defined(SOCK_CLOEXEC)
864     if (host_type & SOCK_CLOEXEC) {
865         target_type |= TARGET_SOCK_CLOEXEC;
866     }
867 #endif
868 
869 #if defined(SOCK_NONBLOCK)
870     if (host_type & SOCK_NONBLOCK) {
871         target_type |= TARGET_SOCK_NONBLOCK;
872     }
873 #endif
874 
875     return target_type;
876 }
877 
878 static abi_ulong target_brk;
879 static abi_ulong target_original_brk;
880 static abi_ulong brk_page;
881 
882 void target_set_brk(abi_ulong new_brk)
883 {
884     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
885     brk_page = HOST_PAGE_ALIGN(target_brk);
886 }
887 
888 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
889 #define DEBUGF_BRK(message, args...)
890 
891 /* do_brk() must return target values and target errnos. */
892 abi_long do_brk(abi_ulong new_brk)
893 {
894     abi_long mapped_addr;
895     abi_ulong new_alloc_size;
896 
897     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
898 
899     if (!new_brk) {
900         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
901         return target_brk;
902     }
903     if (new_brk < target_original_brk) {
904         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
905                    target_brk);
906         return target_brk;
907     }
908 
909     /* If the new brk is less than the highest page reserved to the
910      * target heap allocation, set it and we're almost done...  */
911     if (new_brk <= brk_page) {
912         /* Heap contents are initialized to zero, as for anonymous
913          * mapped pages.  */
914         if (new_brk > target_brk) {
915             memset(g2h(target_brk), 0, new_brk - target_brk);
916         }
917 	target_brk = new_brk;
918         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
919 	return target_brk;
920     }
921 
922     /* We need to allocate more memory after the brk... Note that
923      * we don't use MAP_FIXED because that will map over the top of
924      * any existing mapping (like the one with the host libc or qemu
925      * itself); instead we treat "mapped but at wrong address" as
926      * a failure and unmap again.
927      */
928     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
929     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
930                                         PROT_READ|PROT_WRITE,
931                                         MAP_ANON|MAP_PRIVATE, 0, 0));
932 
933     if (mapped_addr == brk_page) {
934         /* Heap contents are initialized to zero, as for anonymous
935          * mapped pages.  Technically the new pages are already
936          * initialized to zero since they *are* anonymous mapped
937          * pages, however we have to take care with the contents that
938          * come from the remaining part of the previous page: it may
939      * contain garbage data due to previous heap usage (grown
940      * then shrunk).  */
941         memset(g2h(target_brk), 0, brk_page - target_brk);
942 
943         target_brk = new_brk;
944         brk_page = HOST_PAGE_ALIGN(target_brk);
945         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
946             target_brk);
947         return target_brk;
948     } else if (mapped_addr != -1) {
949         /* Mapped but at wrong address, meaning there wasn't actually
950          * enough space for this brk.
951          */
952         target_munmap(mapped_addr, new_alloc_size);
953         mapped_addr = -1;
954         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
955     }
956     else {
957         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
958     }
959 
960 #if defined(TARGET_ALPHA)
961     /* We (partially) emulate OSF/1 on Alpha, which requires we
962        return a proper errno, not an unchanged brk value.  */
963     return -TARGET_ENOMEM;
964 #endif
965     /* For everything else, return the previous break. */
966     return target_brk;
967 }
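/* Worked example (hypothetical addresses, assuming 4 KiB host pages):
 * with brk_page == 0x40003000 and a request for new_brk == 0x40005800,
 * new_alloc_size = HOST_PAGE_ALIGN(0x2800) = 0x3000, mapped at brk_page;
 * on success target_brk becomes 0x40005800 and brk_page advances to
 * HOST_PAGE_ALIGN(0x40005800) = 0x40006000. */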
968 
969 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
970     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
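/* A guest fd_set is an array of abi_ulong bit words in guest byte order;
 * convert it bit by bit into the host fd_set representation. */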
971 static inline abi_long copy_from_user_fdset(fd_set *fds,
972                                             abi_ulong target_fds_addr,
973                                             int n)
974 {
975     int i, nw, j, k;
976     abi_ulong b, *target_fds;
977 
978     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
979     if (!(target_fds = lock_user(VERIFY_READ,
980                                  target_fds_addr,
981                                  sizeof(abi_ulong) * nw,
982                                  1)))
983         return -TARGET_EFAULT;
984 
985     FD_ZERO(fds);
986     k = 0;
987     for (i = 0; i < nw; i++) {
988         /* grab the abi_ulong */
989         __get_user(b, &target_fds[i]);
990         for (j = 0; j < TARGET_ABI_BITS; j++) {
991             /* check the bit inside the abi_ulong */
992             if ((b >> j) & 1)
993                 FD_SET(k, fds);
994             k++;
995         }
996     }
997 
998     unlock_user(target_fds, target_fds_addr, 0);
999 
1000     return 0;
1001 }
1002 
1003 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1004                                                  abi_ulong target_fds_addr,
1005                                                  int n)
1006 {
1007     if (target_fds_addr) {
1008         if (copy_from_user_fdset(fds, target_fds_addr, n))
1009             return -TARGET_EFAULT;
1010         *fds_ptr = fds;
1011     } else {
1012         *fds_ptr = NULL;
1013     }
1014     return 0;
1015 }
1016 
1017 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1018                                           const fd_set *fds,
1019                                           int n)
1020 {
1021     int i, nw, j, k;
1022     abi_long v;
1023     abi_ulong *target_fds;
1024 
1025     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1026     if (!(target_fds = lock_user(VERIFY_WRITE,
1027                                  target_fds_addr,
1028                                  sizeof(abi_ulong) * nw,
1029                                  0)))
1030         return -TARGET_EFAULT;
1031 
1032     k = 0;
1033     for (i = 0; i < nw; i++) {
1034         v = 0;
1035         for (j = 0; j < TARGET_ABI_BITS; j++) {
1036             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1037             k++;
1038         }
1039         __put_user(v, &target_fds[i]);
1040     }
1041 
1042     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1043 
1044     return 0;
1045 }
1046 #endif
1047 
1048 #if defined(__alpha__)
1049 #define HOST_HZ 1024
1050 #else
1051 #define HOST_HZ 100
1052 #endif
1053 
1054 static inline abi_long host_to_target_clock_t(long ticks)
1055 {
1056 #if HOST_HZ == TARGET_HZ
1057     return ticks;
1058 #else
1059     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1060 #endif
1061 }
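/* e.g. with HOST_HZ == 1024 (Alpha host) and a guest using TARGET_HZ == 100,
 * 1024 host ticks convert to (1024 * 100) / 1024 = 100 guest ticks. */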
1062 
1063 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1064                                              const struct rusage *rusage)
1065 {
1066     struct target_rusage *target_rusage;
1067 
1068     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1069         return -TARGET_EFAULT;
1070     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1071     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1072     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1073     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1074     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1075     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1076     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1077     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1078     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1079     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1080     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1081     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1082     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1083     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1084     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1085     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1086     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1087     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1088     unlock_user_struct(target_rusage, target_addr, 1);
1089 
1090     return 0;
1091 }
1092 
1093 #ifdef TARGET_NR_setrlimit
1094 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1095 {
1096     abi_ulong target_rlim_swap;
1097     rlim_t result;
1098 
1099     target_rlim_swap = tswapal(target_rlim);
1100     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1101         return RLIM_INFINITY;
1102 
1103     result = target_rlim_swap;
1104     if (target_rlim_swap != (rlim_t)result)
1105         return RLIM_INFINITY;
1106 
1107     return result;
1108 }
1109 #endif
1110 
1111 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1112 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1113 {
1114     abi_ulong target_rlim_swap;
1115     abi_ulong result;
1116 
1117     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1118         target_rlim_swap = TARGET_RLIM_INFINITY;
1119     else
1120         target_rlim_swap = rlim;
1121     result = tswapal(target_rlim_swap);
1122 
1123     return result;
1124 }
1125 #endif
1126 
1127 static inline int target_to_host_resource(int code)
1128 {
1129     switch (code) {
1130     case TARGET_RLIMIT_AS:
1131         return RLIMIT_AS;
1132     case TARGET_RLIMIT_CORE:
1133         return RLIMIT_CORE;
1134     case TARGET_RLIMIT_CPU:
1135         return RLIMIT_CPU;
1136     case TARGET_RLIMIT_DATA:
1137         return RLIMIT_DATA;
1138     case TARGET_RLIMIT_FSIZE:
1139         return RLIMIT_FSIZE;
1140     case TARGET_RLIMIT_LOCKS:
1141         return RLIMIT_LOCKS;
1142     case TARGET_RLIMIT_MEMLOCK:
1143         return RLIMIT_MEMLOCK;
1144     case TARGET_RLIMIT_MSGQUEUE:
1145         return RLIMIT_MSGQUEUE;
1146     case TARGET_RLIMIT_NICE:
1147         return RLIMIT_NICE;
1148     case TARGET_RLIMIT_NOFILE:
1149         return RLIMIT_NOFILE;
1150     case TARGET_RLIMIT_NPROC:
1151         return RLIMIT_NPROC;
1152     case TARGET_RLIMIT_RSS:
1153         return RLIMIT_RSS;
1154     case TARGET_RLIMIT_RTPRIO:
1155         return RLIMIT_RTPRIO;
1156     case TARGET_RLIMIT_SIGPENDING:
1157         return RLIMIT_SIGPENDING;
1158     case TARGET_RLIMIT_STACK:
1159         return RLIMIT_STACK;
1160     default:
1161         return code;
1162     }
1163 }
1164 
1165 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1166                                               abi_ulong target_tv_addr)
1167 {
1168     struct target_timeval *target_tv;
1169 
1170     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1171         return -TARGET_EFAULT;
1172     }
1173 
1174     __get_user(tv->tv_sec, &target_tv->tv_sec);
1175     __get_user(tv->tv_usec, &target_tv->tv_usec);
1176 
1177     unlock_user_struct(target_tv, target_tv_addr, 0);
1178 
1179     return 0;
1180 }
1181 
1182 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1183                                             const struct timeval *tv)
1184 {
1185     struct target_timeval *target_tv;
1186 
1187     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1188         return -TARGET_EFAULT;
1189     }
1190 
1191     __put_user(tv->tv_sec, &target_tv->tv_sec);
1192     __put_user(tv->tv_usec, &target_tv->tv_usec);
1193 
1194     unlock_user_struct(target_tv, target_tv_addr, 1);
1195 
1196     return 0;
1197 }
1198 
1199 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1200 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1201                                                 abi_ulong target_tv_addr)
1202 {
1203     struct target__kernel_sock_timeval *target_tv;
1204 
1205     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1206         return -TARGET_EFAULT;
1207     }
1208 
1209     __get_user(tv->tv_sec, &target_tv->tv_sec);
1210     __get_user(tv->tv_usec, &target_tv->tv_usec);
1211 
1212     unlock_user_struct(target_tv, target_tv_addr, 0);
1213 
1214     return 0;
1215 }
1216 #endif
1217 
1218 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1219                                               const struct timeval *tv)
1220 {
1221     struct target__kernel_sock_timeval *target_tv;
1222 
1223     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1224         return -TARGET_EFAULT;
1225     }
1226 
1227     __put_user(tv->tv_sec, &target_tv->tv_sec);
1228     __put_user(tv->tv_usec, &target_tv->tv_usec);
1229 
1230     unlock_user_struct(target_tv, target_tv_addr, 1);
1231 
1232     return 0;
1233 }
1234 
1235 #if defined(TARGET_NR_futex) || \
1236     defined(TARGET_NR_rt_sigtimedwait) || \
1237     defined(TARGET_NR_pselect6) || \
1238     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1239     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1240     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1241     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1242     defined(TARGET_NR_timer_settime) || \
1243     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1244 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1245                                                abi_ulong target_addr)
1246 {
1247     struct target_timespec *target_ts;
1248 
1249     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1250         return -TARGET_EFAULT;
1251     }
1252     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1253     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1254     unlock_user_struct(target_ts, target_addr, 0);
1255     return 0;
1256 }
1257 #endif
1258 
1259 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1260     defined(TARGET_NR_timer_settime64) || \
1261     defined(TARGET_NR_mq_timedsend_time64) || \
1262     defined(TARGET_NR_mq_timedreceive_time64) || \
1263     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1264     defined(TARGET_NR_clock_nanosleep_time64) || \
1265     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1266     defined(TARGET_NR_utimensat) || \
1267     defined(TARGET_NR_utimensat_time64) || \
1268     defined(TARGET_NR_semtimedop_time64) || \
1269     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1270 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1271                                                  abi_ulong target_addr)
1272 {
1273     struct target__kernel_timespec *target_ts;
1274 
1275     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1276         return -TARGET_EFAULT;
1277     }
1278     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1279     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1280     /* in 32bit mode, this drops the padding */
1281     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1282     unlock_user_struct(target_ts, target_addr, 0);
1283     return 0;
1284 }
1285 #endif
1286 
1287 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1288                                                struct timespec *host_ts)
1289 {
1290     struct target_timespec *target_ts;
1291 
1292     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1293         return -TARGET_EFAULT;
1294     }
1295     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1296     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1297     unlock_user_struct(target_ts, target_addr, 1);
1298     return 0;
1299 }
1300 
1301 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1302                                                  struct timespec *host_ts)
1303 {
1304     struct target__kernel_timespec *target_ts;
1305 
1306     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1307         return -TARGET_EFAULT;
1308     }
1309     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1310     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1311     unlock_user_struct(target_ts, target_addr, 1);
1312     return 0;
1313 }
1314 
1315 #if defined(TARGET_NR_gettimeofday)
1316 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1317                                              struct timezone *tz)
1318 {
1319     struct target_timezone *target_tz;
1320 
1321     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1322         return -TARGET_EFAULT;
1323     }
1324 
1325     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1326     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1327 
1328     unlock_user_struct(target_tz, target_tz_addr, 1);
1329 
1330     return 0;
1331 }
1332 #endif
1333 
1334 #if defined(TARGET_NR_settimeofday)
1335 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1336                                                abi_ulong target_tz_addr)
1337 {
1338     struct target_timezone *target_tz;
1339 
1340     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1341         return -TARGET_EFAULT;
1342     }
1343 
1344     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1345     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1346 
1347     unlock_user_struct(target_tz, target_tz_addr, 0);
1348 
1349     return 0;
1350 }
1351 #endif
1352 
1353 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1354 #include <mqueue.h>
1355 
1356 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1357                                               abi_ulong target_mq_attr_addr)
1358 {
1359     struct target_mq_attr *target_mq_attr;
1360 
1361     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1362                           target_mq_attr_addr, 1))
1363         return -TARGET_EFAULT;
1364 
1365     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1366     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1367     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1368     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1369 
1370     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1371 
1372     return 0;
1373 }
1374 
1375 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1376                                             const struct mq_attr *attr)
1377 {
1378     struct target_mq_attr *target_mq_attr;
1379 
1380     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1381                           target_mq_attr_addr, 0))
1382         return -TARGET_EFAULT;
1383 
1384     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1385     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1386     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1387     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1388 
1389     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1390 
1391     return 0;
1392 }
1393 #endif
1394 
1395 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1396 /* do_select() must return target values and target errnos. */
1397 static abi_long do_select(int n,
1398                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1399                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1400 {
1401     fd_set rfds, wfds, efds;
1402     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1403     struct timeval tv;
1404     struct timespec ts, *ts_ptr;
1405     abi_long ret;
1406 
1407     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1408     if (ret) {
1409         return ret;
1410     }
1411     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1412     if (ret) {
1413         return ret;
1414     }
1415     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1416     if (ret) {
1417         return ret;
1418     }
1419 
1420     if (target_tv_addr) {
1421         if (copy_from_user_timeval(&tv, target_tv_addr))
1422             return -TARGET_EFAULT;
1423         ts.tv_sec = tv.tv_sec;
1424         ts.tv_nsec = tv.tv_usec * 1000;
1425         ts_ptr = &ts;
1426     } else {
1427         ts_ptr = NULL;
1428     }
1429 
1430     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1431                                   ts_ptr, NULL));
1432 
1433     if (!is_error(ret)) {
1434         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1435             return -TARGET_EFAULT;
1436         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1437             return -TARGET_EFAULT;
1438         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1439             return -TARGET_EFAULT;
1440 
1441         if (target_tv_addr) {
1442             tv.tv_sec = ts.tv_sec;
1443             tv.tv_usec = ts.tv_nsec / 1000;
1444             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1445                 return -TARGET_EFAULT;
1446             }
1447         }
1448     }
1449 
1450     return ret;
1451 }
1452 
1453 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1454 static abi_long do_old_select(abi_ulong arg1)
1455 {
1456     struct target_sel_arg_struct *sel;
1457     abi_ulong inp, outp, exp, tvp;
1458     long nsel;
1459 
1460     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1461         return -TARGET_EFAULT;
1462     }
1463 
1464     nsel = tswapal(sel->n);
1465     inp = tswapal(sel->inp);
1466     outp = tswapal(sel->outp);
1467     exp = tswapal(sel->exp);
1468     tvp = tswapal(sel->tvp);
1469 
1470     unlock_user_struct(sel, arg1, 0);
1471 
1472     return do_select(nsel, inp, outp, exp, tvp);
1473 }
1474 #endif
1475 #endif
1476 
1477 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1478 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1479                             abi_long arg4, abi_long arg5, abi_long arg6,
1480                             bool time64)
1481 {
1482     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1483     fd_set rfds, wfds, efds;
1484     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1485     struct timespec ts, *ts_ptr;
1486     abi_long ret;
1487 
1488     /*
1489      * The 6th arg is actually two args smashed together,
1490      * so we cannot use the C library.
1491      */
1492     sigset_t set;
1493     struct {
1494         sigset_t *set;
1495         size_t size;
1496     } sig, *sig_ptr;
1497 
1498     abi_ulong arg_sigset, arg_sigsize, *arg7;
1499     target_sigset_t *target_sigset;
1500 
1501     n = arg1;
1502     rfd_addr = arg2;
1503     wfd_addr = arg3;
1504     efd_addr = arg4;
1505     ts_addr = arg5;
1506 
1507     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1508     if (ret) {
1509         return ret;
1510     }
1511     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1512     if (ret) {
1513         return ret;
1514     }
1515     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1516     if (ret) {
1517         return ret;
1518     }
1519 
1520     /*
1521      * This takes a timespec, and not a timeval, so we cannot
1522      * use the do_select() helper ...
1523      */
1524     if (ts_addr) {
1525         if (time64) {
1526             if (target_to_host_timespec64(&ts, ts_addr)) {
1527                 return -TARGET_EFAULT;
1528             }
1529         } else {
1530             if (target_to_host_timespec(&ts, ts_addr)) {
1531                 return -TARGET_EFAULT;
1532             }
1533         }
1534         ts_ptr = &ts;
1535     } else {
1536         ts_ptr = NULL;
1537     }
1538 
1539     /* Extract the two packed args for the sigset */
1540     if (arg6) {
1541         sig_ptr = &sig;
1542         sig.size = SIGSET_T_SIZE;
1543 
1544         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1545         if (!arg7) {
1546             return -TARGET_EFAULT;
1547         }
1548         arg_sigset = tswapal(arg7[0]);
1549         arg_sigsize = tswapal(arg7[1]);
1550         unlock_user(arg7, arg6, 0);
1551 
1552         if (arg_sigset) {
1553             sig.set = &set;
1554             if (arg_sigsize != sizeof(*target_sigset)) {
1555                 /* Like the kernel, we enforce correct size sigsets */
1556                 return -TARGET_EINVAL;
1557             }
1558             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1559                                       sizeof(*target_sigset), 1);
1560             if (!target_sigset) {
1561                 return -TARGET_EFAULT;
1562             }
1563             target_to_host_sigset(&set, target_sigset);
1564             unlock_user(target_sigset, arg_sigset, 0);
1565         } else {
1566             sig.set = NULL;
1567         }
1568     } else {
1569         sig_ptr = NULL;
1570     }
1571 
1572     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1573                                   ts_ptr, sig_ptr));
1574 
1575     if (!is_error(ret)) {
1576         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1577             return -TARGET_EFAULT;
1578         }
1579         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1580             return -TARGET_EFAULT;
1581         }
1582         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1583             return -TARGET_EFAULT;
1584         }
1585         if (time64) {
1586             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1587                 return -TARGET_EFAULT;
1588             }
1589         } else {
1590             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1591                 return -TARGET_EFAULT;
1592             }
1593         }
1594     }
1595     return ret;
1596 }
1597 #endif
1598 
1599 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1600     defined(TARGET_NR_ppoll_time64)
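/*
 * Common helper for poll(2), ppoll(2) and ppoll_time64(2).  The guest
 * pollfd array is converted fd/events-wise into a host array on the
 * stack, and revents are swapped back on success.  For ppoll,
 * arg3/arg4/arg5 carry the timespec, sigset pointer and sigset size;
 * for plain poll, arg3 is a timeout in milliseconds which is converted
 * to a timespec below (negative means wait forever).
 */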
1601 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1602                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1603 {
1604     struct target_pollfd *target_pfd;
1605     unsigned int nfds = arg2;
1606     struct pollfd *pfd;
1607     unsigned int i;
1608     abi_long ret;
1609 
1610     pfd = NULL;
1611     target_pfd = NULL;
1612     if (nfds) {
1613         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1614             return -TARGET_EINVAL;
1615         }
1616         target_pfd = lock_user(VERIFY_WRITE, arg1,
1617                                sizeof(struct target_pollfd) * nfds, 1);
1618         if (!target_pfd) {
1619             return -TARGET_EFAULT;
1620         }
1621 
1622         pfd = alloca(sizeof(struct pollfd) * nfds);
1623         for (i = 0; i < nfds; i++) {
1624             pfd[i].fd = tswap32(target_pfd[i].fd);
1625             pfd[i].events = tswap16(target_pfd[i].events);
1626         }
1627     }
1628     if (ppoll) {
1629         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1630         target_sigset_t *target_set;
1631         sigset_t _set, *set = &_set;
1632 
1633         if (arg3) {
1634             if (time64) {
1635                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1636                     unlock_user(target_pfd, arg1, 0);
1637                     return -TARGET_EFAULT;
1638                 }
1639             } else {
1640                 if (target_to_host_timespec(timeout_ts, arg3)) {
1641                     unlock_user(target_pfd, arg1, 0);
1642                     return -TARGET_EFAULT;
1643                 }
1644             }
1645         } else {
1646             timeout_ts = NULL;
1647         }
1648 
1649         if (arg4) {
1650             if (arg5 != sizeof(target_sigset_t)) {
1651                 unlock_user(target_pfd, arg1, 0);
1652                 return -TARGET_EINVAL;
1653             }
1654 
1655             target_set = lock_user(VERIFY_READ, arg4,
1656                                    sizeof(target_sigset_t), 1);
1657             if (!target_set) {
1658                 unlock_user(target_pfd, arg1, 0);
1659                 return -TARGET_EFAULT;
1660             }
1661             target_to_host_sigset(set, target_set);
1662         } else {
1663             set = NULL;
1664         }
1665 
1666         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1667                                    set, SIGSET_T_SIZE));
1668 
1669         if (!is_error(ret) && arg3) {
1670             if (time64) {
1671                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1672                     return -TARGET_EFAULT;
1673                 }
1674             } else {
1675                 if (host_to_target_timespec(arg3, timeout_ts)) {
1676                     return -TARGET_EFAULT;
1677                 }
1678             }
1679         }
1680         if (arg4) {
1681             unlock_user(target_set, arg4, 0);
1682         }
1683     } else {
1684         struct timespec ts, *pts;
1685
1686         if (arg3 >= 0) {
1687             /* Convert ms to secs, ns */
1688             ts.tv_sec = arg3 / 1000;
1689             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1690             pts = &ts;
1691         } else {
1692             /* A negative poll() timeout means "infinite" */
1693             pts = NULL;
1694         }
1695         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1696     }
1697 
1698     if (!is_error(ret)) {
1699         for (i = 0; i < nfds; i++) {
1700             target_pfd[i].revents = tswap16(pfd[i].revents);
1701         }
1702     }
1703     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1704     return ret;
1705 }
1706 #endif
1707 
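/*
 * pipe(2)/pipe2(2) helpers.  do_pipe2() simply forwards to the host
 * pipe2() when available; do_pipe() additionally honours the historical
 * ABI of several targets (Alpha, MIPS, SH4, SPARC) where the original
 * pipe syscall returns the second descriptor in a register instead of
 * writing both descriptors into the user buffer.
 */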
1708 static abi_long do_pipe2(int host_pipe[], int flags)
1709 {
1710 #ifdef CONFIG_PIPE2
1711     return pipe2(host_pipe, flags);
1712 #else
1713     return -ENOSYS;
1714 #endif
1715 }
1716 
1717 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1718                         int flags, int is_pipe2)
1719 {
1720     int host_pipe[2];
1721     abi_long ret;
1722     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1723 
1724     if (is_error(ret))
1725         return get_errno(ret);
1726 
1727     /* Several targets have special calling conventions for the original
1728        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1729     if (!is_pipe2) {
1730 #if defined(TARGET_ALPHA)
1731         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1732         return host_pipe[0];
1733 #elif defined(TARGET_MIPS)
1734         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1735         return host_pipe[0];
1736 #elif defined(TARGET_SH4)
1737         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1738         return host_pipe[0];
1739 #elif defined(TARGET_SPARC)
1740         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1741         return host_pipe[0];
1742 #endif
1743     }
1744 
1745     if (put_user_s32(host_pipe[0], pipedes)
1746         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1747         return -TARGET_EFAULT;
1748     return get_errno(ret);
1749 }
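/*
 * Convert a guest struct ip_mreq/ip_mreqn (IP_ADD_MEMBERSHIP and
 * friends) into the host's struct ip_mreqn.  The multicast and
 * interface addresses are already in network byte order and are copied
 * verbatim; only imr_ifindex needs byte-swapping, and only when the
 * caller passed the longer ip_mreqn form.
 */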
1750 
1751 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1752                                               abi_ulong target_addr,
1753                                               socklen_t len)
1754 {
1755     struct target_ip_mreqn *target_smreqn;
1756 
1757     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1758     if (!target_smreqn)
1759         return -TARGET_EFAULT;
1760     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1761     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1762     if (len == sizeof(struct target_ip_mreqn))
1763         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1764     unlock_user(target_smreqn, target_addr, 0);
1765 
1766     return 0;
1767 }
1768 
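/*
 * Copy a sockaddr from guest memory to a host buffer, byte-swapping
 * sa_family and any family-specific fields (netlink pid/groups, packet
 * ifindex/hatype).  AF_UNIX addresses with an unterminated sun_path are
 * given one extra byte so the NUL is included, and over-long ones are
 * clamped, before the copy.  Sockets with a registered fd translator
 * delegate to their own conversion hook instead.
 */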
1769 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1770                                                abi_ulong target_addr,
1771                                                socklen_t len)
1772 {
1773     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1774     sa_family_t sa_family;
1775     struct target_sockaddr *target_saddr;
1776 
1777     if (fd_trans_target_to_host_addr(fd)) {
1778         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1779     }
1780 
1781     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1782     if (!target_saddr)
1783         return -TARGET_EFAULT;
1784 
1785     sa_family = tswap16(target_saddr->sa_family);
1786 
1787     /* Oops. The caller might send an incomplete sun_path; sun_path
1788      * must be terminated by \0 (see the manual page), but
1789      * unfortunately it is quite common to specify sockaddr_un
1790      * length as "strlen(x->sun_path)" when it should be
1791      * "strlen(...) + 1". We'll fix that here if needed.
1792      * The Linux kernel has a similar workaround.
1793      */
1794 
1795     if (sa_family == AF_UNIX) {
1796         if (len < unix_maxlen && len > 0) {
1797             char *cp = (char *)target_saddr;
1798
1799             if (cp[len - 1] && !cp[len])
1800                 len++;
1801         }
1802         if (len > unix_maxlen)
1803             len = unix_maxlen;
1804     }
1805 
1806     memcpy(addr, target_saddr, len);
1807     addr->sa_family = sa_family;
1808     if (sa_family == AF_NETLINK) {
1809         struct sockaddr_nl *nladdr;
1810 
1811         nladdr = (struct sockaddr_nl *)addr;
1812         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1813         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1814     } else if (sa_family == AF_PACKET) {
1815         struct target_sockaddr_ll *lladdr;
1816
1817         lladdr = (struct target_sockaddr_ll *)addr;
1818         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1819         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1820     }
1821     unlock_user(target_saddr, target_addr, 0);
1822 
1823     return 0;
1824 }
1825 
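/*
 * Reverse direction: copy a host sockaddr back into guest memory,
 * byte-swapping sa_family plus the netlink, packet and IPv6 scope-id
 * fields when the buffer is large enough to contain them.
 */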
1826 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1827                                                struct sockaddr *addr,
1828                                                socklen_t len)
1829 {
1830     struct target_sockaddr *target_saddr;
1831 
1832     if (len == 0) {
1833         return 0;
1834     }
1835     assert(addr);
1836 
1837     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1838     if (!target_saddr)
1839         return -TARGET_EFAULT;
1840     memcpy(target_saddr, addr, len);
1841     if (len >= offsetof(struct target_sockaddr, sa_family) +
1842         sizeof(target_saddr->sa_family)) {
1843         target_saddr->sa_family = tswap16(addr->sa_family);
1844     }
1845     if (addr->sa_family == AF_NETLINK &&
1846         len >= sizeof(struct target_sockaddr_nl)) {
1847         struct target_sockaddr_nl *target_nl =
1848                (struct target_sockaddr_nl *)target_saddr;
1849         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1850         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1851     } else if (addr->sa_family == AF_PACKET) {
1852         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1853         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1854         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1855     } else if (addr->sa_family == AF_INET6 &&
1856                len >= sizeof(struct target_sockaddr_in6)) {
1857         struct target_sockaddr_in6 *target_in6 =
1858                (struct target_sockaddr_in6 *)target_saddr;
1859         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1860     }
1861     unlock_user(target_saddr, target_addr, len);
1862 
1863     return 0;
1864 }
1865 
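/*
 * Convert sendmsg(2) ancillary data from guest to host layout.  Each
 * target cmsghdr is re-packed with host CMSG_LEN/CMSG_SPACE alignment;
 * SCM_RIGHTS fd arrays and SCM_CREDENTIALS payloads are converted field
 * by field, anything else is copied through unchanged with a LOG_UNIMP
 * warning.
 */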
1866 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1867                                            struct target_msghdr *target_msgh)
1868 {
1869     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1870     abi_long msg_controllen;
1871     abi_ulong target_cmsg_addr;
1872     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1873     socklen_t space = 0;
1874 
1875     msg_controllen = tswapal(target_msgh->msg_controllen);
1876     if (msg_controllen < sizeof (struct target_cmsghdr))
1877         goto the_end;
1878     target_cmsg_addr = tswapal(target_msgh->msg_control);
1879     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1880     target_cmsg_start = target_cmsg;
1881     if (!target_cmsg)
1882         return -TARGET_EFAULT;
1883 
1884     while (cmsg && target_cmsg) {
1885         void *data = CMSG_DATA(cmsg);
1886         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1887 
1888         int len = tswapal(target_cmsg->cmsg_len)
1889             - sizeof(struct target_cmsghdr);
1890 
1891         space += CMSG_SPACE(len);
1892         if (space > msgh->msg_controllen) {
1893             space -= CMSG_SPACE(len);
1894             /* This is a QEMU bug, since we allocated the payload
1895              * area ourselves (unlike overflow in host-to-target
1896              * conversion, which is just the guest giving us a buffer
1897              * that's too small). It can't happen for the payload types
1898              * we currently support; if it becomes an issue in future
1899              * we would need to improve our allocation strategy to
1900              * something more intelligent than "twice the size of the
1901              * target buffer we're reading from".
1902              */
1903             qemu_log_mask(LOG_UNIMP,
1904                           ("Unsupported ancillary data %d/%d: "
1905                            "unhandled msg size\n"),
1906                           tswap32(target_cmsg->cmsg_level),
1907                           tswap32(target_cmsg->cmsg_type));
1908             break;
1909         }
1910 
1911         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1912             cmsg->cmsg_level = SOL_SOCKET;
1913         } else {
1914             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1915         }
1916         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1917         cmsg->cmsg_len = CMSG_LEN(len);
1918 
1919         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1920             int *fd = (int *)data;
1921             int *target_fd = (int *)target_data;
1922             int i, numfds = len / sizeof(int);
1923 
1924             for (i = 0; i < numfds; i++) {
1925                 __get_user(fd[i], target_fd + i);
1926             }
1927         } else if (cmsg->cmsg_level == SOL_SOCKET
1928                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1929             struct ucred *cred = (struct ucred *)data;
1930             struct target_ucred *target_cred =
1931                 (struct target_ucred *)target_data;
1932 
1933             __get_user(cred->pid, &target_cred->pid);
1934             __get_user(cred->uid, &target_cred->uid);
1935             __get_user(cred->gid, &target_cred->gid);
1936         } else {
1937             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1938                           cmsg->cmsg_level, cmsg->cmsg_type);
1939             memcpy(data, target_data, len);
1940         }
1941 
1942         cmsg = CMSG_NXTHDR(msgh, cmsg);
1943         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1944                                          target_cmsg_start);
1945     }
1946     unlock_user(target_cmsg, target_cmsg_addr, 0);
1947  the_end:
1948     msgh->msg_controllen = space;
1949     return 0;
1950 }
1951 
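/*
 * Convert received ancillary data from host to guest layout (recvmsg).
 * Payloads whose size differs between host and target (currently only
 * SO_TIMESTAMP's struct timeval) adjust tgt_len; if the guest buffer is
 * too small the data is truncated and MSG_CTRUNC is reported, matching
 * the kernel's put_cmsg() behaviour.
 */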
1952 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1953                                            struct msghdr *msgh)
1954 {
1955     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1956     abi_long msg_controllen;
1957     abi_ulong target_cmsg_addr;
1958     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1959     socklen_t space = 0;
1960 
1961     msg_controllen = tswapal(target_msgh->msg_controllen);
1962     if (msg_controllen < sizeof (struct target_cmsghdr))
1963         goto the_end;
1964     target_cmsg_addr = tswapal(target_msgh->msg_control);
1965     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1966     target_cmsg_start = target_cmsg;
1967     if (!target_cmsg)
1968         return -TARGET_EFAULT;
1969 
1970     while (cmsg && target_cmsg) {
1971         void *data = CMSG_DATA(cmsg);
1972         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1973 
1974         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1975         int tgt_len, tgt_space;
1976 
1977         /* We never copy a half-header but may copy half-data;
1978          * this is Linux's behaviour in put_cmsg(). Note that
1979          * truncation here is a guest problem (which we report
1980          * to the guest via the CTRUNC bit), unlike truncation
1981          * in target_to_host_cmsg, which is a QEMU bug.
1982          */
1983         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1984             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1985             break;
1986         }
1987 
1988         if (cmsg->cmsg_level == SOL_SOCKET) {
1989             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1990         } else {
1991             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1992         }
1993         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1994 
1995         /* Payload types which need a different size of payload on
1996          * the target must adjust tgt_len here.
1997          */
1998         tgt_len = len;
1999         switch (cmsg->cmsg_level) {
2000         case SOL_SOCKET:
2001             switch (cmsg->cmsg_type) {
2002             case SO_TIMESTAMP:
2003                 tgt_len = sizeof(struct target_timeval);
2004                 break;
2005             default:
2006                 break;
2007             }
2008             break;
2009         default:
2010             break;
2011         }
2012 
2013         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
2014             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
2015             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
2016         }
2017 
2018         /* We must now copy-and-convert len bytes of payload
2019          * into tgt_len bytes of destination space. Bear in mind
2020          * that in both source and destination we may be dealing
2021          * with a truncated value!
2022          */
2023         switch (cmsg->cmsg_level) {
2024         case SOL_SOCKET:
2025             switch (cmsg->cmsg_type) {
2026             case SCM_RIGHTS:
2027             {
2028                 int *fd = (int *)data;
2029                 int *target_fd = (int *)target_data;
2030                 int i, numfds = tgt_len / sizeof(int);
2031 
2032                 for (i = 0; i < numfds; i++) {
2033                     __put_user(fd[i], target_fd + i);
2034                 }
2035                 break;
2036             }
2037             case SO_TIMESTAMP:
2038             {
2039                 struct timeval *tv = (struct timeval *)data;
2040                 struct target_timeval *target_tv =
2041                     (struct target_timeval *)target_data;
2042 
2043                 if (len != sizeof(struct timeval) ||
2044                     tgt_len != sizeof(struct target_timeval)) {
2045                     goto unimplemented;
2046                 }
2047 
2048                 /* copy struct timeval to target */
2049                 __put_user(tv->tv_sec, &target_tv->tv_sec);
2050                 __put_user(tv->tv_usec, &target_tv->tv_usec);
2051                 break;
2052             }
2053             case SCM_CREDENTIALS:
2054             {
2055                 struct ucred *cred = (struct ucred *)data;
2056                 struct target_ucred *target_cred =
2057                     (struct target_ucred *)target_data;
2058 
2059                 __put_user(cred->pid, &target_cred->pid);
2060                 __put_user(cred->uid, &target_cred->uid);
2061                 __put_user(cred->gid, &target_cred->gid);
2062                 break;
2063             }
2064             default:
2065                 goto unimplemented;
2066             }
2067             break;
2068 
2069         case SOL_IP:
2070             switch (cmsg->cmsg_type) {
2071             case IP_TTL:
2072             {
2073                 uint32_t *v = (uint32_t *)data;
2074                 uint32_t *t_int = (uint32_t *)target_data;
2075 
2076                 if (len != sizeof(uint32_t) ||
2077                     tgt_len != sizeof(uint32_t)) {
2078                     goto unimplemented;
2079                 }
2080                 __put_user(*v, t_int);
2081                 break;
2082             }
2083             case IP_RECVERR:
2084             {
2085                 struct errhdr_t {
2086                    struct sock_extended_err ee;
2087                    struct sockaddr_in offender;
2088                 };
2089                 struct errhdr_t *errh = (struct errhdr_t *)data;
2090                 struct errhdr_t *target_errh =
2091                     (struct errhdr_t *)target_data;
2092 
2093                 if (len != sizeof(struct errhdr_t) ||
2094                     tgt_len != sizeof(struct errhdr_t)) {
2095                     goto unimplemented;
2096                 }
2097                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2098                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2099                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2100                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2101                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2102                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2103                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2104                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2105                     (void *) &errh->offender, sizeof(errh->offender));
2106                 break;
2107             }
2108             default:
2109                 goto unimplemented;
2110             }
2111             break;
2112 
2113         case SOL_IPV6:
2114             switch (cmsg->cmsg_type) {
2115             case IPV6_HOPLIMIT:
2116             {
2117                 uint32_t *v = (uint32_t *)data;
2118                 uint32_t *t_int = (uint32_t *)target_data;
2119 
2120                 if (len != sizeof(uint32_t) ||
2121                     tgt_len != sizeof(uint32_t)) {
2122                     goto unimplemented;
2123                 }
2124                 __put_user(*v, t_int);
2125                 break;
2126             }
2127             case IPV6_RECVERR:
2128             {
2129                 struct errhdr6_t {
2130                    struct sock_extended_err ee;
2131                    struct sockaddr_in6 offender;
2132                 };
2133                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2134                 struct errhdr6_t *target_errh =
2135                     (struct errhdr6_t *)target_data;
2136 
2137                 if (len != sizeof(struct errhdr6_t) ||
2138                     tgt_len != sizeof(struct errhdr6_t)) {
2139                     goto unimplemented;
2140                 }
2141                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2142                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2143                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2144                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2145                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2146                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2147                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2148                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2149                     (void *) &errh->offender, sizeof(errh->offender));
2150                 break;
2151             }
2152             default:
2153                 goto unimplemented;
2154             }
2155             break;
2156 
2157         default:
2158         unimplemented:
2159             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2160                           cmsg->cmsg_level, cmsg->cmsg_type);
2161             memcpy(target_data, data, MIN(len, tgt_len));
2162             if (tgt_len > len) {
2163                 memset(target_data + len, 0, tgt_len - len);
2164             }
2165         }
2166 
2167         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2168         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2169         if (msg_controllen < tgt_space) {
2170             tgt_space = msg_controllen;
2171         }
2172         msg_controllen -= tgt_space;
2173         space += tgt_space;
2174         cmsg = CMSG_NXTHDR(msgh, cmsg);
2175         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2176                                          target_cmsg_start);
2177     }
2178     unlock_user(target_cmsg, target_cmsg_addr, space);
2179  the_end:
2180     target_msgh->msg_controllen = tswapal(space);
2181     return 0;
2182 }
2183 
2184 /* do_setsockopt() must return target values and target errnos. */
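/*
 * The level/optname values arrive as target constants; most map 1:1 to
 * the host's, but TARGET_SOL_SOCKET options are translated explicitly
 * and structured option values (timevals, mreqs, BPF filters, ...) are
 * byte-swapped element by element.  For illustration only (hypothetical
 * guest code, not part of this file):
 *
 *     struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
 *     setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * would reach this function as TARGET_SOL_SOCKET/TARGET_SO_RCVTIMEO
 * with a target_timeval that copy_from_user_timeval() converts below.
 */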
2185 static abi_long do_setsockopt(int sockfd, int level, int optname,
2186                               abi_ulong optval_addr, socklen_t optlen)
2187 {
2188     abi_long ret;
2189     int val;
2190     struct ip_mreqn *ip_mreq;
2191     struct ip_mreq_source *ip_mreq_source;
2192 
2193     switch(level) {
2194     case SOL_TCP:
2195     case SOL_UDP:
2196         /* TCP and UDP options all take an 'int' value.  */
2197         if (optlen < sizeof(uint32_t))
2198             return -TARGET_EINVAL;
2199 
2200         if (get_user_u32(val, optval_addr))
2201             return -TARGET_EFAULT;
2202         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2203         break;
2204     case SOL_IP:
2205         switch(optname) {
2206         case IP_TOS:
2207         case IP_TTL:
2208         case IP_HDRINCL:
2209         case IP_ROUTER_ALERT:
2210         case IP_RECVOPTS:
2211         case IP_RETOPTS:
2212         case IP_PKTINFO:
2213         case IP_MTU_DISCOVER:
2214         case IP_RECVERR:
2215         case IP_RECVTTL:
2216         case IP_RECVTOS:
2217 #ifdef IP_FREEBIND
2218         case IP_FREEBIND:
2219 #endif
2220         case IP_MULTICAST_TTL:
2221         case IP_MULTICAST_LOOP:
2222             val = 0;
2223             if (optlen >= sizeof(uint32_t)) {
2224                 if (get_user_u32(val, optval_addr))
2225                     return -TARGET_EFAULT;
2226             } else if (optlen >= 1) {
2227                 if (get_user_u8(val, optval_addr))
2228                     return -TARGET_EFAULT;
2229             }
2230             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2231             break;
2232         case IP_ADD_MEMBERSHIP:
2233         case IP_DROP_MEMBERSHIP:
2234             if (optlen < sizeof (struct target_ip_mreq) ||
2235                 optlen > sizeof (struct target_ip_mreqn))
2236                 return -TARGET_EINVAL;
2237 
2238             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2239             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2240             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2241             break;
2242 
2243         case IP_BLOCK_SOURCE:
2244         case IP_UNBLOCK_SOURCE:
2245         case IP_ADD_SOURCE_MEMBERSHIP:
2246         case IP_DROP_SOURCE_MEMBERSHIP:
2247             if (optlen != sizeof (struct target_ip_mreq_source))
2248                 return -TARGET_EINVAL;
2249 
2250             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
2251             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2252             unlock_user(ip_mreq_source, optval_addr, 0);
2253             break;
2254 
2255         default:
2256             goto unimplemented;
2257         }
2258         break;
2259     case SOL_IPV6:
2260         switch (optname) {
2261         case IPV6_MTU_DISCOVER:
2262         case IPV6_MTU:
2263         case IPV6_V6ONLY:
2264         case IPV6_RECVPKTINFO:
2265         case IPV6_UNICAST_HOPS:
2266         case IPV6_MULTICAST_HOPS:
2267         case IPV6_MULTICAST_LOOP:
2268         case IPV6_RECVERR:
2269         case IPV6_RECVHOPLIMIT:
2270         case IPV6_2292HOPLIMIT:
2271         case IPV6_CHECKSUM:
2272         case IPV6_ADDRFORM:
2273         case IPV6_2292PKTINFO:
2274         case IPV6_RECVTCLASS:
2275         case IPV6_RECVRTHDR:
2276         case IPV6_2292RTHDR:
2277         case IPV6_RECVHOPOPTS:
2278         case IPV6_2292HOPOPTS:
2279         case IPV6_RECVDSTOPTS:
2280         case IPV6_2292DSTOPTS:
2281         case IPV6_TCLASS:
2282         case IPV6_ADDR_PREFERENCES:
2283 #ifdef IPV6_RECVPATHMTU
2284         case IPV6_RECVPATHMTU:
2285 #endif
2286 #ifdef IPV6_TRANSPARENT
2287         case IPV6_TRANSPARENT:
2288 #endif
2289 #ifdef IPV6_FREEBIND
2290         case IPV6_FREEBIND:
2291 #endif
2292 #ifdef IPV6_RECVORIGDSTADDR
2293         case IPV6_RECVORIGDSTADDR:
2294 #endif
2295             val = 0;
2296             if (optlen < sizeof(uint32_t)) {
2297                 return -TARGET_EINVAL;
2298             }
2299             if (get_user_u32(val, optval_addr)) {
2300                 return -TARGET_EFAULT;
2301             }
2302             ret = get_errno(setsockopt(sockfd, level, optname,
2303                                        &val, sizeof(val)));
2304             break;
2305         case IPV6_PKTINFO:
2306         {
2307             struct in6_pktinfo pki;
2308 
2309             if (optlen < sizeof(pki)) {
2310                 return -TARGET_EINVAL;
2311             }
2312 
2313             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2314                 return -TARGET_EFAULT;
2315             }
2316 
2317             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2318 
2319             ret = get_errno(setsockopt(sockfd, level, optname,
2320                                        &pki, sizeof(pki)));
2321             break;
2322         }
2323         case IPV6_ADD_MEMBERSHIP:
2324         case IPV6_DROP_MEMBERSHIP:
2325         {
2326             struct ipv6_mreq ipv6mreq;
2327 
2328             if (optlen < sizeof(ipv6mreq)) {
2329                 return -TARGET_EINVAL;
2330             }
2331 
2332             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2333                 return -TARGET_EFAULT;
2334             }
2335 
2336             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2337 
2338             ret = get_errno(setsockopt(sockfd, level, optname,
2339                                        &ipv6mreq, sizeof(ipv6mreq)));
2340             break;
2341         }
2342         default:
2343             goto unimplemented;
2344         }
2345         break;
2346     case SOL_ICMPV6:
2347         switch (optname) {
2348         case ICMPV6_FILTER:
2349         {
2350             struct icmp6_filter icmp6f;
2351 
2352             if (optlen > sizeof(icmp6f)) {
2353                 optlen = sizeof(icmp6f);
2354             }
2355 
2356             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2357                 return -TARGET_EFAULT;
2358             }
2359 
2360             for (val = 0; val < 8; val++) {
2361                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2362             }
2363 
2364             ret = get_errno(setsockopt(sockfd, level, optname,
2365                                        &icmp6f, optlen));
2366             break;
2367         }
2368         default:
2369             goto unimplemented;
2370         }
2371         break;
2372     case SOL_RAW:
2373         switch (optname) {
2374         case ICMP_FILTER:
2375         case IPV6_CHECKSUM:
2376             /* these take a u32 value */
2377             if (optlen < sizeof(uint32_t)) {
2378                 return -TARGET_EINVAL;
2379             }
2380 
2381             if (get_user_u32(val, optval_addr)) {
2382                 return -TARGET_EFAULT;
2383             }
2384             ret = get_errno(setsockopt(sockfd, level, optname,
2385                                        &val, sizeof(val)));
2386             break;
2387 
2388         default:
2389             goto unimplemented;
2390         }
2391         break;
2392 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2393     case SOL_ALG:
2394         switch (optname) {
2395         case ALG_SET_KEY:
2396         {
2397             char *alg_key = g_try_malloc(optlen);
2398 
2399             if (!alg_key) {
2400                 return -TARGET_ENOMEM;
2401             }
2402             if (copy_from_user(alg_key, optval_addr, optlen)) {
2403                 g_free(alg_key);
2404                 return -TARGET_EFAULT;
2405             }
2406             ret = get_errno(setsockopt(sockfd, level, optname,
2407                                        alg_key, optlen));
2408             g_free(alg_key);
2409             break;
2410         }
2411         case ALG_SET_AEAD_AUTHSIZE:
2412         {
2413             ret = get_errno(setsockopt(sockfd, level, optname,
2414                                        NULL, optlen));
2415             break;
2416         }
2417         default:
2418             goto unimplemented;
2419         }
2420         break;
2421 #endif
2422     case TARGET_SOL_SOCKET:
2423         switch (optname) {
2424         case TARGET_SO_RCVTIMEO:
2425         {
2426                 struct timeval tv;
2427 
2428                 optname = SO_RCVTIMEO;
2429 
2430 set_timeout:
2431                 if (optlen != sizeof(struct target_timeval)) {
2432                     return -TARGET_EINVAL;
2433                 }
2434 
2435                 if (copy_from_user_timeval(&tv, optval_addr)) {
2436                     return -TARGET_EFAULT;
2437                 }
2438 
2439                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2440                                 &tv, sizeof(tv)));
2441                 return ret;
2442         }
2443         case TARGET_SO_SNDTIMEO:
2444                 optname = SO_SNDTIMEO;
2445                 goto set_timeout;
2446         case TARGET_SO_ATTACH_FILTER:
2447         {
2448                 struct target_sock_fprog *tfprog;
2449                 struct target_sock_filter *tfilter;
2450                 struct sock_fprog fprog;
2451                 struct sock_filter *filter;
2452                 int i;
2453 
2454                 if (optlen != sizeof(*tfprog)) {
2455                     return -TARGET_EINVAL;
2456                 }
2457                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2458                     return -TARGET_EFAULT;
2459                 }
2460                 if (!lock_user_struct(VERIFY_READ, tfilter,
2461                                       tswapal(tfprog->filter), 0)) {
2462                     unlock_user_struct(tfprog, optval_addr, 1);
2463                     return -TARGET_EFAULT;
2464                 }
2465 
2466                 fprog.len = tswap16(tfprog->len);
2467                 filter = g_try_new(struct sock_filter, fprog.len);
2468                 if (filter == NULL) {
2469                     unlock_user_struct(tfilter, tfprog->filter, 1);
2470                     unlock_user_struct(tfprog, optval_addr, 1);
2471                     return -TARGET_ENOMEM;
2472                 }
2473                 for (i = 0; i < fprog.len; i++) {
2474                     filter[i].code = tswap16(tfilter[i].code);
2475                     filter[i].jt = tfilter[i].jt;
2476                     filter[i].jf = tfilter[i].jf;
2477                     filter[i].k = tswap32(tfilter[i].k);
2478                 }
2479                 fprog.filter = filter;
2480 
2481                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2482                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2483                 g_free(filter);
2484 
2485                 unlock_user_struct(tfilter, tfprog->filter, 1);
2486                 unlock_user_struct(tfprog, optval_addr, 1);
2487                 return ret;
2488         }
2489         case TARGET_SO_BINDTODEVICE:
2490         {
2491                 char *dev_ifname, *addr_ifname;
2492
2493                 if (optlen > IFNAMSIZ - 1) {
2494                     optlen = IFNAMSIZ - 1;
2495                 }
2496                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2497                 if (!dev_ifname) {
2498                     return -TARGET_EFAULT;
2499                 }
2500                 optname = SO_BINDTODEVICE;
2501                 addr_ifname = alloca(IFNAMSIZ);
2502                 memcpy(addr_ifname, dev_ifname, optlen);
2503                 addr_ifname[optlen] = 0;
2504                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2505                                            addr_ifname, optlen));
2506                 unlock_user(dev_ifname, optval_addr, 0);
2507                 return ret;
2508         }
2509         case TARGET_SO_LINGER:
2510         {
2511                 struct linger lg;
2512                 struct target_linger *tlg;
2513 
2514                 if (optlen != sizeof(struct target_linger)) {
2515                     return -TARGET_EINVAL;
2516                 }
2517                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2518                     return -TARGET_EFAULT;
2519                 }
2520                 __get_user(lg.l_onoff, &tlg->l_onoff);
2521                 __get_user(lg.l_linger, &tlg->l_linger);
2522                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2523                                 &lg, sizeof(lg)));
2524                 unlock_user_struct(tlg, optval_addr, 0);
2525                 return ret;
2526         }
2527             /* Options with 'int' argument.  */
2528         case TARGET_SO_DEBUG:
2529 		optname = SO_DEBUG;
2530 		break;
2531         case TARGET_SO_REUSEADDR:
2532 		optname = SO_REUSEADDR;
2533 		break;
2534 #ifdef SO_REUSEPORT
2535         case TARGET_SO_REUSEPORT:
2536                 optname = SO_REUSEPORT;
2537                 break;
2538 #endif
2539         case TARGET_SO_TYPE:
2540 		optname = SO_TYPE;
2541 		break;
2542         case TARGET_SO_ERROR:
2543 		optname = SO_ERROR;
2544 		break;
2545         case TARGET_SO_DONTROUTE:
2546 		optname = SO_DONTROUTE;
2547 		break;
2548         case TARGET_SO_BROADCAST:
2549 		optname = SO_BROADCAST;
2550 		break;
2551         case TARGET_SO_SNDBUF:
2552 		optname = SO_SNDBUF;
2553 		break;
2554         case TARGET_SO_SNDBUFFORCE:
2555                 optname = SO_SNDBUFFORCE;
2556                 break;
2557         case TARGET_SO_RCVBUF:
2558 		optname = SO_RCVBUF;
2559 		break;
2560         case TARGET_SO_RCVBUFFORCE:
2561                 optname = SO_RCVBUFFORCE;
2562                 break;
2563         case TARGET_SO_KEEPALIVE:
2564 		optname = SO_KEEPALIVE;
2565 		break;
2566         case TARGET_SO_OOBINLINE:
2567 		optname = SO_OOBINLINE;
2568 		break;
2569         case TARGET_SO_NO_CHECK:
2570 		optname = SO_NO_CHECK;
2571 		break;
2572         case TARGET_SO_PRIORITY:
2573 		optname = SO_PRIORITY;
2574 		break;
2575 #ifdef SO_BSDCOMPAT
2576         case TARGET_SO_BSDCOMPAT:
2577 		optname = SO_BSDCOMPAT;
2578 		break;
2579 #endif
2580         case TARGET_SO_PASSCRED:
2581 		optname = SO_PASSCRED;
2582 		break;
2583         case TARGET_SO_PASSSEC:
2584                 optname = SO_PASSSEC;
2585                 break;
2586         case TARGET_SO_TIMESTAMP:
2587 		optname = SO_TIMESTAMP;
2588 		break;
2589         case TARGET_SO_RCVLOWAT:
2590 		optname = SO_RCVLOWAT;
2591 		break;
2592         default:
2593             goto unimplemented;
2594         }
2595 	if (optlen < sizeof(uint32_t))
2596             return -TARGET_EINVAL;
2597 
2598 	if (get_user_u32(val, optval_addr))
2599             return -TARGET_EFAULT;
2600 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2601         break;
2602 #ifdef SOL_NETLINK
2603     case SOL_NETLINK:
2604         switch (optname) {
2605         case NETLINK_PKTINFO:
2606         case NETLINK_ADD_MEMBERSHIP:
2607         case NETLINK_DROP_MEMBERSHIP:
2608         case NETLINK_BROADCAST_ERROR:
2609         case NETLINK_NO_ENOBUFS:
2610 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2611         case NETLINK_LISTEN_ALL_NSID:
2612         case NETLINK_CAP_ACK:
2613 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2614 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2615         case NETLINK_EXT_ACK:
2616 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2617 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2618         case NETLINK_GET_STRICT_CHK:
2619 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2620             break;
2621         default:
2622             goto unimplemented;
2623         }
2624         val = 0;
2625         if (optlen < sizeof(uint32_t)) {
2626             return -TARGET_EINVAL;
2627         }
2628         if (get_user_u32(val, optval_addr)) {
2629             return -TARGET_EFAULT;
2630         }
2631         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2632                                    sizeof(val)));
2633         break;
2634 #endif /* SOL_NETLINK */
2635     default:
2636     unimplemented:
2637         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2638                       level, optname);
2639         ret = -TARGET_ENOPROTOOPT;
2640     }
2641     return ret;
2642 }
2643 
2644 /* do_getsockopt() must return target values and target errnos. */
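/*
 * optlen is a value-result pointer in guest memory: the requested length
 * is read with get_user_u32(), the host value is fetched into a local
 * buffer, and both the (possibly shortened) length and the converted
 * value are written back.  Small IP/IPv6 options may be read back as a
 * single byte when the guest asked for less than sizeof(int), which is
 * why the 1-byte path exists below.
 */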
2645 static abi_long do_getsockopt(int sockfd, int level, int optname,
2646                               abi_ulong optval_addr, abi_ulong optlen)
2647 {
2648     abi_long ret;
2649     int len, val;
2650     socklen_t lv;
2651 
2652     switch(level) {
2653     case TARGET_SOL_SOCKET:
2654         level = SOL_SOCKET;
2655         switch (optname) {
2656         /* These don't just return a single integer */
2657         case TARGET_SO_PEERNAME:
2658             goto unimplemented;
2659         case TARGET_SO_RCVTIMEO: {
2660             struct timeval tv;
2661             socklen_t tvlen;
2662 
2663             optname = SO_RCVTIMEO;
2664 
2665 get_timeout:
2666             if (get_user_u32(len, optlen)) {
2667                 return -TARGET_EFAULT;
2668             }
2669             if (len < 0) {
2670                 return -TARGET_EINVAL;
2671             }
2672 
2673             tvlen = sizeof(tv);
2674             ret = get_errno(getsockopt(sockfd, level, optname,
2675                                        &tv, &tvlen));
2676             if (ret < 0) {
2677                 return ret;
2678             }
2679             if (len > sizeof(struct target_timeval)) {
2680                 len = sizeof(struct target_timeval);
2681             }
2682             if (copy_to_user_timeval(optval_addr, &tv)) {
2683                 return -TARGET_EFAULT;
2684             }
2685             if (put_user_u32(len, optlen)) {
2686                 return -TARGET_EFAULT;
2687             }
2688             break;
2689         }
2690         case TARGET_SO_SNDTIMEO:
2691             optname = SO_SNDTIMEO;
2692             goto get_timeout;
2693         case TARGET_SO_PEERCRED: {
2694             struct ucred cr;
2695             socklen_t crlen;
2696             struct target_ucred *tcr;
2697 
2698             if (get_user_u32(len, optlen)) {
2699                 return -TARGET_EFAULT;
2700             }
2701             if (len < 0) {
2702                 return -TARGET_EINVAL;
2703             }
2704 
2705             crlen = sizeof(cr);
2706             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2707                                        &cr, &crlen));
2708             if (ret < 0) {
2709                 return ret;
2710             }
2711             if (len > crlen) {
2712                 len = crlen;
2713             }
2714             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2715                 return -TARGET_EFAULT;
2716             }
2717             __put_user(cr.pid, &tcr->pid);
2718             __put_user(cr.uid, &tcr->uid);
2719             __put_user(cr.gid, &tcr->gid);
2720             unlock_user_struct(tcr, optval_addr, 1);
2721             if (put_user_u32(len, optlen)) {
2722                 return -TARGET_EFAULT;
2723             }
2724             break;
2725         }
2726         case TARGET_SO_PEERSEC: {
2727             char *name;
2728 
2729             if (get_user_u32(len, optlen)) {
2730                 return -TARGET_EFAULT;
2731             }
2732             if (len < 0) {
2733                 return -TARGET_EINVAL;
2734             }
2735             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2736             if (!name) {
2737                 return -TARGET_EFAULT;
2738             }
2739             lv = len;
2740             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2741                                        name, &lv));
2742             if (put_user_u32(lv, optlen)) {
2743                 ret = -TARGET_EFAULT;
2744             }
2745             unlock_user(name, optval_addr, lv);
2746             break;
2747         }
2748         case TARGET_SO_LINGER:
2749         {
2750             struct linger lg;
2751             socklen_t lglen;
2752             struct target_linger *tlg;
2753 
2754             if (get_user_u32(len, optlen)) {
2755                 return -TARGET_EFAULT;
2756             }
2757             if (len < 0) {
2758                 return -TARGET_EINVAL;
2759             }
2760 
2761             lglen = sizeof(lg);
2762             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2763                                        &lg, &lglen));
2764             if (ret < 0) {
2765                 return ret;
2766             }
2767             if (len > lglen) {
2768                 len = lglen;
2769             }
2770             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2771                 return -TARGET_EFAULT;
2772             }
2773             __put_user(lg.l_onoff, &tlg->l_onoff);
2774             __put_user(lg.l_linger, &tlg->l_linger);
2775             unlock_user_struct(tlg, optval_addr, 1);
2776             if (put_user_u32(len, optlen)) {
2777                 return -TARGET_EFAULT;
2778             }
2779             break;
2780         }
2781         /* Options with 'int' argument.  */
2782         case TARGET_SO_DEBUG:
2783             optname = SO_DEBUG;
2784             goto int_case;
2785         case TARGET_SO_REUSEADDR:
2786             optname = SO_REUSEADDR;
2787             goto int_case;
2788 #ifdef SO_REUSEPORT
2789         case TARGET_SO_REUSEPORT:
2790             optname = SO_REUSEPORT;
2791             goto int_case;
2792 #endif
2793         case TARGET_SO_TYPE:
2794             optname = SO_TYPE;
2795             goto int_case;
2796         case TARGET_SO_ERROR:
2797             optname = SO_ERROR;
2798             goto int_case;
2799         case TARGET_SO_DONTROUTE:
2800             optname = SO_DONTROUTE;
2801             goto int_case;
2802         case TARGET_SO_BROADCAST:
2803             optname = SO_BROADCAST;
2804             goto int_case;
2805         case TARGET_SO_SNDBUF:
2806             optname = SO_SNDBUF;
2807             goto int_case;
2808         case TARGET_SO_RCVBUF:
2809             optname = SO_RCVBUF;
2810             goto int_case;
2811         case TARGET_SO_KEEPALIVE:
2812             optname = SO_KEEPALIVE;
2813             goto int_case;
2814         case TARGET_SO_OOBINLINE:
2815             optname = SO_OOBINLINE;
2816             goto int_case;
2817         case TARGET_SO_NO_CHECK:
2818             optname = SO_NO_CHECK;
2819             goto int_case;
2820         case TARGET_SO_PRIORITY:
2821             optname = SO_PRIORITY;
2822             goto int_case;
2823 #ifdef SO_BSDCOMPAT
2824         case TARGET_SO_BSDCOMPAT:
2825             optname = SO_BSDCOMPAT;
2826             goto int_case;
2827 #endif
2828         case TARGET_SO_PASSCRED:
2829             optname = SO_PASSCRED;
2830             goto int_case;
2831         case TARGET_SO_TIMESTAMP:
2832             optname = SO_TIMESTAMP;
2833             goto int_case;
2834         case TARGET_SO_RCVLOWAT:
2835             optname = SO_RCVLOWAT;
2836             goto int_case;
2837         case TARGET_SO_ACCEPTCONN:
2838             optname = SO_ACCEPTCONN;
2839             goto int_case;
2840         case TARGET_SO_PROTOCOL:
2841             optname = SO_PROTOCOL;
2842             goto int_case;
2843         case TARGET_SO_DOMAIN:
2844             optname = SO_DOMAIN;
2845             goto int_case;
2846         default:
2847             goto int_case;
2848         }
2849         break;
2850     case SOL_TCP:
2851     case SOL_UDP:
2852         /* TCP and UDP options all take an 'int' value.  */
2853     int_case:
2854         if (get_user_u32(len, optlen))
2855             return -TARGET_EFAULT;
2856         if (len < 0)
2857             return -TARGET_EINVAL;
2858         lv = sizeof(lv);
2859         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2860         if (ret < 0)
2861             return ret;
2862         if (optname == SO_TYPE) {
2863             val = host_to_target_sock_type(val);
2864         }
2865         if (len > lv)
2866             len = lv;
2867         if (len == 4) {
2868             if (put_user_u32(val, optval_addr))
2869                 return -TARGET_EFAULT;
2870         } else {
2871             if (put_user_u8(val, optval_addr))
2872                 return -TARGET_EFAULT;
2873         }
2874         if (put_user_u32(len, optlen))
2875             return -TARGET_EFAULT;
2876         break;
2877     case SOL_IP:
2878         switch(optname) {
2879         case IP_TOS:
2880         case IP_TTL:
2881         case IP_HDRINCL:
2882         case IP_ROUTER_ALERT:
2883         case IP_RECVOPTS:
2884         case IP_RETOPTS:
2885         case IP_PKTINFO:
2886         case IP_MTU_DISCOVER:
2887         case IP_RECVERR:
2888         case IP_RECVTOS:
2889 #ifdef IP_FREEBIND
2890         case IP_FREEBIND:
2891 #endif
2892         case IP_MULTICAST_TTL:
2893         case IP_MULTICAST_LOOP:
2894             if (get_user_u32(len, optlen))
2895                 return -TARGET_EFAULT;
2896             if (len < 0)
2897                 return -TARGET_EINVAL;
2898             lv = sizeof(lv);
2899             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2900             if (ret < 0)
2901                 return ret;
2902             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2903                 len = 1;
2904                 if (put_user_u32(len, optlen)
2905                     || put_user_u8(val, optval_addr))
2906                     return -TARGET_EFAULT;
2907             } else {
2908                 if (len > sizeof(int))
2909                     len = sizeof(int);
2910                 if (put_user_u32(len, optlen)
2911                     || put_user_u32(val, optval_addr))
2912                     return -TARGET_EFAULT;
2913             }
2914             break;
2915         default:
2916             ret = -TARGET_ENOPROTOOPT;
2917             break;
2918         }
2919         break;
2920     case SOL_IPV6:
2921         switch (optname) {
2922         case IPV6_MTU_DISCOVER:
2923         case IPV6_MTU:
2924         case IPV6_V6ONLY:
2925         case IPV6_RECVPKTINFO:
2926         case IPV6_UNICAST_HOPS:
2927         case IPV6_MULTICAST_HOPS:
2928         case IPV6_MULTICAST_LOOP:
2929         case IPV6_RECVERR:
2930         case IPV6_RECVHOPLIMIT:
2931         case IPV6_2292HOPLIMIT:
2932         case IPV6_CHECKSUM:
2933         case IPV6_ADDRFORM:
2934         case IPV6_2292PKTINFO:
2935         case IPV6_RECVTCLASS:
2936         case IPV6_RECVRTHDR:
2937         case IPV6_2292RTHDR:
2938         case IPV6_RECVHOPOPTS:
2939         case IPV6_2292HOPOPTS:
2940         case IPV6_RECVDSTOPTS:
2941         case IPV6_2292DSTOPTS:
2942         case IPV6_TCLASS:
2943         case IPV6_ADDR_PREFERENCES:
2944 #ifdef IPV6_RECVPATHMTU
2945         case IPV6_RECVPATHMTU:
2946 #endif
2947 #ifdef IPV6_TRANSPARENT
2948         case IPV6_TRANSPARENT:
2949 #endif
2950 #ifdef IPV6_FREEBIND
2951         case IPV6_FREEBIND:
2952 #endif
2953 #ifdef IPV6_RECVORIGDSTADDR
2954         case IPV6_RECVORIGDSTADDR:
2955 #endif
2956             if (get_user_u32(len, optlen))
2957                 return -TARGET_EFAULT;
2958             if (len < 0)
2959                 return -TARGET_EINVAL;
2960             lv = sizeof(lv);
2961             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2962             if (ret < 0)
2963                 return ret;
2964             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2965                 len = 1;
2966                 if (put_user_u32(len, optlen)
2967                     || put_user_u8(val, optval_addr))
2968                     return -TARGET_EFAULT;
2969             } else {
2970                 if (len > sizeof(int))
2971                     len = sizeof(int);
2972                 if (put_user_u32(len, optlen)
2973                     || put_user_u32(val, optval_addr))
2974                     return -TARGET_EFAULT;
2975             }
2976             break;
2977         default:
2978             ret = -TARGET_ENOPROTOOPT;
2979             break;
2980         }
2981         break;
2982 #ifdef SOL_NETLINK
2983     case SOL_NETLINK:
2984         switch (optname) {
2985         case NETLINK_PKTINFO:
2986         case NETLINK_BROADCAST_ERROR:
2987         case NETLINK_NO_ENOBUFS:
2988 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2989         case NETLINK_LISTEN_ALL_NSID:
2990         case NETLINK_CAP_ACK:
2991 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2992 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2993         case NETLINK_EXT_ACK:
2994 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2995 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2996         case NETLINK_GET_STRICT_CHK:
2997 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2998             if (get_user_u32(len, optlen)) {
2999                 return -TARGET_EFAULT;
3000             }
3001             if (len != sizeof(val)) {
3002                 return -TARGET_EINVAL;
3003             }
3004             lv = len;
3005             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3006             if (ret < 0) {
3007                 return ret;
3008             }
3009             if (put_user_u32(lv, optlen)
3010                 || put_user_u32(val, optval_addr)) {
3011                 return -TARGET_EFAULT;
3012             }
3013             break;
3014 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
3015         case NETLINK_LIST_MEMBERSHIPS:
3016         {
3017             uint32_t *results;
3018             int i;
3019             if (get_user_u32(len, optlen)) {
3020                 return -TARGET_EFAULT;
3021             }
3022             if (len < 0) {
3023                 return -TARGET_EINVAL;
3024             }
3025             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
3026             if (!results) {
3027                 return -TARGET_EFAULT;
3028             }
3029             lv = len;
3030             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
3031             if (ret < 0) {
3032                 unlock_user(results, optval_addr, 0);
3033                 return ret;
3034             }
3035             /* swap host endianness to target endianness. */
3036             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
3037                 results[i] = tswap32(results[i]);
3038             }
3039             unlock_user(results, optval_addr, 0);
3040             if (put_user_u32(lv, optlen)) {
3041                 return -TARGET_EFAULT;
3042             }
3043             break;
3044         }
3045 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3046         default:
3047             goto unimplemented;
3048         }
3049         break;
3050 #endif /* SOL_NETLINK */
3051     default:
3052     unimplemented:
3053         qemu_log_mask(LOG_UNIMP,
3054                       "getsockopt level=%d optname=%d not yet supported\n",
3055                       level, optname);
3056         ret = -TARGET_EOPNOTSUPP;
3057         break;
3058     }
3059     return ret;
3060 }
3061 
3062 /* Convert target low/high pair representing file offset into the host
3063  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3064  * as the kernel doesn't handle them either.
3065  */
3066 static void target_to_host_low_high(abi_ulong tlow,
3067                                     abi_ulong thigh,
3068                                     unsigned long *hlow,
3069                                     unsigned long *hhigh)
3070 {
3071     uint64_t off = tlow |
3072         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3073         TARGET_LONG_BITS / 2;
3074 
3075     *hlow = off;
3076     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3077 }
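/*
 * Worked example (assuming a 32-bit target on a 64-bit host): a guest
 * offset of 0x100000000 arrives as tlow = 0, thigh = 1; the expression
 * above reassembles off = 0x100000000, so *hlow becomes 0x100000000 and
 * *hhigh becomes 0, ready to be passed as the host's low/high pair.
 */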
3078 
3079 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3080                                 abi_ulong count, int copy)
3081 {
3082     struct target_iovec *target_vec;
3083     struct iovec *vec;
3084     abi_ulong total_len, max_len;
3085     int i;
3086     int err = 0;
3087     bool bad_address = false;
3088 
3089     if (count == 0) {
3090         errno = 0;
3091         return NULL;
3092     }
3093     if (count > IOV_MAX) {
3094         errno = EINVAL;
3095         return NULL;
3096     }
3097 
3098     vec = g_try_new0(struct iovec, count);
3099     if (vec == NULL) {
3100         errno = ENOMEM;
3101         return NULL;
3102     }
3103 
3104     target_vec = lock_user(VERIFY_READ, target_addr,
3105                            count * sizeof(struct target_iovec), 1);
3106     if (target_vec == NULL) {
3107         err = EFAULT;
3108         goto fail2;
3109     }
3110 
3111     /* ??? If host page size > target page size, this will result in a
3112        value larger than what we can actually support.  */
3113     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3114     total_len = 0;
3115 
3116     for (i = 0; i < count; i++) {
3117         abi_ulong base = tswapal(target_vec[i].iov_base);
3118         abi_long len = tswapal(target_vec[i].iov_len);
3119 
3120         if (len < 0) {
3121             err = EINVAL;
3122             goto fail;
3123         } else if (len == 0) {
3124             /* Zero length pointer is ignored.  */
3125             vec[i].iov_base = 0;
3126         } else {
3127             vec[i].iov_base = lock_user(type, base, len, copy);
3128             /* If the first buffer pointer is bad, this is a fault.  But
3129              * subsequent bad buffers will result in a partial write; this
3130              * is realized by filling the vector with null pointers and
3131              * zero lengths. */
3132             if (!vec[i].iov_base) {
3133                 if (i == 0) {
3134                     err = EFAULT;
3135                     goto fail;
3136                 } else {
3137                     bad_address = true;
3138                 }
3139             }
3140             if (bad_address) {
3141                 len = 0;
3142             }
3143             if (len > max_len - total_len) {
3144                 len = max_len - total_len;
3145             }
3146         }
3147         vec[i].iov_len = len;
3148         total_len += len;
3149     }
3150 
3151     unlock_user(target_vec, target_addr, 0);
3152     return vec;
3153 
3154  fail:
3155     while (--i >= 0) {
3156         if (tswapal(target_vec[i].iov_len) > 0) {
3157             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3158         }
3159     }
3160     unlock_user(target_vec, target_addr, 0);
3161  fail2:
3162     g_free(vec);
3163     errno = err;
3164     return NULL;
3165 }
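/*
 * Illustration of the partial-transfer behaviour above: if the guest passes
 * three buffers and the second one is unmapped, only the first entry keeps
 * its real length; the bad entry and everything after it end up with a zero
 * iov_len, so the host readv()/writev() touches the first buffer only and
 * the guest sees a short count rather than an outright EFAULT.
 */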
3166 
3167 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3168                          abi_ulong count, int copy)
3169 {
3170     struct target_iovec *target_vec;
3171     int i;
3172 
3173     target_vec = lock_user(VERIFY_READ, target_addr,
3174                            count * sizeof(struct target_iovec), 1);
3175     if (target_vec) {
3176         for (i = 0; i < count; i++) {
3177             abi_ulong base = tswapal(target_vec[i].iov_base);
3178             abi_long len = tswapal(target_vec[i].iov_len);
3179             if (len < 0) {
3180                 break;
3181             }
3182             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3183         }
3184         unlock_user(target_vec, target_addr, 0);
3185     }
3186 
3187     g_free(vec);
3188 }
3189 
3190 static inline int target_to_host_sock_type(int *type)
3191 {
3192     int host_type = 0;
3193     int target_type = *type;
3194 
3195     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3196     case TARGET_SOCK_DGRAM:
3197         host_type = SOCK_DGRAM;
3198         break;
3199     case TARGET_SOCK_STREAM:
3200         host_type = SOCK_STREAM;
3201         break;
3202     default:
3203         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3204         break;
3205     }
3206     if (target_type & TARGET_SOCK_CLOEXEC) {
3207 #if defined(SOCK_CLOEXEC)
3208         host_type |= SOCK_CLOEXEC;
3209 #else
3210         return -TARGET_EINVAL;
3211 #endif
3212     }
3213     if (target_type & TARGET_SOCK_NONBLOCK) {
3214 #if defined(SOCK_NONBLOCK)
3215         host_type |= SOCK_NONBLOCK;
3216 #elif !defined(O_NONBLOCK)
3217         return -TARGET_EINVAL;
3218 #endif
3219     }
3220     *type = host_type;
3221     return 0;
3222 }
3223 
3224 /* Try to emulate socket type flags after socket creation.  */
3225 static int sock_flags_fixup(int fd, int target_type)
3226 {
3227 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3228     if (target_type & TARGET_SOCK_NONBLOCK) {
3229         int flags = fcntl(fd, F_GETFL);
3230         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3231             close(fd);
3232             return -TARGET_EINVAL;
3233         }
3234     }
3235 #endif
3236     return fd;
3237 }
3238 
3239 /* do_socket() Must return target values and target errnos. */
3240 static abi_long do_socket(int domain, int type, int protocol)
3241 {
3242     int target_type = type;
3243     int ret;
3244 
3245     ret = target_to_host_sock_type(&type);
3246     if (ret) {
3247         return ret;
3248     }
3249 
3250     if (domain == PF_NETLINK && !(
3251 #ifdef CONFIG_RTNETLINK
3252          protocol == NETLINK_ROUTE ||
3253 #endif
3254          protocol == NETLINK_KOBJECT_UEVENT ||
3255          protocol == NETLINK_AUDIT)) {
3256         return -TARGET_EPROTONOSUPPORT;
3257     }
3258 
3259     if (domain == AF_PACKET ||
3260         (domain == AF_INET && type == SOCK_PACKET)) {
3261         protocol = tswap16(protocol);
3262     }
3263 
3264     ret = get_errno(socket(domain, type, protocol));
3265     if (ret >= 0) {
3266         ret = sock_flags_fixup(ret, target_type);
3267         if (type == SOCK_PACKET) {
3268             /* Handle an obsolete case:
3269              * if the socket type is SOCK_PACKET, bind by name.
3270              */
3271             fd_trans_register(ret, &target_packet_trans);
3272         } else if (domain == PF_NETLINK) {
3273             switch (protocol) {
3274 #ifdef CONFIG_RTNETLINK
3275             case NETLINK_ROUTE:
3276                 fd_trans_register(ret, &target_netlink_route_trans);
3277                 break;
3278 #endif
3279             case NETLINK_KOBJECT_UEVENT:
3280                 /* nothing to do: messages are strings */
3281                 break;
3282             case NETLINK_AUDIT:
3283                 fd_trans_register(ret, &target_netlink_audit_trans);
3284                 break;
3285             default:
3286                 g_assert_not_reached();
3287             }
3288         }
3289     }
3290     return ret;
3291 }
3292 
3293 /* do_bind() Must return target values and target errnos. */
3294 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3295                         socklen_t addrlen)
3296 {
3297     void *addr;
3298     abi_long ret;
3299 
3300     if ((int)addrlen < 0) {
3301         return -TARGET_EINVAL;
3302     }
3303 
3304     addr = alloca(addrlen+1);
3305 
3306     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3307     if (ret)
3308         return ret;
3309 
3310     return get_errno(bind(sockfd, addr, addrlen));
3311 }
3312 
3313 /* do_connect() Must return target values and target errnos. */
3314 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3315                            socklen_t addrlen)
3316 {
3317     void *addr;
3318     abi_long ret;
3319 
3320     if ((int)addrlen < 0) {
3321         return -TARGET_EINVAL;
3322     }
3323 
3324     addr = alloca(addrlen+1);
3325 
3326     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3327     if (ret)
3328         return ret;
3329 
3330     return get_errno(safe_connect(sockfd, addr, addrlen));
3331 }
3332 
3333 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3334 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3335                                       int flags, int send)
3336 {
3337     abi_long ret, len;
3338     struct msghdr msg;
3339     abi_ulong count;
3340     struct iovec *vec;
3341     abi_ulong target_vec;
3342 
3343     if (msgp->msg_name) {
3344         msg.msg_namelen = tswap32(msgp->msg_namelen);
3345         msg.msg_name = alloca(msg.msg_namelen+1);
3346         ret = target_to_host_sockaddr(fd, msg.msg_name,
3347                                       tswapal(msgp->msg_name),
3348                                       msg.msg_namelen);
3349         if (ret == -TARGET_EFAULT) {
3350             /* For connected sockets msg_name and msg_namelen must
3351              * be ignored, so returning EFAULT immediately is wrong.
3352              * Instead, pass a bad msg_name to the host kernel, and
3353              * let it decide whether to return EFAULT or not.
3354              */
3355             msg.msg_name = (void *)-1;
3356         } else if (ret) {
3357             goto out2;
3358         }
3359     } else {
3360         msg.msg_name = NULL;
3361         msg.msg_namelen = 0;
3362     }
3363     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3364     msg.msg_control = alloca(msg.msg_controllen);
3365     memset(msg.msg_control, 0, msg.msg_controllen);
3366 
3367     msg.msg_flags = tswap32(msgp->msg_flags);
3368 
3369     count = tswapal(msgp->msg_iovlen);
3370     target_vec = tswapal(msgp->msg_iov);
3371 
3372     if (count > IOV_MAX) {
3373         /* sendmsg/recvmsg return a different errno for this condition than
3374          * readv/writev, so we must catch it here before lock_iovec() does.
3375          */
3376         ret = -TARGET_EMSGSIZE;
3377         goto out2;
3378     }
3379 
3380     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3381                      target_vec, count, send);
3382     if (vec == NULL) {
3383         ret = -host_to_target_errno(errno);
3384         goto out2;
3385     }
3386     msg.msg_iovlen = count;
3387     msg.msg_iov = vec;
3388 
3389     if (send) {
3390         if (fd_trans_target_to_host_data(fd)) {
3391             void *host_msg;
3392 
3393             host_msg = g_malloc(msg.msg_iov->iov_len);
3394             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3395             ret = fd_trans_target_to_host_data(fd)(host_msg,
3396                                                    msg.msg_iov->iov_len);
3397             if (ret >= 0) {
3398                 msg.msg_iov->iov_base = host_msg;
3399                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3400             }
3401             g_free(host_msg);
3402         } else {
3403             ret = target_to_host_cmsg(&msg, msgp);
3404             if (ret == 0) {
3405                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3406             }
3407         }
3408     } else {
3409         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3410         if (!is_error(ret)) {
3411             len = ret;
3412             if (fd_trans_host_to_target_data(fd)) {
3413                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3414                                                MIN(msg.msg_iov->iov_len, len));
3415             } else {
3416                 ret = host_to_target_cmsg(msgp, &msg);
3417             }
3418             if (!is_error(ret)) {
3419                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3420                 msgp->msg_flags = tswap32(msg.msg_flags);
3421                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3422                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3423                                     msg.msg_name, msg.msg_namelen);
3424                     if (ret) {
3425                         goto out;
3426                     }
3427                 }
3428 
3429                 ret = len;
3430             }
3431         }
3432     }
3433 
3434 out:
3435     unlock_iovec(vec, target_vec, count, !send);
3436 out2:
3437     return ret;
3438 }
3439 
3440 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3441                                int flags, int send)
3442 {
3443     abi_long ret;
3444     struct target_msghdr *msgp;
3445 
3446     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3447                           msgp,
3448                           target_msg,
3449                           send ? 1 : 0)) {
3450         return -TARGET_EFAULT;
3451     }
3452     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3453     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3454     return ret;
3455 }
3456 
3457 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3458  * so it might not have this *mmsg-specific flag either.
3459  */
3460 #ifndef MSG_WAITFORONE
3461 #define MSG_WAITFORONE 0x10000
3462 #endif
3463 
3464 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3465                                 unsigned int vlen, unsigned int flags,
3466                                 int send)
3467 {
3468     struct target_mmsghdr *mmsgp;
3469     abi_long ret = 0;
3470     int i;
3471 
3472     if (vlen > UIO_MAXIOV) {
3473         vlen = UIO_MAXIOV;
3474     }
3475 
3476     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3477     if (!mmsgp) {
3478         return -TARGET_EFAULT;
3479     }
3480 
3481     for (i = 0; i < vlen; i++) {
3482         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3483         if (is_error(ret)) {
3484             break;
3485         }
3486         mmsgp[i].msg_len = tswap32(ret);
3487         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3488         if (flags & MSG_WAITFORONE) {
3489             flags |= MSG_DONTWAIT;
3490         }
3491     }
3492 
3493     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3494 
3495     /* Return number of datagrams sent if we sent any at all;
3496      * otherwise return the error.
3497      */
3498     if (i) {
3499         return i;
3500     }
3501     return ret;
3502 }
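/*
 * Example of the semantics above: recvmmsg() with vlen = 4 and
 * MSG_WAITFORONE blocks only for the first datagram (MSG_DONTWAIT is added
 * afterwards); if the third iteration then fails with EAGAIN, the loop
 * breaks and the call still returns 2, i.e. the number of datagrams
 * already received.
 */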
3503 
3504 /* do_accept4() Must return target values and target errnos. */
3505 static abi_long do_accept4(int fd, abi_ulong target_addr,
3506                            abi_ulong target_addrlen_addr, int flags)
3507 {
3508     socklen_t addrlen, ret_addrlen;
3509     void *addr;
3510     abi_long ret;
3511     int host_flags;
3512 
3513     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3514 
3515     if (target_addr == 0) {
3516         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3517     }
3518 
3519     /* linux returns EFAULT if addrlen pointer is invalid */
3520     if (get_user_u32(addrlen, target_addrlen_addr))
3521         return -TARGET_EFAULT;
3522 
3523     if ((int)addrlen < 0) {
3524         return -TARGET_EINVAL;
3525     }
3526 
3527     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3528         return -TARGET_EFAULT;
3529 
3530     addr = alloca(addrlen);
3531 
3532     ret_addrlen = addrlen;
3533     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3534     if (!is_error(ret)) {
3535         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3536         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3537             ret = -TARGET_EFAULT;
3538         }
3539     }
3540     return ret;
3541 }
3542 
3543 /* do_getpeername() Must return target values and target errnos. */
3544 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3545                                abi_ulong target_addrlen_addr)
3546 {
3547     socklen_t addrlen, ret_addrlen;
3548     void *addr;
3549     abi_long ret;
3550 
3551     if (get_user_u32(addrlen, target_addrlen_addr))
3552         return -TARGET_EFAULT;
3553 
3554     if ((int)addrlen < 0) {
3555         return -TARGET_EINVAL;
3556     }
3557 
3558     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3559         return -TARGET_EFAULT;
3560 
3561     addr = alloca(addrlen);
3562 
3563     ret_addrlen = addrlen;
3564     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3565     if (!is_error(ret)) {
3566         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3567         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3568             ret = -TARGET_EFAULT;
3569         }
3570     }
3571     return ret;
3572 }
3573 
3574 /* do_getsockname() Must return target values and target errnos. */
3575 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3576                                abi_ulong target_addrlen_addr)
3577 {
3578     socklen_t addrlen, ret_addrlen;
3579     void *addr;
3580     abi_long ret;
3581 
3582     if (get_user_u32(addrlen, target_addrlen_addr))
3583         return -TARGET_EFAULT;
3584 
3585     if ((int)addrlen < 0) {
3586         return -TARGET_EINVAL;
3587     }
3588 
3589     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3590         return -TARGET_EFAULT;
3591 
3592     addr = alloca(addrlen);
3593 
3594     ret_addrlen = addrlen;
3595     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3596     if (!is_error(ret)) {
3597         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3598         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3599             ret = -TARGET_EFAULT;
3600         }
3601     }
3602     return ret;
3603 }
3604 
3605 /* do_socketpair() Must return target values and target errnos. */
3606 static abi_long do_socketpair(int domain, int type, int protocol,
3607                               abi_ulong target_tab_addr)
3608 {
3609     int tab[2];
3610     abi_long ret;
3611 
3612     target_to_host_sock_type(&type);
3613 
3614     ret = get_errno(socketpair(domain, type, protocol, tab));
3615     if (!is_error(ret)) {
3616         if (put_user_s32(tab[0], target_tab_addr)
3617             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3618             ret = -TARGET_EFAULT;
3619     }
3620     return ret;
3621 }
3622 
3623 /* do_sendto() Must return target values and target errnos. */
3624 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3625                           abi_ulong target_addr, socklen_t addrlen)
3626 {
3627     void *addr;
3628     void *host_msg;
3629     void *copy_msg = NULL;
3630     abi_long ret;
3631 
3632     if ((int)addrlen < 0) {
3633         return -TARGET_EINVAL;
3634     }
3635 
3636     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3637     if (!host_msg)
3638         return -TARGET_EFAULT;
3639     if (fd_trans_target_to_host_data(fd)) {
3640         copy_msg = host_msg;
3641         host_msg = g_malloc(len);
3642         memcpy(host_msg, copy_msg, len);
3643         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3644         if (ret < 0) {
3645             goto fail;
3646         }
3647     }
3648     if (target_addr) {
3649         addr = alloca(addrlen+1);
3650         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3651         if (ret) {
3652             goto fail;
3653         }
3654         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3655     } else {
3656         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3657     }
3658 fail:
3659     if (copy_msg) {
3660         g_free(host_msg);
3661         host_msg = copy_msg;
3662     }
3663     unlock_user(host_msg, msg, 0);
3664     return ret;
3665 }
3666 
3667 /* do_recvfrom() Must return target values and target errnos. */
3668 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3669                             abi_ulong target_addr,
3670                             abi_ulong target_addrlen)
3671 {
3672     socklen_t addrlen, ret_addrlen;
3673     void *addr;
3674     void *host_msg;
3675     abi_long ret;
3676 
3677     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3678     if (!host_msg)
3679         return -TARGET_EFAULT;
3680     if (target_addr) {
3681         if (get_user_u32(addrlen, target_addrlen)) {
3682             ret = -TARGET_EFAULT;
3683             goto fail;
3684         }
3685         if ((int)addrlen < 0) {
3686             ret = -TARGET_EINVAL;
3687             goto fail;
3688         }
3689         addr = alloca(addrlen);
3690         ret_addrlen = addrlen;
3691         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3692                                       addr, &ret_addrlen));
3693     } else {
3694         addr = NULL; /* To keep compiler quiet.  */
3695         addrlen = 0; /* To keep compiler quiet.  */
3696         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3697     }
3698     if (!is_error(ret)) {
3699         if (fd_trans_host_to_target_data(fd)) {
3700             abi_long trans;
3701             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3702             if (is_error(trans)) {
3703                 ret = trans;
3704                 goto fail;
3705             }
3706         }
3707         if (target_addr) {
3708             host_to_target_sockaddr(target_addr, addr,
3709                                     MIN(addrlen, ret_addrlen));
3710             if (put_user_u32(ret_addrlen, target_addrlen)) {
3711                 ret = -TARGET_EFAULT;
3712                 goto fail;
3713             }
3714         }
3715         unlock_user(host_msg, msg, len);
3716     } else {
3717 fail:
3718         unlock_user(host_msg, msg, 0);
3719     }
3720     return ret;
3721 }
3722 
3723 #ifdef TARGET_NR_socketcall
3724 /* do_socketcall() must return target values and target errnos. */
3725 static abi_long do_socketcall(int num, abi_ulong vptr)
3726 {
3727     static const unsigned nargs[] = { /* number of arguments per operation */
3728         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3729         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3730         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3731         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3732         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3733         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3734         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3735         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3736         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3737         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3738         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3739         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3740         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3741         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3742         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3743         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3744         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3745         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3746         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3747         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3748     };
3749     abi_long a[6]; /* max 6 args */
3750     unsigned i;
3751 
3752     /* check the range of the first argument num */
3753     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3754     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3755         return -TARGET_EINVAL;
3756     }
3757     /* ensure we have space for args */
3758     if (nargs[num] > ARRAY_SIZE(a)) {
3759         return -TARGET_EINVAL;
3760     }
3761     /* collect the arguments in a[] according to nargs[] */
3762     for (i = 0; i < nargs[num]; ++i) {
3763         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3764             return -TARGET_EFAULT;
3765         }
3766     }
3767     /* now when we have the args, invoke the appropriate underlying function */
3768     switch (num) {
3769     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3770         return do_socket(a[0], a[1], a[2]);
3771     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3772         return do_bind(a[0], a[1], a[2]);
3773     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3774         return do_connect(a[0], a[1], a[2]);
3775     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3776         return get_errno(listen(a[0], a[1]));
3777     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3778         return do_accept4(a[0], a[1], a[2], 0);
3779     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3780         return do_getsockname(a[0], a[1], a[2]);
3781     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3782         return do_getpeername(a[0], a[1], a[2]);
3783     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3784         return do_socketpair(a[0], a[1], a[2], a[3]);
3785     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3786         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3787     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3788         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3789     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3790         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3791     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3792         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3793     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3794         return get_errno(shutdown(a[0], a[1]));
3795     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3796         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3797     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3798         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3799     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3800         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3801     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3802         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3803     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3804         return do_accept4(a[0], a[1], a[2], a[3]);
3805     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3806         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3807     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3808         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3809     default:
3810         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3811         return -TARGET_EINVAL;
3812     }
3813 }
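/*
 * For illustration: a guest that issues socketcall(TARGET_SYS_BIND, ptr)
 * stores three abi_longs (sockfd, addr, addrlen) back to back at ptr;
 * nargs[TARGET_SYS_BIND] is 3, so the loop above fetches exactly those
 * three words with get_user_ual() and the switch dispatches them to
 * do_bind(a[0], a[1], a[2]).
 */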
3814 #endif
3815 
3816 #define N_SHM_REGIONS	32
3817 
3818 static struct shm_region {
3819     abi_ulong start;
3820     abi_ulong size;
3821     bool in_use;
3822 } shm_regions[N_SHM_REGIONS];
3823 
3824 #ifndef TARGET_SEMID64_DS
3825 /* asm-generic version of this struct */
3826 struct target_semid64_ds
3827 {
3828   struct target_ipc_perm sem_perm;
3829   abi_ulong sem_otime;
3830 #if TARGET_ABI_BITS == 32
3831   abi_ulong __unused1;
3832 #endif
3833   abi_ulong sem_ctime;
3834 #if TARGET_ABI_BITS == 32
3835   abi_ulong __unused2;
3836 #endif
3837   abi_ulong sem_nsems;
3838   abi_ulong __unused3;
3839   abi_ulong __unused4;
3840 };
3841 #endif
3842 
3843 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3844                                                abi_ulong target_addr)
3845 {
3846     struct target_ipc_perm *target_ip;
3847     struct target_semid64_ds *target_sd;
3848 
3849     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3850         return -TARGET_EFAULT;
3851     target_ip = &(target_sd->sem_perm);
3852     host_ip->__key = tswap32(target_ip->__key);
3853     host_ip->uid = tswap32(target_ip->uid);
3854     host_ip->gid = tswap32(target_ip->gid);
3855     host_ip->cuid = tswap32(target_ip->cuid);
3856     host_ip->cgid = tswap32(target_ip->cgid);
3857 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3858     host_ip->mode = tswap32(target_ip->mode);
3859 #else
3860     host_ip->mode = tswap16(target_ip->mode);
3861 #endif
3862 #if defined(TARGET_PPC)
3863     host_ip->__seq = tswap32(target_ip->__seq);
3864 #else
3865     host_ip->__seq = tswap16(target_ip->__seq);
3866 #endif
3867     unlock_user_struct(target_sd, target_addr, 0);
3868     return 0;
3869 }
3870 
3871 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3872                                                struct ipc_perm *host_ip)
3873 {
3874     struct target_ipc_perm *target_ip;
3875     struct target_semid64_ds *target_sd;
3876 
3877     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3878         return -TARGET_EFAULT;
3879     target_ip = &(target_sd->sem_perm);
3880     target_ip->__key = tswap32(host_ip->__key);
3881     target_ip->uid = tswap32(host_ip->uid);
3882     target_ip->gid = tswap32(host_ip->gid);
3883     target_ip->cuid = tswap32(host_ip->cuid);
3884     target_ip->cgid = tswap32(host_ip->cgid);
3885 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3886     target_ip->mode = tswap32(host_ip->mode);
3887 #else
3888     target_ip->mode = tswap16(host_ip->mode);
3889 #endif
3890 #if defined(TARGET_PPC)
3891     target_ip->__seq = tswap32(host_ip->__seq);
3892 #else
3893     target_ip->__seq = tswap16(host_ip->__seq);
3894 #endif
3895     unlock_user_struct(target_sd, target_addr, 1);
3896     return 0;
3897 }
3898 
3899 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3900                                                abi_ulong target_addr)
3901 {
3902     struct target_semid64_ds *target_sd;
3903 
3904     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3905         return -TARGET_EFAULT;
3906     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3907         return -TARGET_EFAULT;
3908     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3909     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3910     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3911     unlock_user_struct(target_sd, target_addr, 0);
3912     return 0;
3913 }
3914 
3915 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3916                                                struct semid_ds *host_sd)
3917 {
3918     struct target_semid64_ds *target_sd;
3919 
3920     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3921         return -TARGET_EFAULT;
3922     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3923         return -TARGET_EFAULT;
3924     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3925     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3926     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3927     unlock_user_struct(target_sd, target_addr, 1);
3928     return 0;
3929 }
3930 
3931 struct target_seminfo {
3932     int semmap;
3933     int semmni;
3934     int semmns;
3935     int semmnu;
3936     int semmsl;
3937     int semopm;
3938     int semume;
3939     int semusz;
3940     int semvmx;
3941     int semaem;
3942 };
3943 
3944 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3945                                               struct seminfo *host_seminfo)
3946 {
3947     struct target_seminfo *target_seminfo;
3948     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3949         return -TARGET_EFAULT;
3950     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3951     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3952     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3953     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3954     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3955     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3956     __put_user(host_seminfo->semume, &target_seminfo->semume);
3957     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3958     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3959     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3960     unlock_user_struct(target_seminfo, target_addr, 1);
3961     return 0;
3962 }
3963 
3964 union semun {
3965 	int val;
3966 	struct semid_ds *buf;
3967 	unsigned short *array;
3968 	struct seminfo *__buf;
3969 };
3970 
3971 union target_semun {
3972 	int val;
3973 	abi_ulong buf;
3974 	abi_ulong array;
3975 	abi_ulong __buf;
3976 };
3977 
3978 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3979                                                abi_ulong target_addr)
3980 {
3981     int nsems;
3982     unsigned short *array;
3983     union semun semun;
3984     struct semid_ds semid_ds;
3985     int i, ret;
3986 
3987     semun.buf = &semid_ds;
3988 
3989     ret = semctl(semid, 0, IPC_STAT, semun);
3990     if (ret == -1)
3991         return get_errno(ret);
3992 
3993     nsems = semid_ds.sem_nsems;
3994 
3995     *host_array = g_try_new(unsigned short, nsems);
3996     if (!*host_array) {
3997         return -TARGET_ENOMEM;
3998     }
3999     array = lock_user(VERIFY_READ, target_addr,
4000                       nsems*sizeof(unsigned short), 1);
4001     if (!array) {
4002         g_free(*host_array);
4003         return -TARGET_EFAULT;
4004     }
4005 
4006     for(i=0; i<nsems; i++) {
4007         __get_user((*host_array)[i], &array[i]);
4008     }
4009     unlock_user(array, target_addr, 0);
4010 
4011     return 0;
4012 }
4013 
4014 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4015                                                unsigned short **host_array)
4016 {
4017     int nsems;
4018     unsigned short *array;
4019     union semun semun;
4020     struct semid_ds semid_ds;
4021     int i, ret;
4022 
4023     semun.buf = &semid_ds;
4024 
4025     ret = semctl(semid, 0, IPC_STAT, semun);
4026     if (ret == -1)
4027         return get_errno(ret);
4028 
4029     nsems = semid_ds.sem_nsems;
4030 
4031     array = lock_user(VERIFY_WRITE, target_addr,
4032                       nsems*sizeof(unsigned short), 0);
4033     if (!array)
4034         return -TARGET_EFAULT;
4035 
4036     for(i=0; i<nsems; i++) {
4037         __put_user((*host_array)[i], &array[i]);
4038     }
4039     g_free(*host_array);
4040     unlock_user(array, target_addr, 1);
4041 
4042     return 0;
4043 }
4044 
4045 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4046                                  abi_ulong target_arg)
4047 {
4048     union target_semun target_su = { .buf = target_arg };
4049     union semun arg;
4050     struct semid_ds dsarg;
4051     unsigned short *array = NULL;
4052     struct seminfo seminfo;
4053     abi_long ret = -TARGET_EINVAL;
4054     abi_long err;
4055     cmd &= 0xff;
4056 
4057     switch( cmd ) {
4058 	case GETVAL:
4059 	case SETVAL:
4060             /* In 64 bit cross-endian situations, we will erroneously pick up
4061              * the wrong half of the union for the "val" element.  To rectify
4062              * this, the entire 8-byte structure is byteswapped, followed by
4063 	     * a swap of the 4 byte val field. In other cases, the data is
4064 	     * already in proper host byte order. */
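            /* For example (assuming a big-endian 64-bit guest on a
             * little-endian host): the guest stores val = 1 in the first four
             * bytes of the 8-byte union; once that union has been byteswapped
             * to host order as a single long, a plain read of target_su.val
             * would pick up the unused half.  Swapping the whole word back and
             * then tswap32()ing the val field recovers the 1. */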
4065 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4066 		target_su.buf = tswapal(target_su.buf);
4067 		arg.val = tswap32(target_su.val);
4068 	    } else {
4069 		arg.val = target_su.val;
4070 	    }
4071             ret = get_errno(semctl(semid, semnum, cmd, arg));
4072             break;
4073 	case GETALL:
4074 	case SETALL:
4075             err = target_to_host_semarray(semid, &array, target_su.array);
4076             if (err)
4077                 return err;
4078             arg.array = array;
4079             ret = get_errno(semctl(semid, semnum, cmd, arg));
4080             err = host_to_target_semarray(semid, target_su.array, &array);
4081             if (err)
4082                 return err;
4083             break;
4084 	case IPC_STAT:
4085 	case IPC_SET:
4086 	case SEM_STAT:
4087             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4088             if (err)
4089                 return err;
4090             arg.buf = &dsarg;
4091             ret = get_errno(semctl(semid, semnum, cmd, arg));
4092             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4093             if (err)
4094                 return err;
4095             break;
4096 	case IPC_INFO:
4097 	case SEM_INFO:
4098             arg.__buf = &seminfo;
4099             ret = get_errno(semctl(semid, semnum, cmd, arg));
4100             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4101             if (err)
4102                 return err;
4103             break;
4104 	case IPC_RMID:
4105 	case GETPID:
4106 	case GETNCNT:
4107 	case GETZCNT:
4108             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4109             break;
4110     }
4111 
4112     return ret;
4113 }
4114 
4115 struct target_sembuf {
4116     unsigned short sem_num;
4117     short sem_op;
4118     short sem_flg;
4119 };
4120 
4121 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4122                                              abi_ulong target_addr,
4123                                              unsigned nsops)
4124 {
4125     struct target_sembuf *target_sembuf;
4126     int i;
4127 
4128     target_sembuf = lock_user(VERIFY_READ, target_addr,
4129                               nsops*sizeof(struct target_sembuf), 1);
4130     if (!target_sembuf)
4131         return -TARGET_EFAULT;
4132 
4133     for(i=0; i<nsops; i++) {
4134         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4135         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4136         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4137     }
4138 
4139     unlock_user(target_sembuf, target_addr, 0);
4140 
4141     return 0;
4142 }
4143 
4144 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4145     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4146 
4147 /*
4148  * This macro is required to handle the s390 variant, which passes the
4149  * arguments in a different order than the default.
4150  */
4151 #ifdef __s390x__
4152 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4153   (__nsops), (__timeout), (__sops)
4154 #else
4155 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4156   (__nsops), 0, (__sops), (__timeout)
4157 #endif
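/*
 * For illustration, the safe_ipc() fallback in do_semtimedop() below expands
 * to
 *   safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts)
 * with the default macro, and to
 *   safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops)
 * on s390x.
 */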
4158 
4159 static inline abi_long do_semtimedop(int semid,
4160                                      abi_long ptr,
4161                                      unsigned nsops,
4162                                      abi_long timeout, bool time64)
4163 {
4164     struct sembuf *sops;
4165     struct timespec ts, *pts = NULL;
4166     abi_long ret;
4167 
4168     if (timeout) {
4169         pts = &ts;
4170         if (time64) {
4171             if (target_to_host_timespec64(pts, timeout)) {
4172                 return -TARGET_EFAULT;
4173             }
4174         } else {
4175             if (target_to_host_timespec(pts, timeout)) {
4176                 return -TARGET_EFAULT;
4177             }
4178         }
4179     }
4180 
4181     if (nsops > TARGET_SEMOPM) {
4182         return -TARGET_E2BIG;
4183     }
4184 
4185     sops = g_new(struct sembuf, nsops);
4186 
4187     if (target_to_host_sembuf(sops, ptr, nsops)) {
4188         g_free(sops);
4189         return -TARGET_EFAULT;
4190     }
4191 
4192     ret = -TARGET_ENOSYS;
4193 #ifdef __NR_semtimedop
4194     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4195 #endif
4196 #ifdef __NR_ipc
4197     if (ret == -TARGET_ENOSYS) {
4198         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4199                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4200     }
4201 #endif
4202     g_free(sops);
4203     return ret;
4204 }
4205 #endif
4206 
4207 struct target_msqid_ds
4208 {
4209     struct target_ipc_perm msg_perm;
4210     abi_ulong msg_stime;
4211 #if TARGET_ABI_BITS == 32
4212     abi_ulong __unused1;
4213 #endif
4214     abi_ulong msg_rtime;
4215 #if TARGET_ABI_BITS == 32
4216     abi_ulong __unused2;
4217 #endif
4218     abi_ulong msg_ctime;
4219 #if TARGET_ABI_BITS == 32
4220     abi_ulong __unused3;
4221 #endif
4222     abi_ulong __msg_cbytes;
4223     abi_ulong msg_qnum;
4224     abi_ulong msg_qbytes;
4225     abi_ulong msg_lspid;
4226     abi_ulong msg_lrpid;
4227     abi_ulong __unused4;
4228     abi_ulong __unused5;
4229 };
4230 
4231 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4232                                                abi_ulong target_addr)
4233 {
4234     struct target_msqid_ds *target_md;
4235 
4236     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4237         return -TARGET_EFAULT;
4238     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4239         return -TARGET_EFAULT;
4240     host_md->msg_stime = tswapal(target_md->msg_stime);
4241     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4242     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4243     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4244     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4245     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4246     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4247     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4248     unlock_user_struct(target_md, target_addr, 0);
4249     return 0;
4250 }
4251 
4252 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4253                                                struct msqid_ds *host_md)
4254 {
4255     struct target_msqid_ds *target_md;
4256 
4257     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4258         return -TARGET_EFAULT;
4259     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4260         return -TARGET_EFAULT;
4261     target_md->msg_stime = tswapal(host_md->msg_stime);
4262     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4263     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4264     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4265     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4266     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4267     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4268     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4269     unlock_user_struct(target_md, target_addr, 1);
4270     return 0;
4271 }
4272 
4273 struct target_msginfo {
4274     int msgpool;
4275     int msgmap;
4276     int msgmax;
4277     int msgmnb;
4278     int msgmni;
4279     int msgssz;
4280     int msgtql;
4281     unsigned short int msgseg;
4282 };
4283 
4284 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4285                                               struct msginfo *host_msginfo)
4286 {
4287     struct target_msginfo *target_msginfo;
4288     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4289         return -TARGET_EFAULT;
4290     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4291     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4292     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4293     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4294     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4295     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4296     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4297     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4298     unlock_user_struct(target_msginfo, target_addr, 1);
4299     return 0;
4300 }
4301 
4302 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4303 {
4304     struct msqid_ds dsarg;
4305     struct msginfo msginfo;
4306     abi_long ret = -TARGET_EINVAL;
4307 
4308     cmd &= 0xff;
4309 
4310     switch (cmd) {
4311     case IPC_STAT:
4312     case IPC_SET:
4313     case MSG_STAT:
4314         if (target_to_host_msqid_ds(&dsarg,ptr))
4315             return -TARGET_EFAULT;
4316         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4317         if (host_to_target_msqid_ds(ptr,&dsarg))
4318             return -TARGET_EFAULT;
4319         break;
4320     case IPC_RMID:
4321         ret = get_errno(msgctl(msgid, cmd, NULL));
4322         break;
4323     case IPC_INFO:
4324     case MSG_INFO:
4325         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4326         if (host_to_target_msginfo(ptr, &msginfo))
4327             return -TARGET_EFAULT;
4328         break;
4329     }
4330 
4331     return ret;
4332 }
4333 
4334 struct target_msgbuf {
4335     abi_long mtype;
4336     char	mtext[1];
4337 };
4338 
4339 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4340                                  ssize_t msgsz, int msgflg)
4341 {
4342     struct target_msgbuf *target_mb;
4343     struct msgbuf *host_mb;
4344     abi_long ret = 0;
4345 
4346     if (msgsz < 0) {
4347         return -TARGET_EINVAL;
4348     }
4349 
4350     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4351         return -TARGET_EFAULT;
4352     host_mb = g_try_malloc(msgsz + sizeof(long));
4353     if (!host_mb) {
4354         unlock_user_struct(target_mb, msgp, 0);
4355         return -TARGET_ENOMEM;
4356     }
4357     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4358     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4359     ret = -TARGET_ENOSYS;
4360 #ifdef __NR_msgsnd
4361     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4362 #endif
4363 #ifdef __NR_ipc
4364     if (ret == -TARGET_ENOSYS) {
4365 #ifdef __s390x__
4366         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4367                                  host_mb));
4368 #else
4369         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4370                                  host_mb, 0));
4371 #endif
4372     }
4373 #endif
4374     g_free(host_mb);
4375     unlock_user_struct(target_mb, msgp, 0);
4376 
4377     return ret;
4378 }
4379 
4380 #ifdef __NR_ipc
4381 #if defined(__sparc__)
4382 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4383 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4384 #elif defined(__s390x__)
4385 /* The s390 sys_ipc variant has only five parameters.  */
4386 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4387     ((long int[]){(long int)__msgp, __msgtyp})
4388 #else
4389 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4390     ((long int[]){(long int)__msgp, __msgtyp}), 0
4391 #endif
4392 #endif
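/*
 * For illustration, with the default definition the safe_ipc() fallback in
 * do_msgrcv() below receives
 *   ((long int[]){(long int)host_mb, msgtyp}), 0
 * as its final arguments, i.e. the msgp/msgtyp pair packed into a small
 * array plus a dummy sixth argument, whereas SPARC passes host_mb and
 * msgtyp directly and s390x omits the trailing 0.
 */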
4393 
4394 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4395                                  ssize_t msgsz, abi_long msgtyp,
4396                                  int msgflg)
4397 {
4398     struct target_msgbuf *target_mb;
4399     char *target_mtext;
4400     struct msgbuf *host_mb;
4401     abi_long ret = 0;
4402 
4403     if (msgsz < 0) {
4404         return -TARGET_EINVAL;
4405     }
4406 
4407     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4408         return -TARGET_EFAULT;
4409 
4410     host_mb = g_try_malloc(msgsz + sizeof(long));
4411     if (!host_mb) {
4412         ret = -TARGET_ENOMEM;
4413         goto end;
4414     }
4415     ret = -TARGET_ENOSYS;
4416 #ifdef __NR_msgrcv
4417     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4418 #endif
4419 #ifdef __NR_ipc
4420     if (ret == -TARGET_ENOSYS) {
4421         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4422                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4423     }
4424 #endif
4425 
4426     if (ret > 0) {
4427         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4428         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4429         if (!target_mtext) {
4430             ret = -TARGET_EFAULT;
4431             goto end;
4432         }
4433         memcpy(target_mb->mtext, host_mb->mtext, ret);
4434         unlock_user(target_mtext, target_mtext_addr, ret);
4435     }
4436 
4437     target_mb->mtype = tswapal(host_mb->mtype);
4438 
4439 end:
4440     if (target_mb)
4441         unlock_user_struct(target_mb, msgp, 1);
4442     g_free(host_mb);
4443     return ret;
4444 }
4445 
4446 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4447                                                abi_ulong target_addr)
4448 {
4449     struct target_shmid_ds *target_sd;
4450 
4451     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4452         return -TARGET_EFAULT;
4453     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4454         return -TARGET_EFAULT;
4455     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4456     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4457     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4458     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4459     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4460     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4461     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4462     unlock_user_struct(target_sd, target_addr, 0);
4463     return 0;
4464 }
4465 
4466 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4467                                                struct shmid_ds *host_sd)
4468 {
4469     struct target_shmid_ds *target_sd;
4470 
4471     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4472         return -TARGET_EFAULT;
4473     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4474         return -TARGET_EFAULT;
4475     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4476     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4477     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4478     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4479     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4480     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4481     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4482     unlock_user_struct(target_sd, target_addr, 1);
4483     return 0;
4484 }
4485 
4486 struct  target_shminfo {
4487     abi_ulong shmmax;
4488     abi_ulong shmmin;
4489     abi_ulong shmmni;
4490     abi_ulong shmseg;
4491     abi_ulong shmall;
4492 };
4493 
4494 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4495                                               struct shminfo *host_shminfo)
4496 {
4497     struct target_shminfo *target_shminfo;
4498     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4499         return -TARGET_EFAULT;
4500     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4501     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4502     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4503     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4504     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4505     unlock_user_struct(target_shminfo, target_addr, 1);
4506     return 0;
4507 }
4508 
4509 struct target_shm_info {
4510     int used_ids;
4511     abi_ulong shm_tot;
4512     abi_ulong shm_rss;
4513     abi_ulong shm_swp;
4514     abi_ulong swap_attempts;
4515     abi_ulong swap_successes;
4516 };
4517 
4518 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4519                                                struct shm_info *host_shm_info)
4520 {
4521     struct target_shm_info *target_shm_info;
4522     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4523         return -TARGET_EFAULT;
4524     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4525     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4526     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4527     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4528     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4529     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4530     unlock_user_struct(target_shm_info, target_addr, 1);
4531     return 0;
4532 }
4533 
4534 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4535 {
4536     struct shmid_ds dsarg;
4537     struct shminfo shminfo;
4538     struct shm_info shm_info;
4539     abi_long ret = -TARGET_EINVAL;
4540 
4541     cmd &= 0xff;
4542 
4543     switch(cmd) {
4544     case IPC_STAT:
4545     case IPC_SET:
4546     case SHM_STAT:
4547         if (target_to_host_shmid_ds(&dsarg, buf))
4548             return -TARGET_EFAULT;
4549         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4550         if (host_to_target_shmid_ds(buf, &dsarg))
4551             return -TARGET_EFAULT;
4552         break;
4553     case IPC_INFO:
4554         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4555         if (host_to_target_shminfo(buf, &shminfo))
4556             return -TARGET_EFAULT;
4557         break;
4558     case SHM_INFO:
4559         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4560         if (host_to_target_shm_info(buf, &shm_info))
4561             return -TARGET_EFAULT;
4562         break;
4563     case IPC_RMID:
4564     case SHM_LOCK:
4565     case SHM_UNLOCK:
4566         ret = get_errno(shmctl(shmid, cmd, NULL));
4567         break;
4568     }
4569 
4570     return ret;
4571 }
4572 
4573 #ifndef TARGET_FORCE_SHMLBA
4574 /* For most architectures, SHMLBA is the same as the page size;
4575  * some architectures have larger values, in which case they should
4576  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4577  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4578  * and defining its own value for SHMLBA.
4579  *
4580  * The kernel also permits SHMLBA to be set by the architecture to a
4581  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4582  * this means that addresses are rounded to the large size if
4583  * SHM_RND is set but addresses not aligned to that size are not rejected
4584  * as long as they are at least page-aligned. Since the only architecture
4585  * which uses this is ia64, this code doesn't provide for that oddity.
4586  */
4587 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4588 {
4589     return TARGET_PAGE_SIZE;
4590 }
4591 #endif
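/*
 * A minimal sketch (hypothetical, not taken from any existing target) of
 * what a port whose SHMLBA is larger than the page size would supply in its
 * target headers instead of the default above:
 *
 *   #define TARGET_FORCE_SHMLBA 1
 *
 *   static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *   {
 *       return 4 * TARGET_PAGE_SIZE;   (assumed four-page alignment)
 *   }
 */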
4592 
4593 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4594                                  int shmid, abi_ulong shmaddr, int shmflg)
4595 {
4596     abi_long raddr;
4597     void *host_raddr;
4598     struct shmid_ds shm_info;
4599     int i,ret;
4600     abi_ulong shmlba;
4601 
4602     /* find out the length of the shared memory segment */
4603     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4604     if (is_error(ret)) {
4605         /* can't get length, bail out */
4606         return ret;
4607     }
4608 
4609     shmlba = target_shmlba(cpu_env);
4610 
4611     if (shmaddr & (shmlba - 1)) {
4612         if (shmflg & SHM_RND) {
4613             shmaddr &= ~(shmlba - 1);
4614         } else {
4615             return -TARGET_EINVAL;
4616         }
4617     }
4618     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4619         return -TARGET_EINVAL;
4620     }
4621 
4622     mmap_lock();
4623 
4624     if (shmaddr)
4625         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4626     else {
4627         abi_ulong mmap_start;
4628 
4629         /* In order to use the host shmat, we need to honor host SHMLBA.  */
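             /* SHM_REMAP below lets shmat() replace whatever mapping already
              * covers the region found here (e.g. the reservation made when
              * the guest address space is reserved up front). */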
4630         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4631 
4632         if (mmap_start == -1) {
4633             errno = ENOMEM;
4634             host_raddr = (void *)-1;
4635         } else
4636             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4637     }
4638 
4639     if (host_raddr == (void *)-1) {
4640         mmap_unlock();
4641         return get_errno((long)host_raddr);
4642     }
4643     raddr = h2g((unsigned long)host_raddr);
4644 
4645     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4646                    PAGE_VALID | PAGE_READ |
4647                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4648 
4649     for (i = 0; i < N_SHM_REGIONS; i++) {
4650         if (!shm_regions[i].in_use) {
4651             shm_regions[i].in_use = true;
4652             shm_regions[i].start = raddr;
4653             shm_regions[i].size = shm_info.shm_segsz;
4654             break;
4655         }
4656     }
4657 
4658     mmap_unlock();
4659     return raddr;
4660 
4661 }
4662 
4663 static inline abi_long do_shmdt(abi_ulong shmaddr)
4664 {
4665     int i;
4666     abi_long rv;
4667 
4668     mmap_lock();
4669 
4670     for (i = 0; i < N_SHM_REGIONS; ++i) {
4671         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4672             shm_regions[i].in_use = false;
4673             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4674             break;
4675         }
4676     }
4677     rv = get_errno(shmdt(g2h(shmaddr)));
4678 
4679     mmap_unlock();
4680 
4681     return rv;
4682 }
4683 
4684 #ifdef TARGET_NR_ipc
4685 /* ??? This only works with linear mappings.  */
4686 /* do_ipc() must return target values and target errnos. */
4687 static abi_long do_ipc(CPUArchState *cpu_env,
4688                        unsigned int call, abi_long first,
4689                        abi_long second, abi_long third,
4690                        abi_long ptr, abi_long fifth)
4691 {
4692     int version;
4693     abi_long ret = 0;
4694 
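         /* The single ipc(2) entry point multiplexes the operation in the low
          * 16 bits of 'call' and an ABI version number in the high 16 bits. */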
4695     version = call >> 16;
4696     call &= 0xffff;
4697 
4698     switch (call) {
4699     case IPCOP_semop:
4700         ret = do_semtimedop(first, ptr, second, 0, false);
4701         break;
4702     case IPCOP_semtimedop:
4703     /*
4704      * The s390 sys_ipc variant has only five parameters instead of six
4705      * (as in the default variant); the only difference is the handling of
4706      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4707      * to a struct timespec while the generic variant uses the fifth parameter.
4708      */
4709 #if defined(TARGET_S390X)
4710         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4711 #else
4712         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4713 #endif
4714         break;
4715 
4716     case IPCOP_semget:
4717         ret = get_errno(semget(first, second, third));
4718         break;
4719 
4720     case IPCOP_semctl: {
4721         /* The semun argument to semctl is passed by value, so dereference the
4722          * ptr argument. */
4723         abi_ulong atptr;
4724         get_user_ual(atptr, ptr);
4725         ret = do_semctl(first, second, third, atptr);
4726         break;
4727     }
4728 
4729     case IPCOP_msgget:
4730         ret = get_errno(msgget(first, second));
4731         break;
4732 
4733     case IPCOP_msgsnd:
4734         ret = do_msgsnd(first, ptr, second, third);
4735         break;
4736 
4737     case IPCOP_msgctl:
4738         ret = do_msgctl(first, second, ptr);
4739         break;
4740 
4741     case IPCOP_msgrcv:
4742         switch (version) {
4743         case 0:
4744             {
4745                 struct target_ipc_kludge {
4746                     abi_long msgp;
4747                     abi_long msgtyp;
4748                 } *tmp;
4749 
4750                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4751                     ret = -TARGET_EFAULT;
4752                     break;
4753                 }
4754 
4755                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4756 
4757                 unlock_user_struct(tmp, ptr, 0);
4758                 break;
4759             }
4760         default:
4761             ret = do_msgrcv(first, ptr, second, fifth, third);
4762         }
4763         break;
4764 
4765     case IPCOP_shmat:
4766         switch (version) {
4767         default:
4768         {
4769             abi_ulong raddr;
4770             raddr = do_shmat(cpu_env, first, ptr, second);
4771             if (is_error(raddr))
4772                 return get_errno(raddr);
4773             if (put_user_ual(raddr, third))
4774                 return -TARGET_EFAULT;
4775             break;
4776         }
4777         case 1:
4778             ret = -TARGET_EINVAL;
4779             break;
4780         }
4781         break;
4782     case IPCOP_shmdt:
4783         ret = do_shmdt(ptr);
4784         break;
4785
4786     case IPCOP_shmget:
4787         /* IPC_* flag values are the same on all linux platforms */
4788         ret = get_errno(shmget(first, second, third));
4789         break;
4790
4791     /* IPC_* and SHM_* command values are the same on all linux platforms */
4792     case IPCOP_shmctl:
4793         ret = do_shmctl(first, second, ptr);
4794         break;
4795     default:
4796         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4797                       call, version);
4798         ret = -TARGET_ENOSYS;
4799         break;
4800     }
4801     return ret;
4802 }
4803 #endif
4804 
4805 /* kernel structure types definitions */
4806 
4807 #define STRUCT(name, ...) STRUCT_ ## name,
4808 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4809 enum {
4810 #include "syscall_types.h"
4811 STRUCT_MAX
4812 };
4813 #undef STRUCT
4814 #undef STRUCT_SPECIAL
4815 
4816 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4817 #define STRUCT_SPECIAL(name)
4818 #include "syscall_types.h"
4819 #undef STRUCT
4820 #undef STRUCT_SPECIAL
4821 
4822 #define MAX_STRUCT_SIZE 4096
4823 
4824 #ifdef CONFIG_FIEMAP
4825 /* So that fiemap access checks don't overflow on 32-bit systems.
4826  * This is very slightly smaller than the limit imposed by
4827  * the underlying kernel.
4828  */
4829 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4830                             / sizeof(struct fiemap_extent))
4831 
4832 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4833                                        int fd, int cmd, abi_long arg)
4834 {
4835     /* The parameter for this ioctl is a struct fiemap followed
4836      * by an array of struct fiemap_extent whose size is set
4837      * in fiemap->fm_extent_count. The array is filled in by the
4838      * ioctl.
4839      */
4840     int target_size_in, target_size_out;
4841     struct fiemap *fm;
4842     const argtype *arg_type = ie->arg_type;
4843     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4844     void *argptr, *p;
4845     abi_long ret;
4846     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4847     uint32_t outbufsz;
4848     int free_fm = 0;
4849 
4850     assert(arg_type[0] == TYPE_PTR);
4851     assert(ie->access == IOC_RW);
4852     arg_type++;
4853     target_size_in = thunk_type_size(arg_type, 0);
4854     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4855     if (!argptr) {
4856         return -TARGET_EFAULT;
4857     }
4858     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4859     unlock_user(argptr, arg, 0);
4860     fm = (struct fiemap *)buf_temp;
4861     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4862         return -TARGET_EINVAL;
4863     }
4864 
4865     outbufsz = sizeof (*fm) +
4866         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4867 
4868     if (outbufsz > MAX_STRUCT_SIZE) {
4869         /* We can't fit all the extents into the fixed size buffer.
4870          * Allocate one that is large enough and use it instead.
4871          */
4872         fm = g_try_malloc(outbufsz);
4873         if (!fm) {
4874             return -TARGET_ENOMEM;
4875         }
4876         memcpy(fm, buf_temp, sizeof(struct fiemap));
4877         free_fm = 1;
4878     }
4879     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4880     if (!is_error(ret)) {
4881         target_size_out = target_size_in;
4882         /* An extent_count of 0 means we were only counting the extents
4883          * so there are no structs to copy
4884          */
4885         if (fm->fm_extent_count != 0) {
4886             target_size_out += fm->fm_mapped_extents * extent_size;
4887         }
4888         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4889         if (!argptr) {
4890             ret = -TARGET_EFAULT;
4891         } else {
4892             /* Convert the struct fiemap */
4893             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4894             if (fm->fm_extent_count != 0) {
4895                 p = argptr + target_size_in;
4896                 /* ...and then all the struct fiemap_extents */
4897                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4898                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4899                                   THUNK_TARGET);
4900                     p += extent_size;
4901                 }
4902             }
4903             unlock_user(argptr, arg, target_size_out);
4904         }
4905     }
4906     if (free_fm) {
4907         g_free(fm);
4908     }
4909     return ret;
4910 }
4911 #endif
4912 
4913 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4914                                 int fd, int cmd, abi_long arg)
4915 {
4916     const argtype *arg_type = ie->arg_type;
4917     int target_size;
4918     void *argptr;
4919     int ret;
4920     struct ifconf *host_ifconf;
4921     uint32_t outbufsz;
4922     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4923     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4924     int target_ifreq_size;
4925     int nb_ifreq;
4926     int free_buf = 0;
4927     int i;
4928     int target_ifc_len;
4929     abi_long target_ifc_buf;
4930     int host_ifc_len;
4931     char *host_ifc_buf;
4932 
4933     assert(arg_type[0] == TYPE_PTR);
4934     assert(ie->access == IOC_RW);
4935 
4936     arg_type++;
4937     target_size = thunk_type_size(arg_type, 0);
4938 
4939     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4940     if (!argptr)
4941         return -TARGET_EFAULT;
4942     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4943     unlock_user(argptr, arg, 0);
4944 
4945     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4946     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4947     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4948 
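         /* Target and host struct ifreq can differ in size, so the guest's
          * ifc_len (counted in target-sized entries) is scaled to a host
          * ifc_len here and scaled back after the ioctl. */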
4949     if (target_ifc_buf != 0) {
4950         target_ifc_len = host_ifconf->ifc_len;
4951         nb_ifreq = target_ifc_len / target_ifreq_size;
4952         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4953 
4954         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4955         if (outbufsz > MAX_STRUCT_SIZE) {
4956             /*
4957              * We can't fit all the ifreq entries into the fixed size buffer.
4958              * Allocate one that is large enough and use it instead.
4959              */
4960             host_ifconf = malloc(outbufsz);
4961             if (!host_ifconf) {
4962                 return -TARGET_ENOMEM;
4963             }
4964             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4965             free_buf = 1;
4966         }
4967         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4968 
4969         host_ifconf->ifc_len = host_ifc_len;
4970     } else {
4971         host_ifc_buf = NULL;
4972     }
4973     host_ifconf->ifc_buf = host_ifc_buf;
4974 
4975     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4976     if (!is_error(ret)) {
4977         /* convert host ifc_len to target ifc_len */
4978 
4979         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4980         target_ifc_len = nb_ifreq * target_ifreq_size;
4981         host_ifconf->ifc_len = target_ifc_len;
4982 
4983         /* restore target ifc_buf */
4984 
4985         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4986 
4987         /* copy struct ifconf to target user */
4988 
4989         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4990         if (!argptr)
4991             return -TARGET_EFAULT;
4992         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4993         unlock_user(argptr, arg, target_size);
4994 
4995         if (target_ifc_buf != 0) {
4996             /* copy ifreq[] to target user */
4997             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4998             for (i = 0; i < nb_ifreq ; i++) {
4999                 thunk_convert(argptr + i * target_ifreq_size,
5000                               host_ifc_buf + i * sizeof(struct ifreq),
5001                               ifreq_arg_type, THUNK_TARGET);
5002             }
5003             unlock_user(argptr, target_ifc_buf, target_ifc_len);
5004         }
5005     }
5006 
5007     if (free_buf) {
5008         free(host_ifconf);
5009     }
5010 
5011     return ret;
5012 }
5013 
5014 #if defined(CONFIG_USBFS)
5015 #if HOST_LONG_BITS > 64
5016 #error USBDEVFS thunks do not support >64 bit hosts yet.
5017 #endif
5018 struct live_urb {
5019     uint64_t target_urb_adr;
5020     uint64_t target_buf_adr;
5021     char *target_buf_ptr;
5022     struct usbdevfs_urb host_urb;
5023 };
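     /* USBDEVFS_REAPURB hands back the same urb pointer that was submitted,
      * so embedding the host urb inside struct live_urb lets the reap handler
      * recover the guest-side metadata with plain offsetof() arithmetic. */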
5024 
5025 static GHashTable *usbdevfs_urb_hashtable(void)
5026 {
5027     static GHashTable *urb_hashtable;
5028 
5029     if (!urb_hashtable) {
5030         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5031     }
5032     return urb_hashtable;
5033 }
5034 
5035 static void urb_hashtable_insert(struct live_urb *urb)
5036 {
5037     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5038     g_hash_table_insert(urb_hashtable, urb, urb);
5039 }
5040 
5041 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5042 {
5043     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5044     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5045 }
5046 
5047 static void urb_hashtable_remove(struct live_urb *urb)
5048 {
5049     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5050     g_hash_table_remove(urb_hashtable, urb);
5051 }
5052 
5053 static abi_long
5054 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5055                           int fd, int cmd, abi_long arg)
5056 {
5057     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5058     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5059     struct live_urb *lurb;
5060     void *argptr;
5061     uint64_t hurb;
5062     int target_size;
5063     uintptr_t target_urb_adr;
5064     abi_long ret;
5065 
5066     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5067 
5068     memset(buf_temp, 0, sizeof(uint64_t));
5069     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5070     if (is_error(ret)) {
5071         return ret;
5072     }
5073 
5074     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5075     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5076     if (!lurb->target_urb_adr) {
5077         return -TARGET_EFAULT;
5078     }
5079     urb_hashtable_remove(lurb);
5080     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5081         lurb->host_urb.buffer_length);
5082     lurb->target_buf_ptr = NULL;
5083 
5084     /* restore the guest buffer pointer */
5085     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5086 
5087     /* update the guest urb struct */
5088     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5089     if (!argptr) {
5090         g_free(lurb);
5091         return -TARGET_EFAULT;
5092     }
5093     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5094     unlock_user(argptr, lurb->target_urb_adr, target_size);
5095 
5096     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5097     /* write back the urb handle */
5098     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5099     if (!argptr) {
5100         g_free(lurb);
5101         return -TARGET_EFAULT;
5102     }
5103 
5104     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5105     target_urb_adr = lurb->target_urb_adr;
5106     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5107     unlock_user(argptr, arg, target_size);
5108 
5109     g_free(lurb);
5110     return ret;
5111 }
5112 
5113 static abi_long
5114 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5115                              uint8_t *buf_temp __attribute__((unused)),
5116                              int fd, int cmd, abi_long arg)
5117 {
5118     struct live_urb *lurb;
5119 
5120     /* map target address back to host URB with metadata. */
5121     lurb = urb_hashtable_lookup(arg);
5122     if (!lurb) {
5123         return -TARGET_EFAULT;
5124     }
5125     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5126 }
5127 
5128 static abi_long
5129 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5130                             int fd, int cmd, abi_long arg)
5131 {
5132     const argtype *arg_type = ie->arg_type;
5133     int target_size;
5134     abi_long ret;
5135     void *argptr;
5136     int rw_dir;
5137     struct live_urb *lurb;
5138 
5139     /*
5140      * Each submitted URB needs to map to a unique ID for the
5141      * kernel, and that unique ID needs to be a pointer to
5142      * host memory.  Hence, we need to malloc for each URB.
5143      * Isochronous transfers have a variable-length struct.
5144      */
5145     arg_type++;
5146     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5147 
5148     /* construct host copy of urb and metadata */
5149     lurb = g_try_malloc0(sizeof(struct live_urb));
5150     if (!lurb) {
5151         return -TARGET_ENOMEM;
5152     }
5153 
5154     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5155     if (!argptr) {
5156         g_free(lurb);
5157         return -TARGET_EFAULT;
5158     }
5159     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5160     unlock_user(argptr, arg, 0);
5161 
5162     lurb->target_urb_adr = arg;
5163     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5164 
5165     /* buffer space used depends on endpoint type so lock the entire buffer */
5166     /* control type urbs should check the buffer contents for true direction */
5167     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5168     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5169         lurb->host_urb.buffer_length, 1);
5170     if (lurb->target_buf_ptr == NULL) {
5171         g_free(lurb);
5172         return -TARGET_EFAULT;
5173     }
5174 
5175     /* update buffer pointer in host copy */
5176     lurb->host_urb.buffer = lurb->target_buf_ptr;
5177 
5178     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5179     if (is_error(ret)) {
5180         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5181         g_free(lurb);
5182     } else {
5183         urb_hashtable_insert(lurb);
5184     }
5185 
5186     return ret;
5187 }
5188 #endif /* CONFIG_USBFS */
5189 
5190 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5191                             int cmd, abi_long arg)
5192 {
5193     void *argptr;
5194     struct dm_ioctl *host_dm;
5195     abi_long guest_data;
5196     uint32_t guest_data_size;
5197     int target_size;
5198     const argtype *arg_type = ie->arg_type;
5199     abi_long ret;
5200     void *big_buf = NULL;
5201     char *host_data;
5202 
5203     arg_type++;
5204     target_size = thunk_type_size(arg_type, 0);
5205     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5206     if (!argptr) {
5207         ret = -TARGET_EFAULT;
5208         goto out;
5209     }
5210     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5211     unlock_user(argptr, arg, 0);
5212 
5213     /* buf_temp is too small, so fetch things into a bigger buffer */
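         /* data_size is doubled here to leave headroom: the host-layout copy
          * of the payload can be larger than the guest's target-layout data. */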
5214     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5215     memcpy(big_buf, buf_temp, target_size);
5216     buf_temp = big_buf;
5217     host_dm = big_buf;
5218 
5219     guest_data = arg + host_dm->data_start;
5220     if ((guest_data - arg) < 0) {
5221         ret = -TARGET_EINVAL;
5222         goto out;
5223     }
5224     guest_data_size = host_dm->data_size - host_dm->data_start;
5225     host_data = (char*)host_dm + host_dm->data_start;
5226 
5227     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5228     if (!argptr) {
5229         ret = -TARGET_EFAULT;
5230         goto out;
5231     }
5232 
5233     switch (ie->host_cmd) {
5234     case DM_REMOVE_ALL:
5235     case DM_LIST_DEVICES:
5236     case DM_DEV_CREATE:
5237     case DM_DEV_REMOVE:
5238     case DM_DEV_SUSPEND:
5239     case DM_DEV_STATUS:
5240     case DM_DEV_WAIT:
5241     case DM_TABLE_STATUS:
5242     case DM_TABLE_CLEAR:
5243     case DM_TABLE_DEPS:
5244     case DM_LIST_VERSIONS:
5245         /* no input data */
5246         break;
5247     case DM_DEV_RENAME:
5248     case DM_DEV_SET_GEOMETRY:
5249         /* data contains only strings */
5250         memcpy(host_data, argptr, guest_data_size);
5251         break;
5252     case DM_TARGET_MSG:
5253         memcpy(host_data, argptr, guest_data_size);
5254         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5255         break;
5256     case DM_TABLE_LOAD:
5257     {
5258         void *gspec = argptr;
5259         void *cur_data = host_data;
5260         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5261         int spec_size = thunk_type_size(arg_type, 0);
5262         int i;
5263 
5264         for (i = 0; i < host_dm->target_count; i++) {
5265             struct dm_target_spec *spec = cur_data;
5266             uint32_t next;
5267             int slen;
5268 
5269             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5270             slen = strlen((char*)gspec + spec_size) + 1;
5271             next = spec->next;
5272             spec->next = sizeof(*spec) + slen;
5273             strcpy((char*)&spec[1], gspec + spec_size);
5274             gspec += next;
5275             cur_data += spec->next;
5276         }
5277         break;
5278     }
5279     default:
5280         ret = -TARGET_EINVAL;
5281         unlock_user(argptr, guest_data, 0);
5282         goto out;
5283     }
5284     unlock_user(argptr, guest_data, 0);
5285 
5286     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5287     if (!is_error(ret)) {
5288         guest_data = arg + host_dm->data_start;
5289         guest_data_size = host_dm->data_size - host_dm->data_start;
5290         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5291         switch (ie->host_cmd) {
5292         case DM_REMOVE_ALL:
5293         case DM_DEV_CREATE:
5294         case DM_DEV_REMOVE:
5295         case DM_DEV_RENAME:
5296         case DM_DEV_SUSPEND:
5297         case DM_DEV_STATUS:
5298         case DM_TABLE_LOAD:
5299         case DM_TABLE_CLEAR:
5300         case DM_TARGET_MSG:
5301         case DM_DEV_SET_GEOMETRY:
5302             /* no return data */
5303             break;
5304         case DM_LIST_DEVICES:
5305         {
5306             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5307             uint32_t remaining_data = guest_data_size;
5308             void *cur_data = argptr;
5309             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5310             int nl_size = 12; /* can't use thunk_type_size() due to alignment */
5311 
5312             while (1) {
5313                 uint32_t next = nl->next;
5314                 if (next) {
5315                     nl->next = nl_size + (strlen(nl->name) + 1);
5316                 }
5317                 if (remaining_data < nl->next) {
5318                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5319                     break;
5320                 }
5321                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5322                 strcpy(cur_data + nl_size, nl->name);
5323                 cur_data += nl->next;
5324                 remaining_data -= nl->next;
5325                 if (!next) {
5326                     break;
5327                 }
5328                 nl = (void*)nl + next;
5329             }
5330             break;
5331         }
5332         case DM_DEV_WAIT:
5333         case DM_TABLE_STATUS:
5334         {
5335             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5336             void *cur_data = argptr;
5337             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5338             int spec_size = thunk_type_size(arg_type, 0);
5339             int i;
5340 
5341             for (i = 0; i < host_dm->target_count; i++) {
5342                 uint32_t next = spec->next;
5343                 int slen = strlen((char*)&spec[1]) + 1;
5344                 spec->next = (cur_data - argptr) + spec_size + slen;
5345                 if (guest_data_size < spec->next) {
5346                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5347                     break;
5348                 }
5349                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5350                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5351                 cur_data = argptr + spec->next;
5352                 spec = (void*)host_dm + host_dm->data_start + next;
5353             }
5354             break;
5355         }
5356         case DM_TABLE_DEPS:
5357         {
5358             void *hdata = (void*)host_dm + host_dm->data_start;
5359             int count = *(uint32_t*)hdata;
5360             uint64_t *hdev = hdata + 8;
5361             uint64_t *gdev = argptr + 8;
5362             int i;
5363 
5364             *(uint32_t*)argptr = tswap32(count);
5365             for (i = 0; i < count; i++) {
5366                 *gdev = tswap64(*hdev);
5367                 gdev++;
5368                 hdev++;
5369             }
5370             break;
5371         }
5372         case DM_LIST_VERSIONS:
5373         {
5374             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5375             uint32_t remaining_data = guest_data_size;
5376             void *cur_data = argptr;
5377             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5378             int vers_size = thunk_type_size(arg_type, 0);
5379 
5380             while (1) {
5381                 uint32_t next = vers->next;
5382                 if (next) {
5383                     vers->next = vers_size + (strlen(vers->name) + 1);
5384                 }
5385                 if (remaining_data < vers->next) {
5386                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5387                     break;
5388                 }
5389                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5390                 strcpy(cur_data + vers_size, vers->name);
5391                 cur_data += vers->next;
5392                 remaining_data -= vers->next;
5393                 if (!next) {
5394                     break;
5395                 }
5396                 vers = (void*)vers + next;
5397             }
5398             break;
5399         }
5400         default:
5401             unlock_user(argptr, guest_data, 0);
5402             ret = -TARGET_EINVAL;
5403             goto out;
5404         }
5405         unlock_user(argptr, guest_data, guest_data_size);
5406 
5407         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5408         if (!argptr) {
5409             ret = -TARGET_EFAULT;
5410             goto out;
5411         }
5412         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5413         unlock_user(argptr, arg, target_size);
5414     }
5415 out:
5416     g_free(big_buf);
5417     return ret;
5418 }
5419 
5420 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5421                                int cmd, abi_long arg)
5422 {
5423     void *argptr;
5424     int target_size;
5425     const argtype *arg_type = ie->arg_type;
5426     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5427     abi_long ret;
5428 
5429     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5430     struct blkpg_partition host_part;
5431 
5432     /* Read and convert blkpg */
5433     arg_type++;
5434     target_size = thunk_type_size(arg_type, 0);
5435     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5436     if (!argptr) {
5437         ret = -TARGET_EFAULT;
5438         goto out;
5439     }
5440     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5441     unlock_user(argptr, arg, 0);
5442 
5443     switch (host_blkpg->op) {
5444     case BLKPG_ADD_PARTITION:
5445     case BLKPG_DEL_PARTITION:
5446         /* payload is struct blkpg_partition */
5447         break;
5448     default:
5449         /* Unknown opcode */
5450         ret = -TARGET_EINVAL;
5451         goto out;
5452     }
5453 
5454     /* Read and convert blkpg->data */
5455     arg = (abi_long)(uintptr_t)host_blkpg->data;
5456     target_size = thunk_type_size(part_arg_type, 0);
5457     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5458     if (!argptr) {
5459         ret = -TARGET_EFAULT;
5460         goto out;
5461     }
5462     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5463     unlock_user(argptr, arg, 0);
5464 
5465     /* Swizzle the data pointer to our local copy and call! */
5466     host_blkpg->data = &host_part;
5467     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5468 
5469 out:
5470     return ret;
5471 }
5472 
5473 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5474                                 int fd, int cmd, abi_long arg)
5475 {
5476     const argtype *arg_type = ie->arg_type;
5477     const StructEntry *se;
5478     const argtype *field_types;
5479     const int *dst_offsets, *src_offsets;
5480     int target_size;
5481     void *argptr;
5482     abi_ulong *target_rt_dev_ptr = NULL;
5483     unsigned long *host_rt_dev_ptr = NULL;
5484     abi_long ret;
5485     int i;
5486 
5487     assert(ie->access == IOC_W);
5488     assert(*arg_type == TYPE_PTR);
5489     arg_type++;
5490     assert(*arg_type == TYPE_STRUCT);
5491     target_size = thunk_type_size(arg_type, 0);
5492     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5493     if (!argptr) {
5494         return -TARGET_EFAULT;
5495     }
5496     arg_type++;
5497     assert(*arg_type == (int)STRUCT_rtentry);
5498     se = struct_entries + *arg_type++;
5499     assert(se->convert[0] == NULL);
5500     /* convert struct here to be able to catch rt_dev string */
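         /* rt_dev is a pointer to a device name string in guest memory; the
          * generic thunk cannot follow it, so it is locked and translated by
          * hand in the loop below. */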
5501     field_types = se->field_types;
5502     dst_offsets = se->field_offsets[THUNK_HOST];
5503     src_offsets = se->field_offsets[THUNK_TARGET];
5504     for (i = 0; i < se->nb_fields; i++) {
5505         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5506             assert(*field_types == TYPE_PTRVOID);
5507             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5508             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5509             if (*target_rt_dev_ptr != 0) {
5510                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5511                                                   tswapal(*target_rt_dev_ptr));
5512                 if (!*host_rt_dev_ptr) {
5513                     unlock_user(argptr, arg, 0);
5514                     return -TARGET_EFAULT;
5515                 }
5516             } else {
5517                 *host_rt_dev_ptr = 0;
5518             }
5519             field_types++;
5520             continue;
5521         }
5522         field_types = thunk_convert(buf_temp + dst_offsets[i],
5523                                     argptr + src_offsets[i],
5524                                     field_types, THUNK_HOST);
5525     }
5526     unlock_user(argptr, arg, 0);
5527 
5528     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5529 
5530     assert(host_rt_dev_ptr != NULL);
5531     assert(target_rt_dev_ptr != NULL);
5532     if (*host_rt_dev_ptr != 0) {
5533         unlock_user((void *)*host_rt_dev_ptr,
5534                     *target_rt_dev_ptr, 0);
5535     }
5536     return ret;
5537 }
5538 
5539 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5540                                      int fd, int cmd, abi_long arg)
5541 {
5542     int sig = target_to_host_signal(arg);
5543     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5544 }
5545 
5546 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5547                                     int fd, int cmd, abi_long arg)
5548 {
5549     struct timeval tv;
5550     abi_long ret;
5551 
5552     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5553     if (is_error(ret)) {
5554         return ret;
5555     }
5556 
5557     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5558         if (copy_to_user_timeval(arg, &tv)) {
5559             return -TARGET_EFAULT;
5560         }
5561     } else {
5562         if (copy_to_user_timeval64(arg, &tv)) {
5563             return -TARGET_EFAULT;
5564         }
5565     }
5566 
5567     return ret;
5568 }
5569 
5570 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5571                                       int fd, int cmd, abi_long arg)
5572 {
5573     struct timespec ts;
5574     abi_long ret;
5575 
5576     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5577     if (is_error(ret)) {
5578         return ret;
5579     }
5580 
5581     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5582         if (host_to_target_timespec(arg, &ts)) {
5583             return -TARGET_EFAULT;
5584         }
5585     } else {
5586         if (host_to_target_timespec64(arg, &ts)) {
5587             return -TARGET_EFAULT;
5588         }
5589     }
5590 
5591     return ret;
5592 }
5593 
5594 #ifdef TIOCGPTPEER
5595 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5596                                      int fd, int cmd, abi_long arg)
5597 {
5598     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5599     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5600 }
5601 #endif
5602 
5603 #ifdef HAVE_DRM_H
5604 
5605 static void unlock_drm_version(struct drm_version *host_ver,
5606                                struct target_drm_version *target_ver,
5607                                bool copy)
5608 {
5609     unlock_user(host_ver->name, target_ver->name,
5610                                 copy ? host_ver->name_len : 0);
5611     unlock_user(host_ver->date, target_ver->date,
5612                                 copy ? host_ver->date_len : 0);
5613     unlock_user(host_ver->desc, target_ver->desc,
5614                                 copy ? host_ver->desc_len : 0);
5615 }
5616 
5617 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5618                                           struct target_drm_version *target_ver)
5619 {
5620     memset(host_ver, 0, sizeof(*host_ver));
5621 
5622     __get_user(host_ver->name_len, &target_ver->name_len);
5623     if (host_ver->name_len) {
5624         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5625                                    target_ver->name_len, 0);
5626         if (!host_ver->name) {
5627             return -EFAULT;
5628         }
5629     }
5630 
5631     __get_user(host_ver->date_len, &target_ver->date_len);
5632     if (host_ver->date_len) {
5633         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5634                                    target_ver->date_len, 0);
5635         if (!host_ver->date) {
5636             goto err;
5637         }
5638     }
5639 
5640     __get_user(host_ver->desc_len, &target_ver->desc_len);
5641     if (host_ver->desc_len) {
5642         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5643                                    target_ver->desc_len, 0);
5644         if (!host_ver->desc) {
5645             goto err;
5646         }
5647     }
5648 
5649     return 0;
5650 err:
5651     unlock_drm_version(host_ver, target_ver, false);
5652     return -EFAULT;
5653 }
5654 
5655 static inline void host_to_target_drmversion(
5656                                           struct target_drm_version *target_ver,
5657                                           struct drm_version *host_ver)
5658 {
5659     __put_user(host_ver->version_major, &target_ver->version_major);
5660     __put_user(host_ver->version_minor, &target_ver->version_minor);
5661     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5662     __put_user(host_ver->name_len, &target_ver->name_len);
5663     __put_user(host_ver->date_len, &target_ver->date_len);
5664     __put_user(host_ver->desc_len, &target_ver->desc_len);
5665     unlock_drm_version(host_ver, target_ver, true);
5666 }
5667 
5668 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5669                              int fd, int cmd, abi_long arg)
5670 {
5671     struct drm_version *ver;
5672     struct target_drm_version *target_ver;
5673     abi_long ret;
5674 
5675     switch (ie->host_cmd) {
5676     case DRM_IOCTL_VERSION:
5677         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5678             return -TARGET_EFAULT;
5679         }
5680         ver = (struct drm_version *)buf_temp;
5681         ret = target_to_host_drmversion(ver, target_ver);
5682         if (!is_error(ret)) {
5683             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5684             if (is_error(ret)) {
5685                 unlock_drm_version(ver, target_ver, false);
5686             } else {
5687                 host_to_target_drmversion(target_ver, ver);
5688             }
5689         }
5690         unlock_user_struct(target_ver, arg, 0);
5691         return ret;
5692     }
5693     return -TARGET_ENOSYS;
5694 }
5695 
5696 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5697                                            struct drm_i915_getparam *gparam,
5698                                            int fd, abi_long arg)
5699 {
5700     abi_long ret;
5701     int value;
5702     struct target_drm_i915_getparam *target_gparam;
5703 
5704     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5705         return -TARGET_EFAULT;
5706     }
5707 
5708     __get_user(gparam->param, &target_gparam->param);
5709     gparam->value = &value;
5710     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5711     put_user_s32(value, target_gparam->value);
5712 
5713     unlock_user_struct(target_gparam, arg, 0);
5714     return ret;
5715 }
5716 
5717 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5718                                   int fd, int cmd, abi_long arg)
5719 {
5720     switch (ie->host_cmd) {
5721     case DRM_IOCTL_I915_GETPARAM:
5722         return do_ioctl_drm_i915_getparam(ie,
5723                                           (struct drm_i915_getparam *)buf_temp,
5724                                           fd, arg);
5725     default:
5726         return -TARGET_ENOSYS;
5727     }
5728 }
5729 
5730 #endif
5731 
5732 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5733                                         int fd, int cmd, abi_long arg)
5734 {
5735     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5736     struct tun_filter *target_filter;
5737     char *target_addr;
5738 
5739     assert(ie->access == IOC_W);
5740 
5741     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5742     if (!target_filter) {
5743         return -TARGET_EFAULT;
5744     }
5745     filter->flags = tswap16(target_filter->flags);
5746     filter->count = tswap16(target_filter->count);
5747     unlock_user(target_filter, arg, 0);
5748 
5749     if (filter->count) {
5750         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5751             MAX_STRUCT_SIZE) {
5752             return -TARGET_EFAULT;
5753         }
5754 
5755         target_addr = lock_user(VERIFY_READ,
5756                                 arg + offsetof(struct tun_filter, addr),
5757                                 filter->count * ETH_ALEN, 1);
5758         if (!target_addr) {
5759             return -TARGET_EFAULT;
5760         }
5761         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5762         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5763     }
5764 
5765     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5766 }
5767 
5768 IOCTLEntry ioctl_entries[] = {
5769 #define IOCTL(cmd, access, ...) \
5770     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5771 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5772     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5773 #define IOCTL_IGNORE(cmd) \
5774     { TARGET_ ## cmd, 0, #cmd },
5775 #include "ioctls.h"
5776     { 0, 0, },
5777 };
5778 
5779 /* ??? Implement proper locking for ioctls.  */
5780 /* do_ioctl() must return target values and target errnos. */
5781 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5782 {
5783     const IOCTLEntry *ie;
5784     const argtype *arg_type;
5785     abi_long ret;
5786     uint8_t buf_temp[MAX_STRUCT_SIZE];
5787     int target_size;
5788     void *argptr;
5789 
5790     ie = ioctl_entries;
5791     for(;;) {
5792         if (ie->target_cmd == 0) {
5793             qemu_log_mask(
5794                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5795             return -TARGET_ENOSYS;
5796         }
5797         if (ie->target_cmd == cmd)
5798             break;
5799         ie++;
5800     }
5801     arg_type = ie->arg_type;
5802     if (ie->do_ioctl) {
5803         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5804     } else if (!ie->host_cmd) {
5805         /* Some architectures define BSD ioctls in their headers
5806            that are not implemented in Linux.  */
5807         return -TARGET_ENOSYS;
5808     }
5809 
5810     switch(arg_type[0]) {
5811     case TYPE_NULL:
5812         /* no argument */
5813         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5814         break;
5815     case TYPE_PTRVOID:
5816     case TYPE_INT:
5817     case TYPE_LONG:
5818     case TYPE_ULONG:
5819         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5820         break;
5821     case TYPE_PTR:
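             /* Generic pointer argument: for IOC_W the struct is copied in
              * from the guest before the ioctl, for IOC_R it is copied back
              * out afterwards, and IOC_RW does both. */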
5822         arg_type++;
5823         target_size = thunk_type_size(arg_type, 0);
5824         switch(ie->access) {
5825         case IOC_R:
5826             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5827             if (!is_error(ret)) {
5828                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5829                 if (!argptr)
5830                     return -TARGET_EFAULT;
5831                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5832                 unlock_user(argptr, arg, target_size);
5833             }
5834             break;
5835         case IOC_W:
5836             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5837             if (!argptr)
5838                 return -TARGET_EFAULT;
5839             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5840             unlock_user(argptr, arg, 0);
5841             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5842             break;
5843         default:
5844         case IOC_RW:
5845             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5846             if (!argptr)
5847                 return -TARGET_EFAULT;
5848             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5849             unlock_user(argptr, arg, 0);
5850             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5851             if (!is_error(ret)) {
5852                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5853                 if (!argptr)
5854                     return -TARGET_EFAULT;
5855                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5856                 unlock_user(argptr, arg, target_size);
5857             }
5858             break;
5859         }
5860         break;
5861     default:
5862         qemu_log_mask(LOG_UNIMP,
5863                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5864                       (long)cmd, arg_type[0]);
5865         ret = -TARGET_ENOSYS;
5866         break;
5867     }
5868     return ret;
5869 }
5870 
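     /* Each entry below maps a (mask, value) pair in the target's flag
      * encoding to the corresponding (mask, value) pair in the host's
      * encoding; target_to_host_bitmask() and host_to_target_bitmask()
      * walk these tables in either direction. */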
5871 static const bitmask_transtbl iflag_tbl[] = {
5872         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5873         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5874         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5875         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5876         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5877         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5878         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5879         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5880         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5881         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5882         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5883         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5884         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5885         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5886         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5887         { 0, 0, 0, 0 }
5888 };
5889 
5890 static const bitmask_transtbl oflag_tbl[] = {
5891 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5892 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5893 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5894 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5895 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5896 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5897 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5898 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5899 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5900 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5901 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5902 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5903 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5904 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5905 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5906 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5907 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5908 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5909 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5910 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5911 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5912 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5913 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5914 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5915 	{ 0, 0, 0, 0 }
5916 };
5917 
5918 static const bitmask_transtbl cflag_tbl[] = {
5919 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5920 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5921 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5922 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5923 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5924 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5925 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5926 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5927 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5928 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5929 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5930 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5931 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5932 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5933 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5934 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5935 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5936 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5937 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5938 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5939 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5940 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5941 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5942 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5943 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5944 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5945 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5946 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5947 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5948 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5949 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5950 	{ 0, 0, 0, 0 }
5951 };
5952 
5953 static const bitmask_transtbl lflag_tbl[] = {
5954   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5955   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5956   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5957   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5958   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5959   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5960   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5961   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5962   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5963   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5964   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5965   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5966   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5967   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5968   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5969   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5970   { 0, 0, 0, 0 }
5971 };
5972 
5973 static void target_to_host_termios (void *dst, const void *src)
5974 {
5975     struct host_termios *host = dst;
5976     const struct target_termios *target = src;
5977 
5978     host->c_iflag =
5979         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5980     host->c_oflag =
5981         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5982     host->c_cflag =
5983         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5984     host->c_lflag =
5985         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5986     host->c_line = target->c_line;
5987 
5988     memset(host->c_cc, 0, sizeof(host->c_cc));
5989     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5990     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5991     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5992     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5993     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5994     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5995     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5996     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5997     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5998     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5999     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
6000     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
6001     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
6002     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
6003     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
6004     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
6005     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
6006 }
6007 
6008 static void host_to_target_termios (void *dst, const void *src)
6009 {
6010     struct target_termios *target = dst;
6011     const struct host_termios *host = src;
6012 
6013     target->c_iflag =
6014         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
6015     target->c_oflag =
6016         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
6017     target->c_cflag =
6018         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
6019     target->c_lflag =
6020         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6021     target->c_line = host->c_line;
6022 
6023     memset(target->c_cc, 0, sizeof(target->c_cc));
6024     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6025     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6026     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6027     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6028     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6029     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6030     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6031     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6032     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6033     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6034     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6035     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6036     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6037     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6038     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6039     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6040     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6041 }
6042 
6043 static const StructEntry struct_termios_def = {
6044     .convert = { host_to_target_termios, target_to_host_termios },
6045     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6046     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6047     .print = print_termios,
6048 };
6049 
6050 static const bitmask_transtbl mmap_flags_tbl[] = {
6051     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6052     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6053     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6054     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6055       MAP_ANONYMOUS, MAP_ANONYMOUS },
6056     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6057       MAP_GROWSDOWN, MAP_GROWSDOWN },
6058     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6059       MAP_DENYWRITE, MAP_DENYWRITE },
6060     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6061       MAP_EXECUTABLE, MAP_EXECUTABLE },
6062     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6063     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6064       MAP_NORESERVE, MAP_NORESERVE },
6065     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6066     /* MAP_STACK had been ignored by the kernel for quite some time.
6067        Recognize it for the target insofar as we do not want to pass
6068        it through to the host.  */
6069     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6070     { 0, 0, 0, 0 }
6071 };
6072 
6073 /*
6074  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
6075  *       TARGET_I386 is also defined when TARGET_X86_64 is defined.
6076  */
6077 #if defined(TARGET_I386)
6078 
6079 /* NOTE: there is really only one LDT shared by all the threads */
6080 static uint8_t *ldt_table;
6081 
6082 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6083 {
6084     int size;
6085     void *p;
6086 
6087     if (!ldt_table)
6088         return 0;
6089     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6090     if (size > bytecount)
6091         size = bytecount;
6092     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6093     if (!p)
6094         return -TARGET_EFAULT;
6095     /* ??? Should this be byteswapped?  */
6096     memcpy(p, ldt_table, size);
6097     unlock_user(p, ptr, size);
6098     return size;
6099 }
6100 
6101 /* XXX: add locking support */
6102 static abi_long write_ldt(CPUX86State *env,
6103                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6104 {
6105     struct target_modify_ldt_ldt_s ldt_info;
6106     struct target_modify_ldt_ldt_s *target_ldt_info;
6107     int seg_32bit, contents, read_exec_only, limit_in_pages;
6108     int seg_not_present, useable, lm;
6109     uint32_t *lp, entry_1, entry_2;
6110 
6111     if (bytecount != sizeof(ldt_info))
6112         return -TARGET_EINVAL;
6113     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6114         return -TARGET_EFAULT;
6115     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6116     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6117     ldt_info.limit = tswap32(target_ldt_info->limit);
6118     ldt_info.flags = tswap32(target_ldt_info->flags);
6119     unlock_user_struct(target_ldt_info, ptr, 0);
6120 
6121     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6122         return -TARGET_EINVAL;
6123     seg_32bit = ldt_info.flags & 1;
6124     contents = (ldt_info.flags >> 1) & 3;
6125     read_exec_only = (ldt_info.flags >> 3) & 1;
6126     limit_in_pages = (ldt_info.flags >> 4) & 1;
6127     seg_not_present = (ldt_info.flags >> 5) & 1;
6128     useable = (ldt_info.flags >> 6) & 1;
6129 #ifdef TARGET_ABI32
6130     lm = 0;
6131 #else
6132     lm = (ldt_info.flags >> 7) & 1;
6133 #endif
6134     if (contents == 3) {
6135         if (oldmode)
6136             return -TARGET_EINVAL;
6137         if (seg_not_present == 0)
6138             return -TARGET_EINVAL;
6139     }
6140     /* allocate the LDT */
6141     if (!ldt_table) {
6142         env->ldt.base = target_mmap(0,
6143                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6144                                     PROT_READ|PROT_WRITE,
6145                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6146         if (env->ldt.base == -1)
6147             return -TARGET_ENOMEM;
6148         memset(g2h(env->ldt.base), 0,
6149                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6150         env->ldt.limit = 0xffff;
6151         ldt_table = g2h(env->ldt.base);
6152     }
6153 
6154     /* NOTE: same code as Linux kernel */
6155     /* Allow LDTs to be cleared by the user. */
6156     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6157         if (oldmode ||
6158             (contents == 0		&&
6159              read_exec_only == 1	&&
6160              seg_32bit == 0		&&
6161              limit_in_pages == 0	&&
6162              seg_not_present == 1	&&
6163              useable == 0 )) {
6164             entry_1 = 0;
6165             entry_2 = 0;
6166             goto install;
6167         }
6168     }
6169 
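    /*
     * Encode the x86 segment descriptor from the modify_ldt fields:
     * entry_1 carries base[15:0] in its upper half and limit[15:0] in its
     * lower half; entry_2 carries base[31:24], base[23:16], limit[19:16],
     * the type/flag bits computed above and 0x7000 (S=1, DPL=3).
     */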
6170     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6171         (ldt_info.limit & 0x0ffff);
6172     entry_2 = (ldt_info.base_addr & 0xff000000) |
6173         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6174         (ldt_info.limit & 0xf0000) |
6175         ((read_exec_only ^ 1) << 9) |
6176         (contents << 10) |
6177         ((seg_not_present ^ 1) << 15) |
6178         (seg_32bit << 22) |
6179         (limit_in_pages << 23) |
6180         (lm << 21) |
6181         0x7000;
6182     if (!oldmode)
6183         entry_2 |= (useable << 20);
6184 
6185     /* Install the new entry ...  */
6186 install:
6187     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6188     lp[0] = tswap32(entry_1);
6189     lp[1] = tswap32(entry_2);
6190     return 0;
6191 }
6192 
6193 /* specific and weird i386 syscalls */
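/* func 0 reads the LDT, 1 writes an entry with the old semantics and
   0x11 with the new semantics, mirroring the kernel's modify_ldt(). */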
6194 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6195                               unsigned long bytecount)
6196 {
6197     abi_long ret;
6198 
6199     switch (func) {
6200     case 0:
6201         ret = read_ldt(ptr, bytecount);
6202         break;
6203     case 1:
6204         ret = write_ldt(env, ptr, bytecount, 1);
6205         break;
6206     case 0x11:
6207         ret = write_ldt(env, ptr, bytecount, 0);
6208         break;
6209     default:
6210         ret = -TARGET_ENOSYS;
6211         break;
6212     }
6213     return ret;
6214 }
6215 
6216 #if defined(TARGET_ABI32)
6217 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6218 {
6219     uint64_t *gdt_table = g2h(env->gdt.base);
6220     struct target_modify_ldt_ldt_s ldt_info;
6221     struct target_modify_ldt_ldt_s *target_ldt_info;
6222     int seg_32bit, contents, read_exec_only, limit_in_pages;
6223     int seg_not_present, useable, lm;
6224     uint32_t *lp, entry_1, entry_2;
6225     int i;
6226 
6227     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6228     if (!target_ldt_info)
6229         return -TARGET_EFAULT;
6230     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6231     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6232     ldt_info.limit = tswap32(target_ldt_info->limit);
6233     ldt_info.flags = tswap32(target_ldt_info->flags);
6234     if (ldt_info.entry_number == -1) {
6235         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6236             if (gdt_table[i] == 0) {
6237                 ldt_info.entry_number = i;
6238                 target_ldt_info->entry_number = tswap32(i);
6239                 break;
6240             }
6241         }
6242     }
6243     unlock_user_struct(target_ldt_info, ptr, 1);
6244 
6245     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6246         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6247            return -TARGET_EINVAL;
6248     seg_32bit = ldt_info.flags & 1;
6249     contents = (ldt_info.flags >> 1) & 3;
6250     read_exec_only = (ldt_info.flags >> 3) & 1;
6251     limit_in_pages = (ldt_info.flags >> 4) & 1;
6252     seg_not_present = (ldt_info.flags >> 5) & 1;
6253     useable = (ldt_info.flags >> 6) & 1;
6254 #ifdef TARGET_ABI32
6255     lm = 0;
6256 #else
6257     lm = (ldt_info.flags >> 7) & 1;
6258 #endif
6259 
6260     if (contents == 3) {
6261         if (seg_not_present == 0)
6262             return -TARGET_EINVAL;
6263     }
6264 
6265     /* NOTE: same code as Linux kernel */
6266     /* Allow LDTs to be cleared by the user. */
6267     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6268         if ((contents == 0             &&
6269              read_exec_only == 1       &&
6270              seg_32bit == 0            &&
6271              limit_in_pages == 0       &&
6272              seg_not_present == 1      &&
6273              useable == 0 )) {
6274             entry_1 = 0;
6275             entry_2 = 0;
6276             goto install;
6277         }
6278     }
6279 
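    /* Same descriptor encoding as in write_ldt() above. */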
6280     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6281         (ldt_info.limit & 0x0ffff);
6282     entry_2 = (ldt_info.base_addr & 0xff000000) |
6283         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6284         (ldt_info.limit & 0xf0000) |
6285         ((read_exec_only ^ 1) << 9) |
6286         (contents << 10) |
6287         ((seg_not_present ^ 1) << 15) |
6288         (seg_32bit << 22) |
6289         (limit_in_pages << 23) |
6290         (useable << 20) |
6291         (lm << 21) |
6292         0x7000;
6293 
6294     /* Install the new entry ...  */
6295 install:
6296     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6297     lp[0] = tswap32(entry_1);
6298     lp[1] = tswap32(entry_2);
6299     return 0;
6300 }
6301 
6302 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6303 {
6304     struct target_modify_ldt_ldt_s *target_ldt_info;
6305     uint64_t *gdt_table = g2h(env->gdt.base);
6306     uint32_t base_addr, limit, flags;
6307     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6308     int seg_not_present, useable, lm;
6309     uint32_t *lp, entry_1, entry_2;
6310 
6311     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6312     if (!target_ldt_info)
6313         return -TARGET_EFAULT;
6314     idx = tswap32(target_ldt_info->entry_number);
6315     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6316         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6317         unlock_user_struct(target_ldt_info, ptr, 1);
6318         return -TARGET_EINVAL;
6319     }
6320     lp = (uint32_t *)(gdt_table + idx);
6321     entry_1 = tswap32(lp[0]);
6322     entry_2 = tswap32(lp[1]);
6323 
6324     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6325     contents = (entry_2 >> 10) & 3;
6326     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6327     seg_32bit = (entry_2 >> 22) & 1;
6328     limit_in_pages = (entry_2 >> 23) & 1;
6329     useable = (entry_2 >> 20) & 1;
6330 #ifdef TARGET_ABI32
6331     lm = 0;
6332 #else
6333     lm = (entry_2 >> 21) & 1;
6334 #endif
6335     flags = (seg_32bit << 0) | (contents << 1) |
6336         (read_exec_only << 3) | (limit_in_pages << 4) |
6337         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6338     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6339     base_addr = (entry_1 >> 16) |
6340         (entry_2 & 0xff000000) |
6341         ((entry_2 & 0xff) << 16);
6342     target_ldt_info->base_addr = tswapal(base_addr);
6343     target_ldt_info->limit = tswap32(limit);
6344     target_ldt_info->flags = tswap32(flags);
6345     unlock_user_struct(target_ldt_info, ptr, 1);
6346     return 0;
6347 }
6348 
6349 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6350 {
6351     return -TARGET_ENOSYS;
6352 }
6353 #else
6354 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6355 {
6356     abi_long ret = 0;
6357     abi_ulong val;
6358     int idx;
6359 
6360     switch(code) {
6361     case TARGET_ARCH_SET_GS:
6362     case TARGET_ARCH_SET_FS:
6363         if (code == TARGET_ARCH_SET_GS)
6364             idx = R_GS;
6365         else
6366             idx = R_FS;
6367         cpu_x86_load_seg(env, idx, 0);
6368         env->segs[idx].base = addr;
6369         break;
6370     case TARGET_ARCH_GET_GS:
6371     case TARGET_ARCH_GET_FS:
6372         if (code == TARGET_ARCH_GET_GS)
6373             idx = R_GS;
6374         else
6375             idx = R_FS;
6376         val = env->segs[idx].base;
6377         if (put_user(val, addr, abi_ulong))
6378             ret = -TARGET_EFAULT;
6379         break;
6380     default:
6381         ret = -TARGET_EINVAL;
6382         break;
6383     }
6384     return ret;
6385 }
6386 #endif /* defined(TARGET_ABI32) */
6387 
6388 #endif /* defined(TARGET_I386) */
6389 
6390 #define NEW_STACK_SIZE 0x40000
6391 
6392 
6393 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
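/* Startup arguments handed from do_fork() to clone_func(); the mutex/cond
   pair implements the parent/child startup handshake. */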
6394 typedef struct {
6395     CPUArchState *env;
6396     pthread_mutex_t mutex;
6397     pthread_cond_t cond;
6398     pthread_t thread;
6399     uint32_t tid;
6400     abi_ulong child_tidptr;
6401     abi_ulong parent_tidptr;
6402     sigset_t sigmask;
6403 } new_thread_info;
6404 
6405 static void *clone_func(void *arg)
6406 {
6407     new_thread_info *info = arg;
6408     CPUArchState *env;
6409     CPUState *cpu;
6410     TaskState *ts;
6411 
6412     rcu_register_thread();
6413     tcg_register_thread();
6414     env = info->env;
6415     cpu = env_cpu(env);
6416     thread_cpu = cpu;
6417     ts = (TaskState *)cpu->opaque;
6418     info->tid = sys_gettid();
6419     task_settid(ts);
6420     if (info->child_tidptr)
6421         put_user_u32(info->tid, info->child_tidptr);
6422     if (info->parent_tidptr)
6423         put_user_u32(info->tid, info->parent_tidptr);
6424     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6425     /* Enable signals.  */
6426     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6427     /* Signal to the parent that we're ready.  */
6428     pthread_mutex_lock(&info->mutex);
6429     pthread_cond_broadcast(&info->cond);
6430     pthread_mutex_unlock(&info->mutex);
6431     /* Wait until the parent has finished initializing the tls state.  */
6432     pthread_mutex_lock(&clone_lock);
6433     pthread_mutex_unlock(&clone_lock);
6434     cpu_loop(env);
6435     /* never exits */
6436     return NULL;
6437 }
6438 
6439 /* do_fork() must return host values and target errnos (unlike most
6440    do_*() functions). */
6441 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6442                    abi_ulong parent_tidptr, target_ulong newtls,
6443                    abi_ulong child_tidptr)
6444 {
6445     CPUState *cpu = env_cpu(env);
6446     int ret;
6447     TaskState *ts;
6448     CPUState *new_cpu;
6449     CPUArchState *new_env;
6450     sigset_t sigmask;
6451 
6452     flags &= ~CLONE_IGNORED_FLAGS;
6453 
6454     /* Emulate vfork() with fork() */
6455     if (flags & CLONE_VFORK)
6456         flags &= ~(CLONE_VFORK | CLONE_VM);
6457 
6458     if (flags & CLONE_VM) {
6459         TaskState *parent_ts = (TaskState *)cpu->opaque;
6460         new_thread_info info;
6461         pthread_attr_t attr;
6462 
6463         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6464             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6465             return -TARGET_EINVAL;
6466         }
6467 
6468         ts = g_new0(TaskState, 1);
6469         init_task_state(ts);
6470 
6471         /* Grab a mutex so that thread setup appears atomic.  */
6472         pthread_mutex_lock(&clone_lock);
6473 
6474         /* we create a new CPU instance. */
6475         new_env = cpu_copy(env);
6476         /* Init regs that differ from the parent.  */
6477         cpu_clone_regs_child(new_env, newsp, flags);
6478         cpu_clone_regs_parent(env, flags);
6479         new_cpu = env_cpu(new_env);
6480         new_cpu->opaque = ts;
6481         ts->bprm = parent_ts->bprm;
6482         ts->info = parent_ts->info;
6483         ts->signal_mask = parent_ts->signal_mask;
6484 
6485         if (flags & CLONE_CHILD_CLEARTID) {
6486             ts->child_tidptr = child_tidptr;
6487         }
6488 
6489         if (flags & CLONE_SETTLS) {
6490             cpu_set_tls (new_env, newtls);
6491         }
6492 
6493         memset(&info, 0, sizeof(info));
6494         pthread_mutex_init(&info.mutex, NULL);
6495         pthread_mutex_lock(&info.mutex);
6496         pthread_cond_init(&info.cond, NULL);
6497         info.env = new_env;
6498         if (flags & CLONE_CHILD_SETTID) {
6499             info.child_tidptr = child_tidptr;
6500         }
6501         if (flags & CLONE_PARENT_SETTID) {
6502             info.parent_tidptr = parent_tidptr;
6503         }
6504 
6505         ret = pthread_attr_init(&attr);
6506         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6507         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6508         /* It is not safe to deliver signals until the child has finished
6509            initializing, so temporarily block all signals.  */
6510         sigfillset(&sigmask);
6511         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6512         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6513 
6514         /* If this is our first additional thread, we need to ensure we
6515          * generate code for parallel execution and flush old translations.
6516          */
6517         if (!parallel_cpus) {
6518             parallel_cpus = true;
6519             tb_flush(cpu);
6520         }
6521 
6522         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6523         /* TODO: Free new CPU state if thread creation failed.  */
6524 
6525         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6526         pthread_attr_destroy(&attr);
6527         if (ret == 0) {
6528             /* Wait for the child to initialize.  */
6529             pthread_cond_wait(&info.cond, &info.mutex);
6530             ret = info.tid;
6531         } else {
6532             ret = -1;
6533         }
6534         pthread_mutex_unlock(&info.mutex);
6535         pthread_cond_destroy(&info.cond);
6536         pthread_mutex_destroy(&info.mutex);
6537         pthread_mutex_unlock(&clone_lock);
6538     } else {
6539         /* if no CLONE_VM, we consider it a fork */
6540         if (flags & CLONE_INVALID_FORK_FLAGS) {
6541             return -TARGET_EINVAL;
6542         }
6543 
6544         /* We can't support custom termination signals */
6545         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6546             return -TARGET_EINVAL;
6547         }
6548 
6549         if (block_signals()) {
6550             return -TARGET_ERESTARTSYS;
6551         }
6552 
6553         fork_start();
6554         ret = fork();
6555         if (ret == 0) {
6556             /* Child Process.  */
6557             cpu_clone_regs_child(env, newsp, flags);
6558             fork_end(1);
6559             /* There is a race condition here.  The parent process could
6560                theoretically read the TID in the child process before the child
6561                tid is set.  Avoiding this would require either using ptrace
6562                (not implemented) or having *_tidptr point at a shared memory
6563                mapping.  We can't repeat the spinlock hack used above because
6564                the child process gets its own copy of the lock.  */
6565             if (flags & CLONE_CHILD_SETTID)
6566                 put_user_u32(sys_gettid(), child_tidptr);
6567             if (flags & CLONE_PARENT_SETTID)
6568                 put_user_u32(sys_gettid(), parent_tidptr);
6569             ts = (TaskState *)cpu->opaque;
6570             if (flags & CLONE_SETTLS)
6571                 cpu_set_tls (env, newtls);
6572             if (flags & CLONE_CHILD_CLEARTID)
6573                 ts->child_tidptr = child_tidptr;
6574         } else {
6575             cpu_clone_regs_parent(env, flags);
6576             fork_end(0);
6577         }
6578     }
6579     return ret;
6580 }
6581 
6582 /* warning: doesn't handle Linux-specific flags... */
6583 static int target_to_host_fcntl_cmd(int cmd)
6584 {
6585     int ret;
6586 
6587     switch(cmd) {
6588     case TARGET_F_DUPFD:
6589     case TARGET_F_GETFD:
6590     case TARGET_F_SETFD:
6591     case TARGET_F_GETFL:
6592     case TARGET_F_SETFL:
6593     case TARGET_F_OFD_GETLK:
6594     case TARGET_F_OFD_SETLK:
6595     case TARGET_F_OFD_SETLKW:
6596         ret = cmd;
6597         break;
6598     case TARGET_F_GETLK:
6599         ret = F_GETLK64;
6600         break;
6601     case TARGET_F_SETLK:
6602         ret = F_SETLK64;
6603         break;
6604     case TARGET_F_SETLKW:
6605         ret = F_SETLKW64;
6606         break;
6607     case TARGET_F_GETOWN:
6608         ret = F_GETOWN;
6609         break;
6610     case TARGET_F_SETOWN:
6611         ret = F_SETOWN;
6612         break;
6613     case TARGET_F_GETSIG:
6614         ret = F_GETSIG;
6615         break;
6616     case TARGET_F_SETSIG:
6617         ret = F_SETSIG;
6618         break;
6619 #if TARGET_ABI_BITS == 32
6620     case TARGET_F_GETLK64:
6621         ret = F_GETLK64;
6622         break;
6623     case TARGET_F_SETLK64:
6624         ret = F_SETLK64;
6625         break;
6626     case TARGET_F_SETLKW64:
6627         ret = F_SETLKW64;
6628         break;
6629 #endif
6630     case TARGET_F_SETLEASE:
6631         ret = F_SETLEASE;
6632         break;
6633     case TARGET_F_GETLEASE:
6634         ret = F_GETLEASE;
6635         break;
6636 #ifdef F_DUPFD_CLOEXEC
6637     case TARGET_F_DUPFD_CLOEXEC:
6638         ret = F_DUPFD_CLOEXEC;
6639         break;
6640 #endif
6641     case TARGET_F_NOTIFY:
6642         ret = F_NOTIFY;
6643         break;
6644 #ifdef F_GETOWN_EX
6645     case TARGET_F_GETOWN_EX:
6646         ret = F_GETOWN_EX;
6647         break;
6648 #endif
6649 #ifdef F_SETOWN_EX
6650     case TARGET_F_SETOWN_EX:
6651         ret = F_SETOWN_EX;
6652         break;
6653 #endif
6654 #ifdef F_SETPIPE_SZ
6655     case TARGET_F_SETPIPE_SZ:
6656         ret = F_SETPIPE_SZ;
6657         break;
6658     case TARGET_F_GETPIPE_SZ:
6659         ret = F_GETPIPE_SZ;
6660         break;
6661 #endif
6662 #ifdef F_ADD_SEALS
6663     case TARGET_F_ADD_SEALS:
6664         ret = F_ADD_SEALS;
6665         break;
6666     case TARGET_F_GET_SEALS:
6667         ret = F_GET_SEALS;
6668         break;
6669 #endif
6670     default:
6671         ret = -TARGET_EINVAL;
6672         break;
6673     }
6674 
6675 #if defined(__powerpc64__)
6676     /* On PPC64, the glibc headers define F_*LK* to 12, 13 and 14, which are
6677      * not supported by the kernel. The glibc fcntl call actually adjusts
6678      * them to 5, 6 and 7 before making the syscall(). Since we make the
6679      * syscall directly, adjust to what is supported by the kernel.
6680      */
6681     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6682         ret -= F_GETLK64 - 5;
6683     }
6684 #endif
6685 
6686     return ret;
6687 }
6688 
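/*
 * FLOCK_TRANSTBL is expanded twice below with different definitions of
 * TRANSTBL_CONVERT, once per direction, to generate both the
 * target-to-host and host-to-target flock type conversions.
 */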
6689 #define FLOCK_TRANSTBL \
6690     switch (type) { \
6691     TRANSTBL_CONVERT(F_RDLCK); \
6692     TRANSTBL_CONVERT(F_WRLCK); \
6693     TRANSTBL_CONVERT(F_UNLCK); \
6694     }
6695 
6696 static int target_to_host_flock(int type)
6697 {
6698 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6699     FLOCK_TRANSTBL
6700 #undef  TRANSTBL_CONVERT
6701     return -TARGET_EINVAL;
6702 }
6703 
6704 static int host_to_target_flock(int type)
6705 {
6706 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6707     FLOCK_TRANSTBL
6708 #undef  TRANSTBL_CONVERT
6709     /* If we don't know how to convert the value coming
6710      * from the host, we copy it to the target field as-is
6711      */
6712     return type;
6713 }
6714 
6715 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6716                                             abi_ulong target_flock_addr)
6717 {
6718     struct target_flock *target_fl;
6719     int l_type;
6720 
6721     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6722         return -TARGET_EFAULT;
6723     }
6724 
6725     __get_user(l_type, &target_fl->l_type);
6726     l_type = target_to_host_flock(l_type);
6727     if (l_type < 0) {
6728         return l_type;
6729     }
6730     fl->l_type = l_type;
6731     __get_user(fl->l_whence, &target_fl->l_whence);
6732     __get_user(fl->l_start, &target_fl->l_start);
6733     __get_user(fl->l_len, &target_fl->l_len);
6734     __get_user(fl->l_pid, &target_fl->l_pid);
6735     unlock_user_struct(target_fl, target_flock_addr, 0);
6736     return 0;
6737 }
6738 
6739 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6740                                           const struct flock64 *fl)
6741 {
6742     struct target_flock *target_fl;
6743     short l_type;
6744 
6745     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6746         return -TARGET_EFAULT;
6747     }
6748 
6749     l_type = host_to_target_flock(fl->l_type);
6750     __put_user(l_type, &target_fl->l_type);
6751     __put_user(fl->l_whence, &target_fl->l_whence);
6752     __put_user(fl->l_start, &target_fl->l_start);
6753     __put_user(fl->l_len, &target_fl->l_len);
6754     __put_user(fl->l_pid, &target_fl->l_pid);
6755     unlock_user_struct(target_fl, target_flock_addr, 1);
6756     return 0;
6757 }
6758 
6759 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6760 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6761 
6762 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6763 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6764                                                    abi_ulong target_flock_addr)
6765 {
6766     struct target_oabi_flock64 *target_fl;
6767     int l_type;
6768 
6769     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6770         return -TARGET_EFAULT;
6771     }
6772 
6773     __get_user(l_type, &target_fl->l_type);
6774     l_type = target_to_host_flock(l_type);
6775     if (l_type < 0) {
6776         return l_type;
6777     }
6778     fl->l_type = l_type;
6779     __get_user(fl->l_whence, &target_fl->l_whence);
6780     __get_user(fl->l_start, &target_fl->l_start);
6781     __get_user(fl->l_len, &target_fl->l_len);
6782     __get_user(fl->l_pid, &target_fl->l_pid);
6783     unlock_user_struct(target_fl, target_flock_addr, 0);
6784     return 0;
6785 }
6786 
6787 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6788                                                  const struct flock64 *fl)
6789 {
6790     struct target_oabi_flock64 *target_fl;
6791     short l_type;
6792 
6793     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6794         return -TARGET_EFAULT;
6795     }
6796 
6797     l_type = host_to_target_flock(fl->l_type);
6798     __put_user(l_type, &target_fl->l_type);
6799     __put_user(fl->l_whence, &target_fl->l_whence);
6800     __put_user(fl->l_start, &target_fl->l_start);
6801     __put_user(fl->l_len, &target_fl->l_len);
6802     __put_user(fl->l_pid, &target_fl->l_pid);
6803     unlock_user_struct(target_fl, target_flock_addr, 1);
6804     return 0;
6805 }
6806 #endif
6807 
6808 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6809                                               abi_ulong target_flock_addr)
6810 {
6811     struct target_flock64 *target_fl;
6812     int l_type;
6813 
6814     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6815         return -TARGET_EFAULT;
6816     }
6817 
6818     __get_user(l_type, &target_fl->l_type);
6819     l_type = target_to_host_flock(l_type);
6820     if (l_type < 0) {
6821         return l_type;
6822     }
6823     fl->l_type = l_type;
6824     __get_user(fl->l_whence, &target_fl->l_whence);
6825     __get_user(fl->l_start, &target_fl->l_start);
6826     __get_user(fl->l_len, &target_fl->l_len);
6827     __get_user(fl->l_pid, &target_fl->l_pid);
6828     unlock_user_struct(target_fl, target_flock_addr, 0);
6829     return 0;
6830 }
6831 
6832 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6833                                             const struct flock64 *fl)
6834 {
6835     struct target_flock64 *target_fl;
6836     short l_type;
6837 
6838     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6839         return -TARGET_EFAULT;
6840     }
6841 
6842     l_type = host_to_target_flock(fl->l_type);
6843     __put_user(l_type, &target_fl->l_type);
6844     __put_user(fl->l_whence, &target_fl->l_whence);
6845     __put_user(fl->l_start, &target_fl->l_start);
6846     __put_user(fl->l_len, &target_fl->l_len);
6847     __put_user(fl->l_pid, &target_fl->l_pid);
6848     unlock_user_struct(target_fl, target_flock_addr, 1);
6849     return 0;
6850 }
6851 
6852 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6853 {
6854     struct flock64 fl64;
6855 #ifdef F_GETOWN_EX
6856     struct f_owner_ex fox;
6857     struct target_f_owner_ex *target_fox;
6858 #endif
6859     abi_long ret;
6860     int host_cmd = target_to_host_fcntl_cmd(cmd);
6861 
6862     if (host_cmd == -TARGET_EINVAL)
6863         return host_cmd;
6864 
6865     switch(cmd) {
6866     case TARGET_F_GETLK:
6867         ret = copy_from_user_flock(&fl64, arg);
6868         if (ret) {
6869             return ret;
6870         }
6871         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6872         if (ret == 0) {
6873             ret = copy_to_user_flock(arg, &fl64);
6874         }
6875         break;
6876 
6877     case TARGET_F_SETLK:
6878     case TARGET_F_SETLKW:
6879         ret = copy_from_user_flock(&fl64, arg);
6880         if (ret) {
6881             return ret;
6882         }
6883         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6884         break;
6885 
6886     case TARGET_F_GETLK64:
6887     case TARGET_F_OFD_GETLK:
6888         ret = copy_from_user_flock64(&fl64, arg);
6889         if (ret) {
6890             return ret;
6891         }
6892         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6893         if (ret == 0) {
6894             ret = copy_to_user_flock64(arg, &fl64);
6895         }
6896         break;
6897     case TARGET_F_SETLK64:
6898     case TARGET_F_SETLKW64:
6899     case TARGET_F_OFD_SETLK:
6900     case TARGET_F_OFD_SETLKW:
6901         ret = copy_from_user_flock64(&fl64, arg);
6902         if (ret) {
6903             return ret;
6904         }
6905         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6906         break;
6907 
6908     case TARGET_F_GETFL:
6909         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6910         if (ret >= 0) {
6911             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6912         }
6913         break;
6914 
6915     case TARGET_F_SETFL:
6916         ret = get_errno(safe_fcntl(fd, host_cmd,
6917                                    target_to_host_bitmask(arg,
6918                                                           fcntl_flags_tbl)));
6919         break;
6920 
6921 #ifdef F_GETOWN_EX
6922     case TARGET_F_GETOWN_EX:
6923         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6924         if (ret >= 0) {
6925             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6926                 return -TARGET_EFAULT;
6927             target_fox->type = tswap32(fox.type);
6928             target_fox->pid = tswap32(fox.pid);
6929             unlock_user_struct(target_fox, arg, 1);
6930         }
6931         break;
6932 #endif
6933 
6934 #ifdef F_SETOWN_EX
6935     case TARGET_F_SETOWN_EX:
6936         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6937             return -TARGET_EFAULT;
6938         fox.type = tswap32(target_fox->type);
6939         fox.pid = tswap32(target_fox->pid);
6940         unlock_user_struct(target_fox, arg, 0);
6941         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6942         break;
6943 #endif
6944 
6945     case TARGET_F_SETSIG:
6946         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6947         break;
6948 
6949     case TARGET_F_GETSIG:
6950         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6951         break;
6952 
6953     case TARGET_F_SETOWN:
6954     case TARGET_F_GETOWN:
6955     case TARGET_F_SETLEASE:
6956     case TARGET_F_GETLEASE:
6957     case TARGET_F_SETPIPE_SZ:
6958     case TARGET_F_GETPIPE_SZ:
6959     case TARGET_F_ADD_SEALS:
6960     case TARGET_F_GET_SEALS:
6961         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6962         break;
6963 
6964     default:
6965         ret = get_errno(safe_fcntl(fd, cmd, arg));
6966         break;
6967     }
6968     return ret;
6969 }
6970 
6971 #ifdef USE_UID16
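/*
 * With 16-bit UIDs/GIDs, host IDs that do not fit are reported to the
 * guest as the overflow ID 65534, and a 16-bit -1 coming from the guest
 * is widened so that the "leave unchanged" convention keeps working.
 */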
6972 
6973 static inline int high2lowuid(int uid)
6974 {
6975     if (uid > 65535)
6976         return 65534;
6977     else
6978         return uid;
6979 }
6980 
6981 static inline int high2lowgid(int gid)
6982 {
6983     if (gid > 65535)
6984         return 65534;
6985     else
6986         return gid;
6987 }
6988 
6989 static inline int low2highuid(int uid)
6990 {
6991     if ((int16_t)uid == -1)
6992         return -1;
6993     else
6994         return uid;
6995 }
6996 
6997 static inline int low2highgid(int gid)
6998 {
6999     if ((int16_t)gid == -1)
7000         return -1;
7001     else
7002         return gid;
7003 }
7004 static inline int tswapid(int id)
7005 {
7006     return tswap16(id);
7007 }
7008 
7009 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7010 
7011 #else /* !USE_UID16 */
7012 static inline int high2lowuid(int uid)
7013 {
7014     return uid;
7015 }
7016 static inline int high2lowgid(int gid)
7017 {
7018     return gid;
7019 }
7020 static inline int low2highuid(int uid)
7021 {
7022     return uid;
7023 }
7024 static inline int low2highgid(int gid)
7025 {
7026     return gid;
7027 }
7028 static inline int tswapid(int id)
7029 {
7030     return tswap32(id);
7031 }
7032 
7033 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7034 
7035 #endif /* USE_UID16 */
7036 
7037 /* We must do direct syscalls for setting UID/GID, because we want to
7038  * implement the Linux system call semantics of "change only for this thread",
7039  * not the libc/POSIX semantics of "change for all threads in process".
7040  * (See http://ewontfix.com/17/ for more details.)
7041  * We use the 32-bit version of the syscalls if present; if it is not
7042  * then either the host architecture supports 32-bit UIDs natively with
7043  * the standard syscall, or the 16-bit UID is the best we can do.
7044  */
7045 #ifdef __NR_setuid32
7046 #define __NR_sys_setuid __NR_setuid32
7047 #else
7048 #define __NR_sys_setuid __NR_setuid
7049 #endif
7050 #ifdef __NR_setgid32
7051 #define __NR_sys_setgid __NR_setgid32
7052 #else
7053 #define __NR_sys_setgid __NR_setgid
7054 #endif
7055 #ifdef __NR_setresuid32
7056 #define __NR_sys_setresuid __NR_setresuid32
7057 #else
7058 #define __NR_sys_setresuid __NR_setresuid
7059 #endif
7060 #ifdef __NR_setresgid32
7061 #define __NR_sys_setresgid __NR_setresgid32
7062 #else
7063 #define __NR_sys_setresgid __NR_setresgid
7064 #endif
7065 
7066 _syscall1(int, sys_setuid, uid_t, uid)
7067 _syscall1(int, sys_setgid, gid_t, gid)
7068 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7069 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7070 
7071 void syscall_init(void)
7072 {
7073     IOCTLEntry *ie;
7074     const argtype *arg_type;
7075     int size;
7076     int i;
7077 
7078     thunk_init(STRUCT_MAX);
7079 
7080 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7081 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7082 #include "syscall_types.h"
7083 #undef STRUCT
7084 #undef STRUCT_SPECIAL
7085 
7086     /* Build target_to_host_errno_table[] from
7087      * host_to_target_errno_table[]. */
7088     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7089         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7090     }
7091 
7092     /* We patch the ioctl size if necessary. We rely on the fact that
7093        no ioctl has all the bits set to '1' in the size field. */
7094     ie = ioctl_entries;
7095     while (ie->target_cmd != 0) {
7096         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7097             TARGET_IOC_SIZEMASK) {
7098             arg_type = ie->arg_type;
7099             if (arg_type[0] != TYPE_PTR) {
7100                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7101                         ie->target_cmd);
7102                 exit(1);
7103             }
7104             arg_type++;
7105             size = thunk_type_size(arg_type, 0);
7106             ie->target_cmd = (ie->target_cmd &
7107                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7108                 (size << TARGET_IOC_SIZESHIFT);
7109         }
7110 
7111         /* automatic consistency check if same arch */
7112 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7113     (defined(__x86_64__) && defined(TARGET_X86_64))
7114         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7115             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7116                     ie->name, ie->target_cmd, ie->host_cmd);
7117         }
7118 #endif
7119         ie++;
7120     }
7121 }
7122 
7123 #ifdef TARGET_NR_truncate64
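/*
 * Some ABIs pass 64-bit syscall arguments in aligned register pairs; when
 * regpairs_aligned() says so, the offset halves arrive one argument slot
 * later, so they are taken from arg3/arg4 instead of arg2/arg3.
 */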
7124 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7125                                          abi_long arg2,
7126                                          abi_long arg3,
7127                                          abi_long arg4)
7128 {
7129     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7130         arg2 = arg3;
7131         arg3 = arg4;
7132     }
7133     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7134 }
7135 #endif
7136 
7137 #ifdef TARGET_NR_ftruncate64
7138 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7139                                           abi_long arg2,
7140                                           abi_long arg3,
7141                                           abi_long arg4)
7142 {
7143     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7144         arg2 = arg3;
7145         arg3 = arg4;
7146     }
7147     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7148 }
7149 #endif
7150 
7151 #if defined(TARGET_NR_timer_settime) || \
7152     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7153 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7154                                                  abi_ulong target_addr)
7155 {
7156     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7157                                 offsetof(struct target_itimerspec,
7158                                          it_interval)) ||
7159         target_to_host_timespec(&host_its->it_value, target_addr +
7160                                 offsetof(struct target_itimerspec,
7161                                          it_value))) {
7162         return -TARGET_EFAULT;
7163     }
7164 
7165     return 0;
7166 }
7167 #endif
7168 
7169 #if defined(TARGET_NR_timer_settime64) || \
7170     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7171 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7172                                                    abi_ulong target_addr)
7173 {
7174     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7175                                   offsetof(struct target__kernel_itimerspec,
7176                                            it_interval)) ||
7177         target_to_host_timespec64(&host_its->it_value, target_addr +
7178                                   offsetof(struct target__kernel_itimerspec,
7179                                            it_value))) {
7180         return -TARGET_EFAULT;
7181     }
7182 
7183     return 0;
7184 }
7185 #endif
7186 
7187 #if ((defined(TARGET_NR_timerfd_gettime) || \
7188       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7189       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7190 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7191                                                  struct itimerspec *host_its)
7192 {
7193     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7194                                                        it_interval),
7195                                 &host_its->it_interval) ||
7196         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7197                                                        it_value),
7198                                 &host_its->it_value)) {
7199         return -TARGET_EFAULT;
7200     }
7201     return 0;
7202 }
7203 #endif
7204 
7205 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7206       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7207       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7208 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7209                                                    struct itimerspec *host_its)
7210 {
7211     if (host_to_target_timespec64(target_addr +
7212                                   offsetof(struct target__kernel_itimerspec,
7213                                            it_interval),
7214                                   &host_its->it_interval) ||
7215         host_to_target_timespec64(target_addr +
7216                                   offsetof(struct target__kernel_itimerspec,
7217                                            it_value),
7218                                   &host_its->it_value)) {
7219         return -TARGET_EFAULT;
7220     }
7221     return 0;
7222 }
7223 #endif
7224 
7225 #if defined(TARGET_NR_adjtimex) || \
7226     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7227 static inline abi_long target_to_host_timex(struct timex *host_tx,
7228                                             abi_long target_addr)
7229 {
7230     struct target_timex *target_tx;
7231 
7232     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7233         return -TARGET_EFAULT;
7234     }
7235 
7236     __get_user(host_tx->modes, &target_tx->modes);
7237     __get_user(host_tx->offset, &target_tx->offset);
7238     __get_user(host_tx->freq, &target_tx->freq);
7239     __get_user(host_tx->maxerror, &target_tx->maxerror);
7240     __get_user(host_tx->esterror, &target_tx->esterror);
7241     __get_user(host_tx->status, &target_tx->status);
7242     __get_user(host_tx->constant, &target_tx->constant);
7243     __get_user(host_tx->precision, &target_tx->precision);
7244     __get_user(host_tx->tolerance, &target_tx->tolerance);
7245     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7246     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7247     __get_user(host_tx->tick, &target_tx->tick);
7248     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7249     __get_user(host_tx->jitter, &target_tx->jitter);
7250     __get_user(host_tx->shift, &target_tx->shift);
7251     __get_user(host_tx->stabil, &target_tx->stabil);
7252     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7253     __get_user(host_tx->calcnt, &target_tx->calcnt);
7254     __get_user(host_tx->errcnt, &target_tx->errcnt);
7255     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7256     __get_user(host_tx->tai, &target_tx->tai);
7257 
7258     unlock_user_struct(target_tx, target_addr, 0);
7259     return 0;
7260 }
7261 
7262 static inline abi_long host_to_target_timex(abi_long target_addr,
7263                                             struct timex *host_tx)
7264 {
7265     struct target_timex *target_tx;
7266 
7267     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7268         return -TARGET_EFAULT;
7269     }
7270 
7271     __put_user(host_tx->modes, &target_tx->modes);
7272     __put_user(host_tx->offset, &target_tx->offset);
7273     __put_user(host_tx->freq, &target_tx->freq);
7274     __put_user(host_tx->maxerror, &target_tx->maxerror);
7275     __put_user(host_tx->esterror, &target_tx->esterror);
7276     __put_user(host_tx->status, &target_tx->status);
7277     __put_user(host_tx->constant, &target_tx->constant);
7278     __put_user(host_tx->precision, &target_tx->precision);
7279     __put_user(host_tx->tolerance, &target_tx->tolerance);
7280     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7281     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7282     __put_user(host_tx->tick, &target_tx->tick);
7283     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7284     __put_user(host_tx->jitter, &target_tx->jitter);
7285     __put_user(host_tx->shift, &target_tx->shift);
7286     __put_user(host_tx->stabil, &target_tx->stabil);
7287     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7288     __put_user(host_tx->calcnt, &target_tx->calcnt);
7289     __put_user(host_tx->errcnt, &target_tx->errcnt);
7290     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7291     __put_user(host_tx->tai, &target_tx->tai);
7292 
7293     unlock_user_struct(target_tx, target_addr, 1);
7294     return 0;
7295 }
7296 #endif
7297 
7298 
7299 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7300 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7301                                               abi_long target_addr)
7302 {
7303     struct target__kernel_timex *target_tx;
7304 
7305     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7306                                  offsetof(struct target__kernel_timex,
7307                                           time))) {
7308         return -TARGET_EFAULT;
7309     }
7310 
7311     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7312         return -TARGET_EFAULT;
7313     }
7314 
7315     __get_user(host_tx->modes, &target_tx->modes);
7316     __get_user(host_tx->offset, &target_tx->offset);
7317     __get_user(host_tx->freq, &target_tx->freq);
7318     __get_user(host_tx->maxerror, &target_tx->maxerror);
7319     __get_user(host_tx->esterror, &target_tx->esterror);
7320     __get_user(host_tx->status, &target_tx->status);
7321     __get_user(host_tx->constant, &target_tx->constant);
7322     __get_user(host_tx->precision, &target_tx->precision);
7323     __get_user(host_tx->tolerance, &target_tx->tolerance);
7324     __get_user(host_tx->tick, &target_tx->tick);
7325     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7326     __get_user(host_tx->jitter, &target_tx->jitter);
7327     __get_user(host_tx->shift, &target_tx->shift);
7328     __get_user(host_tx->stabil, &target_tx->stabil);
7329     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7330     __get_user(host_tx->calcnt, &target_tx->calcnt);
7331     __get_user(host_tx->errcnt, &target_tx->errcnt);
7332     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7333     __get_user(host_tx->tai, &target_tx->tai);
7334 
7335     unlock_user_struct(target_tx, target_addr, 0);
7336     return 0;
7337 }
7338 
7339 static inline abi_long host_to_target_timex64(abi_long target_addr,
7340                                               struct timex *host_tx)
7341 {
7342     struct target__kernel_timex *target_tx;
7343 
7344     if (copy_to_user_timeval64(target_addr +
7345                               offsetof(struct target__kernel_timex, time),
7346                               &host_tx->time)) {
7347         return -TARGET_EFAULT;
7348     }
7349 
7350     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7351         return -TARGET_EFAULT;
7352     }
7353 
7354     __put_user(host_tx->modes, &target_tx->modes);
7355     __put_user(host_tx->offset, &target_tx->offset);
7356     __put_user(host_tx->freq, &target_tx->freq);
7357     __put_user(host_tx->maxerror, &target_tx->maxerror);
7358     __put_user(host_tx->esterror, &target_tx->esterror);
7359     __put_user(host_tx->status, &target_tx->status);
7360     __put_user(host_tx->constant, &target_tx->constant);
7361     __put_user(host_tx->precision, &target_tx->precision);
7362     __put_user(host_tx->tolerance, &target_tx->tolerance);
7363     __put_user(host_tx->tick, &target_tx->tick);
7364     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7365     __put_user(host_tx->jitter, &target_tx->jitter);
7366     __put_user(host_tx->shift, &target_tx->shift);
7367     __put_user(host_tx->stabil, &target_tx->stabil);
7368     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7369     __put_user(host_tx->calcnt, &target_tx->calcnt);
7370     __put_user(host_tx->errcnt, &target_tx->errcnt);
7371     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7372     __put_user(host_tx->tai, &target_tx->tai);
7373 
7374     unlock_user_struct(target_tx, target_addr, 1);
7375     return 0;
7376 }
7377 #endif
7378 
7379 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7380                                                abi_ulong target_addr)
7381 {
7382     struct target_sigevent *target_sevp;
7383 
7384     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7385         return -TARGET_EFAULT;
7386     }
7387 
7388     /* This union is awkward on 64 bit systems because it has a 32 bit
7389      * integer and a pointer in it; we follow the conversion approach
7390      * used for handling sigval types in signal.c so the guest should get
7391      * the correct value back even if we did a 64 bit byteswap and it's
7392      * using the 32 bit integer.
7393      */
7394     host_sevp->sigev_value.sival_ptr =
7395         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7396     host_sevp->sigev_signo =
7397         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7398     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7399     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7400 
7401     unlock_user_struct(target_sevp, target_addr, 1);
7402     return 0;
7403 }
7404 
7405 #if defined(TARGET_NR_mlockall)
7406 static inline int target_to_host_mlockall_arg(int arg)
7407 {
7408     int result = 0;
7409 
7410     if (arg & TARGET_MCL_CURRENT) {
7411         result |= MCL_CURRENT;
7412     }
7413     if (arg & TARGET_MCL_FUTURE) {
7414         result |= MCL_FUTURE;
7415     }
7416 #ifdef MCL_ONFAULT
7417     if (arg & TARGET_MCL_ONFAULT) {
7418         result |= MCL_ONFAULT;
7419     }
7420 #endif
7421 
7422     return result;
7423 }
7424 #endif
7425 
7426 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7427      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7428      defined(TARGET_NR_newfstatat))
7429 static inline abi_long host_to_target_stat64(void *cpu_env,
7430                                              abi_ulong target_addr,
7431                                              struct stat *host_st)
7432 {
7433 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7434     if (((CPUARMState *)cpu_env)->eabi) {
7435         struct target_eabi_stat64 *target_st;
7436 
7437         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7438             return -TARGET_EFAULT;
7439         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7440         __put_user(host_st->st_dev, &target_st->st_dev);
7441         __put_user(host_st->st_ino, &target_st->st_ino);
7442 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7443         __put_user(host_st->st_ino, &target_st->__st_ino);
7444 #endif
7445         __put_user(host_st->st_mode, &target_st->st_mode);
7446         __put_user(host_st->st_nlink, &target_st->st_nlink);
7447         __put_user(host_st->st_uid, &target_st->st_uid);
7448         __put_user(host_st->st_gid, &target_st->st_gid);
7449         __put_user(host_st->st_rdev, &target_st->st_rdev);
7450         __put_user(host_st->st_size, &target_st->st_size);
7451         __put_user(host_st->st_blksize, &target_st->st_blksize);
7452         __put_user(host_st->st_blocks, &target_st->st_blocks);
7453         __put_user(host_st->st_atime, &target_st->target_st_atime);
7454         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7455         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7456 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7457         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7458         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7459         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7460 #endif
7461         unlock_user_struct(target_st, target_addr, 1);
7462     } else
7463 #endif
7464     {
7465 #if defined(TARGET_HAS_STRUCT_STAT64)
7466         struct target_stat64 *target_st;
7467 #else
7468         struct target_stat *target_st;
7469 #endif
7470 
7471         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7472             return -TARGET_EFAULT;
7473         memset(target_st, 0, sizeof(*target_st));
7474         __put_user(host_st->st_dev, &target_st->st_dev);
7475         __put_user(host_st->st_ino, &target_st->st_ino);
7476 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7477         __put_user(host_st->st_ino, &target_st->__st_ino);
7478 #endif
7479         __put_user(host_st->st_mode, &target_st->st_mode);
7480         __put_user(host_st->st_nlink, &target_st->st_nlink);
7481         __put_user(host_st->st_uid, &target_st->st_uid);
7482         __put_user(host_st->st_gid, &target_st->st_gid);
7483         __put_user(host_st->st_rdev, &target_st->st_rdev);
7484         /* XXX: better use of kernel struct */
7485         __put_user(host_st->st_size, &target_st->st_size);
7486         __put_user(host_st->st_blksize, &target_st->st_blksize);
7487         __put_user(host_st->st_blocks, &target_st->st_blocks);
7488         __put_user(host_st->st_atime, &target_st->target_st_atime);
7489         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7490         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7491 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7492         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7493         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7494         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7495 #endif
7496         unlock_user_struct(target_st, target_addr, 1);
7497     }
7498 
7499     return 0;
7500 }
7501 #endif
7502 
7503 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7504 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7505                                             abi_ulong target_addr)
7506 {
7507     struct target_statx *target_stx;
7508 
7509     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7510         return -TARGET_EFAULT;
7511     }
7512     memset(target_stx, 0, sizeof(*target_stx));
7513 
7514     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7515     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7516     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7517     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7518     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7519     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7520     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7521     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7522     __put_user(host_stx->stx_size, &target_stx->stx_size);
7523     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7524     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7525     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7526     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7527     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7528     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7529     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7530     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7531     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7532     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7533     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7534     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7535     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7536     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7537 
7538     unlock_user_struct(target_stx, target_addr, 1);
7539 
7540     return 0;
7541 }
7542 #endif
7543 
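/*
 * Pick the host futex syscall that matches the host timespec layout:
 * 64-bit hosts use plain __NR_futex (time_t is already 64-bit), while
 * 32-bit hosts prefer __NR_futex_time64 when the timespec is 64-bit and
 * fall back to __NR_futex otherwise.
 */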
7544 static int do_sys_futex(int *uaddr, int op, int val,
7545                          const struct timespec *timeout, int *uaddr2,
7546                          int val3)
7547 {
7548 #if HOST_LONG_BITS == 64
7549 #if defined(__NR_futex)
7550     /* The host always has a 64-bit time_t and doesn't define a _time64 version.  */
7551     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7552 
7553 #endif
7554 #else /* HOST_LONG_BITS == 64 */
7555 #if defined(__NR_futex_time64)
7556     if (sizeof(timeout->tv_sec) == 8) {
7557         /* _time64 function on 32bit arch */
7558         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7559     }
7560 #endif
7561 #if defined(__NR_futex)
7562     /* old function on 32bit arch */
7563     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7564 #endif
7565 #endif /* HOST_LONG_BITS == 64 */
7566     g_assert_not_reached();
7567 }
7568 
7569 static int do_safe_futex(int *uaddr, int op, int val,
7570                          const struct timespec *timeout, int *uaddr2,
7571                          int val3)
7572 {
7573 #if HOST_LONG_BITS == 64
7574 #if defined(__NR_futex)
7575     /* The host always has a 64-bit time_t and doesn't define a _time64 version.  */
7576     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7577 #endif
7578 #else /* HOST_LONG_BITS == 64 */
7579 #if defined(__NR_futex_time64)
7580     if (sizeof(timeout->tv_sec) == 8) {
7581         /* _time64 function on 32bit arch */
7582         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7583                                            val3));
7584     }
7585 #endif
7586 #if defined(__NR_futex)
7587     /* old function on 32bit arch */
7588     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7589 #endif
7590 #endif /* HOST_LONG_BITS == 64 */
7591     return -TARGET_ENOSYS;
7592 }
7593 
7594 /* ??? Using host futex calls even when target atomic operations
7595    are not really atomic probably breaks things.  However implementing
7596    futexes locally would make futexes shared between multiple processes
7597    tricky.  In any case they're probably useless because guest atomic
7598    operations won't work either.  */
7599 #if defined(TARGET_NR_futex)
7600 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7601                     target_ulong uaddr2, int val3)
7602 {
7603     struct timespec ts, *pts;
7604     int base_op;
7605 
7606     /* ??? We assume FUTEX_* constants are the same on both host
7607        and target.  */
7608 #ifdef FUTEX_CMD_MASK
7609     base_op = op & FUTEX_CMD_MASK;
7610 #else
7611     base_op = op;
7612 #endif
7613     switch (base_op) {
7614     case FUTEX_WAIT:
7615     case FUTEX_WAIT_BITSET:
7616         if (timeout) {
7617             pts = &ts;
7618             target_to_host_timespec(pts, timeout);
7619         } else {
7620             pts = NULL;
7621         }
7622         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7623     case FUTEX_WAKE:
7625     case FUTEX_FD:
7626         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7627     case FUTEX_REQUEUE:
7628     case FUTEX_CMP_REQUEUE:
7629     case FUTEX_WAKE_OP:
7630         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7631            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7632            But the prototype takes a `struct timespec *'; insert casts
7633            to satisfy the compiler.  We do not need to tswap TIMEOUT
7634            since it's not compared to guest memory.  */
7635         pts = (struct timespec *)(uintptr_t) timeout;
7636         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7637                              (base_op == FUTEX_CMP_REQUEUE
7638                                       ? tswap32(val3)
7639                                       : val3));
7640     default:
7641         return -TARGET_ENOSYS;
7642     }
7643 }
7644 #endif
7645 
7646 #if defined(TARGET_NR_futex_time64)
7647 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7648                            target_ulong uaddr2, int val3)
7649 {
7650     struct timespec ts, *pts;
7651     int base_op;
7652 
7653     /* ??? We assume FUTEX_* constants are the same on both host
7654        and target.  */
7655 #ifdef FUTEX_CMD_MASK
7656     base_op = op & FUTEX_CMD_MASK;
7657 #else
7658     base_op = op;
7659 #endif
7660     switch (base_op) {
7661     case FUTEX_WAIT:
7662     case FUTEX_WAIT_BITSET:
7663         if (timeout) {
7664             pts = &ts;
7665             if (target_to_host_timespec64(pts, timeout)) {
7666                 return -TARGET_EFAULT;
7667             }
7668         } else {
7669             pts = NULL;
7670         }
7671         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7672     case FUTEX_WAKE:
7674     case FUTEX_FD:
7675         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7676     case FUTEX_REQUEUE:
7677     case FUTEX_CMP_REQUEUE:
7678     case FUTEX_WAKE_OP:
7679         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7680            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7681            But the prototype takes a `struct timespec *'; insert casts
7682            to satisfy the compiler.  We do not need to tswap TIMEOUT
7683            since it's not compared to guest memory.  */
7684         pts = (struct timespec *)(uintptr_t) timeout;
7685         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7686                              (base_op == FUTEX_CMP_REQUEUE
7687                                       ? tswap32(val3)
7688                                       : val3));
7689     default:
7690         return -TARGET_ENOSYS;
7691     }
7692 }
7693 #endif
7694 
7695 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
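     /*
      * Emulate name_to_handle_at(): the guest supplies a struct file_handle
      * whose handle_bytes field gives the size of the opaque handle buffer.
      * We call the host syscall with a bounce buffer of the same size, then
      * copy the result back, byte-swapping the 32-bit header fields for the
      * target and storing the returned mount ID separately.
      */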
7696 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7697                                      abi_long handle, abi_long mount_id,
7698                                      abi_long flags)
7699 {
7700     struct file_handle *target_fh;
7701     struct file_handle *fh;
7702     int mid = 0;
7703     abi_long ret;
7704     char *name;
7705     unsigned int size, total_size;
7706 
7707     if (get_user_s32(size, handle)) {
7708         return -TARGET_EFAULT;
7709     }
7710 
7711     name = lock_user_string(pathname);
7712     if (!name) {
7713         return -TARGET_EFAULT;
7714     }
7715 
7716     total_size = sizeof(struct file_handle) + size;
7717     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7718     if (!target_fh) {
7719         unlock_user(name, pathname, 0);
7720         return -TARGET_EFAULT;
7721     }
7722 
7723     fh = g_malloc0(total_size);
7724     fh->handle_bytes = size;
7725 
7726     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7727     unlock_user(name, pathname, 0);
7728 
7729     /* man name_to_handle_at(2):
7730      * Other than the use of the handle_bytes field, the caller should treat
7731      * the file_handle structure as an opaque data type
7732      */
7733 
7734     memcpy(target_fh, fh, total_size);
7735     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7736     target_fh->handle_type = tswap32(fh->handle_type);
7737     g_free(fh);
7738     unlock_user(target_fh, handle, total_size);
7739 
7740     if (put_user_s32(mid, mount_id)) {
7741         return -TARGET_EFAULT;
7742     }
7743 
7744     return ret;
7745 
7746 }
7747 #endif
7748 
7749 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7750 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7751                                      abi_long flags)
7752 {
7753     struct file_handle *target_fh;
7754     struct file_handle *fh;
7755     unsigned int size, total_size;
7756     abi_long ret;
7757 
7758     if (get_user_s32(size, handle)) {
7759         return -TARGET_EFAULT;
7760     }
7761 
7762     total_size = sizeof(struct file_handle) + size;
7763     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7764     if (!target_fh) {
7765         return -TARGET_EFAULT;
7766     }
7767 
7768     fh = g_memdup(target_fh, total_size);
7769     fh->handle_bytes = size;
7770     fh->handle_type = tswap32(target_fh->handle_type);
7771 
7772     ret = get_errno(open_by_handle_at(mount_fd, fh,
7773                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7774 
7775     g_free(fh);
7776 
7777     unlock_user(target_fh, handle, total_size);
7778 
7779     return ret;
7780 }
7781 #endif
7782 
7783 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7784 
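     /*
      * Common helper for signalfd() and signalfd4(): convert the guest signal
      * mask and flags to host format, create the signalfd, and register an fd
      * translator so that data read from it is converted back to the target's
      * signalfd_siginfo layout.
      */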
7785 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7786 {
7787     int host_flags;
7788     target_sigset_t *target_mask;
7789     sigset_t host_mask;
7790     abi_long ret;
7791 
7792     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7793         return -TARGET_EINVAL;
7794     }
7795     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7796         return -TARGET_EFAULT;
7797     }
7798 
7799     target_to_host_sigset(&host_mask, target_mask);
7800 
7801     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7802 
7803     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7804     if (ret >= 0) {
7805         fd_trans_register(ret, &target_signalfd_trans);
7806     }
7807 
7808     unlock_user_struct(target_mask, mask, 0);
7809 
7810     return ret;
7811 }
7812 #endif
7813 
7814 /* Map host to target signal numbers for the wait family of syscalls.
7815    Assume all other status bits are the same.  */
7816 int host_to_target_waitstatus(int status)
7817 {
7818     if (WIFSIGNALED(status)) {
7819         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7820     }
7821     if (WIFSTOPPED(status)) {
7822         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7823                | (status & 0xff);
7824     }
7825     return status;
7826 }
7827 
7828 static int open_self_cmdline(void *cpu_env, int fd)
7829 {
7830     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7831     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7832     int i;
7833 
7834     for (i = 0; i < bprm->argc; i++) {
7835         size_t len = strlen(bprm->argv[i]) + 1;
7836 
7837         if (write(fd, bprm->argv[i], len) != len) {
7838             return -1;
7839         }
7840     }
7841 
7842     return 0;
7843 }
7844 
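     /*
      * Fake /proc/self/maps: walk the host's own mappings, keep only the
      * ranges that correspond to guest memory, print them with guest (h2g)
      * addresses, and label the guest stack region as [stack].
      */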
7845 static int open_self_maps(void *cpu_env, int fd)
7846 {
7847     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7848     TaskState *ts = cpu->opaque;
7849     GSList *map_info = read_self_maps();
7850     GSList *s;
7851     int count;
7852 
7853     for (s = map_info; s; s = g_slist_next(s)) {
7854         MapInfo *e = (MapInfo *) s->data;
7855 
7856         if (h2g_valid(e->start)) {
7857             unsigned long min = e->start;
7858             unsigned long max = e->end;
7859             int flags = page_get_flags(h2g(min));
7860             const char *path;
7861 
7862             max = h2g_valid(max - 1) ?
7863                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7864 
7865             if (page_check_range(h2g(min), max - min, flags) == -1) {
7866                 continue;
7867             }
7868 
7869             if (h2g(min) == ts->info->stack_limit) {
7870                 path = "[stack]";
7871             } else {
7872                 path = e->path;
7873             }
7874 
7875             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7876                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7877                             h2g(min), h2g(max - 1) + 1,
7878                             e->is_read ? 'r' : '-',
7879                             e->is_write ? 'w' : '-',
7880                             e->is_exec ? 'x' : '-',
7881                             e->is_priv ? 'p' : '-',
7882                             (uint64_t) e->offset, e->dev, e->inode);
7883             if (path) {
7884                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7885             } else {
7886                 dprintf(fd, "\n");
7887             }
7888         }
7889     }
7890 
7891     free_self_maps(map_info);
7892 
7893 #ifdef TARGET_VSYSCALL_PAGE
7894     /*
7895      * We only support execution from the vsyscall page.
7896      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7897      */
7898     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7899                     " --xp 00000000 00:00 0",
7900                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7901     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7902 #endif
7903 
7904     return 0;
7905 }
7906 
7907 static int open_self_stat(void *cpu_env, int fd)
7908 {
7909     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7910     TaskState *ts = cpu->opaque;
7911     g_autoptr(GString) buf = g_string_new(NULL);
7912     int i;
7913 
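         /*
          * Emit 44 space-separated fields in /proc/<pid>/stat order; only pid
          * (field 1), comm (field 2) and the start of the stack (field 28)
          * carry real values, everything else reads as 0.
          */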
7914     for (i = 0; i < 44; i++) {
7915         if (i == 0) {
7916             /* pid */
7917             g_string_printf(buf, FMT_pid " ", getpid());
7918         } else if (i == 1) {
7919             /* app name */
7920             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7921             bin = bin ? bin + 1 : ts->bprm->argv[0];
7922             g_string_printf(buf, "(%.15s) ", bin);
7923         } else if (i == 27) {
7924             /* stack bottom */
7925             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7926         } else {
7927             /* all remaining fields are not emulated and read as 0 */
7928             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7929         }
7930 
7931         if (write(fd, buf->str, buf->len) != buf->len) {
7932             return -1;
7933         }
7934     }
7935 
7936     return 0;
7937 }
7938 
7939 static int open_self_auxv(void *cpu_env, int fd)
7940 {
7941     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7942     TaskState *ts = cpu->opaque;
7943     abi_ulong auxv = ts->info->saved_auxv;
7944     abi_ulong len = ts->info->auxv_len;
7945     char *ptr;
7946 
7947     /*
7948      * The auxiliary vector is stored on the target process's stack;
7949      * read the whole vector and copy it out to the file.
7950      */
7951     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7952     if (ptr != NULL) {
7953         while (len > 0) {
7954             ssize_t r;
7955             r = write(fd, ptr, len);
7956             if (r <= 0) {
7957                 break;
7958             }
7959             len -= r;
7960             ptr += r;
7961         }
7962         lseek(fd, 0, SEEK_SET);
7963         unlock_user(ptr, auxv, len);
7964     }
7965 
7966     return 0;
7967 }
7968 
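     /*
      * Return non-zero if FILENAME names ENTRY under /proc/self/ or under
      * /proc/<pid>/ for our own pid.
      */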
7969 static int is_proc_myself(const char *filename, const char *entry)
7970 {
7971     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7972         filename += strlen("/proc/");
7973         if (!strncmp(filename, "self/", strlen("self/"))) {
7974             filename += strlen("self/");
7975         } else if (*filename >= '1' && *filename <= '9') {
7976             char myself[80];
7977             snprintf(myself, sizeof(myself), "%d/", getpid());
7978             if (!strncmp(filename, myself, strlen(myself))) {
7979                 filename += strlen(myself);
7980             } else {
7981                 return 0;
7982             }
7983         } else {
7984             return 0;
7985         }
7986         if (!strcmp(filename, entry)) {
7987             return 1;
7988         }
7989     }
7990     return 0;
7991 }
7992 
7993 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7994     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7995 static int is_proc(const char *filename, const char *entry)
7996 {
7997     return strcmp(filename, entry) == 0;
7998 }
7999 #endif
8000 
8001 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
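     /*
      * The addresses in /proc/net/route are printed in host byte order; when
      * guest and host endianness differ, rewrite the destination, gateway and
      * mask columns with byte-swapped values so the guest parses them in its
      * own byte order.
      */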
8002 static int open_net_route(void *cpu_env, int fd)
8003 {
8004     FILE *fp;
8005     char *line = NULL;
8006     size_t len = 0;
8007     ssize_t read;
8008 
8009     fp = fopen("/proc/net/route", "r");
8010     if (fp == NULL) {
8011         return -1;
8012     }
8013 
8014     /* read header */
8015 
8016     read = getline(&line, &len, fp);
8017     read = getline(&line, &len, fp);
         if (read != -1) {
8018         dprintf(fd, "%s", line);
         }
8019     /* read routes */
8020 
8021     while ((read = getline(&line, &len, fp)) != -1) {
8022         char iface[16];
8023         uint32_t dest, gw, mask;
8024         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8025         int fields;
8026 
8027         fields = sscanf(line,
8028                         "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8029                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8030                         &mask, &mtu, &window, &irtt);
8031         if (fields != 11) {
8032             continue;
8033         }
8034         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8035                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8036                 metric, tswap32(mask), mtu, window, irtt);
8037     }
8038 
8039     free(line);
8040     fclose(fp);
8041 
8042     return 0;
8043 }
8044 #endif
8045 
8046 #if defined(TARGET_SPARC)
8047 static int open_cpuinfo(void *cpu_env, int fd)
8048 {
8049     dprintf(fd, "type\t\t: sun4u\n");
8050     return 0;
8051 }
8052 #endif
8053 
8054 #if defined(TARGET_HPPA)
8055 static int open_cpuinfo(void *cpu_env, int fd)
8056 {
8057     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8058     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8059     dprintf(fd, "capabilities\t: os32\n");
8060     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8061     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8062     return 0;
8063 }
8064 #endif
8065 
8066 #if defined(TARGET_M68K)
8067 static int open_hardware(void *cpu_env, int fd)
8068 {
8069     dprintf(fd, "Model:\t\tqemu-m68k\n");
8070     return 0;
8071 }
8072 #endif
8073 
8074 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8075 {
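         /*
          * Some /proc files cannot simply be passed through: the host's
          * versions describe the host, not the guest.  Each entry below names
          * such a file, a function that writes the emulated contents, and the
          * comparison used to match the requested path.  A matched file is
          * generated into an unlinked temporary file and that descriptor is
          * returned instead.
          */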
8076     struct fake_open {
8077         const char *filename;
8078         int (*fill)(void *cpu_env, int fd);
8079         int (*cmp)(const char *s1, const char *s2);
8080     };
8081     const struct fake_open *fake_open;
8082     static const struct fake_open fakes[] = {
8083         { "maps", open_self_maps, is_proc_myself },
8084         { "stat", open_self_stat, is_proc_myself },
8085         { "auxv", open_self_auxv, is_proc_myself },
8086         { "cmdline", open_self_cmdline, is_proc_myself },
8087 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8088         { "/proc/net/route", open_net_route, is_proc },
8089 #endif
8090 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8091         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8092 #endif
8093 #if defined(TARGET_M68K)
8094         { "/proc/hardware", open_hardware, is_proc },
8095 #endif
8096         { NULL, NULL, NULL }
8097     };
8098 
8099     if (is_proc_myself(pathname, "exe")) {
8100         int execfd = qemu_getauxval(AT_EXECFD);
8101         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8102     }
8103 
8104     for (fake_open = fakes; fake_open->filename; fake_open++) {
8105         if (fake_open->cmp(pathname, fake_open->filename)) {
8106             break;
8107         }
8108     }
8109 
8110     if (fake_open->filename) {
8111         const char *tmpdir;
8112         char filename[PATH_MAX];
8113         int fd, r;
8114 
8115         /* create temporary file to map stat to */
8116         tmpdir = getenv("TMPDIR");
8117         if (!tmpdir)
8118             tmpdir = "/tmp";
8119         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8120         fd = mkstemp(filename);
8121         if (fd < 0) {
8122             return fd;
8123         }
8124         unlink(filename);
8125 
8126         if ((r = fake_open->fill(cpu_env, fd))) {
8127             int e = errno;
8128             close(fd);
8129             errno = e;
8130             return r;
8131         }
8132         lseek(fd, 0, SEEK_SET);
8133 
8134         return fd;
8135     }
8136 
8137     return safe_openat(dirfd, path(pathname), flags, mode);
8138 }
8139 
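     /*
      * A guest-visible timer ID is expected to be the g_posix_timers index
      * with TIMER_MAGIC in the upper 16 bits (i.e. TIMER_MAGIC | index);
      * get_timer_id() below decodes and validates it.
      */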
8140 #define TIMER_MAGIC 0x0caf0000
8141 #define TIMER_MAGIC_MASK 0xffff0000
8142 
8143 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8144 static target_timer_t get_timer_id(abi_long arg)
8145 {
8146     target_timer_t timerid = arg;
8147 
8148     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8149         return -TARGET_EINVAL;
8150     }
8151 
8152     timerid &= 0xffff;
8153 
8154     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8155         return -TARGET_EINVAL;
8156     }
8157 
8158     return timerid;
8159 }
8160 
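     /*
      * Convert a guest CPU-affinity bitmap (an array of abi_ulong words in
      * guest byte order) into the host's unsigned long bitmap one bit at a
      * time, so differing word sizes and endianness are both handled.
      */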
8161 static int target_to_host_cpu_mask(unsigned long *host_mask,
8162                                    size_t host_size,
8163                                    abi_ulong target_addr,
8164                                    size_t target_size)
8165 {
8166     unsigned target_bits = sizeof(abi_ulong) * 8;
8167     unsigned host_bits = sizeof(*host_mask) * 8;
8168     abi_ulong *target_mask;
8169     unsigned i, j;
8170 
8171     assert(host_size >= target_size);
8172 
8173     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8174     if (!target_mask) {
8175         return -TARGET_EFAULT;
8176     }
8177     memset(host_mask, 0, host_size);
8178 
8179     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8180         unsigned bit = i * target_bits;
8181         abi_ulong val;
8182 
8183         __get_user(val, &target_mask[i]);
8184         for (j = 0; j < target_bits; j++, bit++) {
8185             if (val & (1UL << j)) {
8186                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8187             }
8188         }
8189     }
8190 
8191     unlock_user(target_mask, target_addr, 0);
8192     return 0;
8193 }
8194 
8195 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8196                                    size_t host_size,
8197                                    abi_ulong target_addr,
8198                                    size_t target_size)
8199 {
8200     unsigned target_bits = sizeof(abi_ulong) * 8;
8201     unsigned host_bits = sizeof(*host_mask) * 8;
8202     abi_ulong *target_mask;
8203     unsigned i, j;
8204 
8205     assert(host_size >= target_size);
8206 
8207     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8208     if (!target_mask) {
8209         return -TARGET_EFAULT;
8210     }
8211 
8212     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8213         unsigned bit = i * target_bits;
8214         abi_ulong val = 0;
8215 
8216         for (j = 0; j < target_bits; j++, bit++) {
8217             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8218                 val |= 1UL << j;
8219             }
8220         }
8221         __put_user(val, &target_mask[i]);
8222     }
8223 
8224     unlock_user(target_mask, target_addr, target_size);
8225     return 0;
8226 }
8227 
8228 /* This is an internal helper for do_syscall so that it is easier
8229  * to have a single return point, so that actions, such as logging
8230  * of syscall results, can be performed.
8231  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8232  */
8233 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8234                             abi_long arg2, abi_long arg3, abi_long arg4,
8235                             abi_long arg5, abi_long arg6, abi_long arg7,
8236                             abi_long arg8)
8237 {
8238     CPUState *cpu = env_cpu(cpu_env);
8239     abi_long ret;
8240 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8241     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8242     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8243     || defined(TARGET_NR_statx)
8244     struct stat st;
8245 #endif
8246 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8247     || defined(TARGET_NR_fstatfs)
8248     struct statfs stfs;
8249 #endif
8250     void *p;
8251 
8252     switch(num) {
8253     case TARGET_NR_exit:
8254         /* In old applications this may be used to implement _exit(2).
8255            However, in threaded applications it is used for thread termination,
8256            and _exit_group is used for application termination.
8257            Do thread termination if we have more than one thread.  */
8258 
8259         if (block_signals()) {
8260             return -TARGET_ERESTARTSYS;
8261         }
8262 
8263         pthread_mutex_lock(&clone_lock);
8264 
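             /*
              * If more than one CPU is still registered, other guest threads
              * exist, so tear down only this thread rather than exiting the
              * whole process.
              */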
8265         if (CPU_NEXT(first_cpu)) {
8266             TaskState *ts = cpu->opaque;
8267 
8268             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8269             object_unref(OBJECT(cpu));
8270             /*
8271              * At this point the CPU should be unrealized and removed
8272              * from cpu lists. We can clean-up the rest of the thread
8273              * data without the lock held.
8274              */
8275 
8276             pthread_mutex_unlock(&clone_lock);
8277 
8278             if (ts->child_tidptr) {
8279                 put_user_u32(0, ts->child_tidptr);
8280                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8281                           NULL, NULL, 0);
8282             }
8283             thread_cpu = NULL;
8284             g_free(ts);
8285             rcu_unregister_thread();
8286             pthread_exit(NULL);
8287         }
8288 
8289         pthread_mutex_unlock(&clone_lock);
8290         preexit_cleanup(cpu_env, arg1);
8291         _exit(arg1);
8292         return 0; /* avoid warning */
8293     case TARGET_NR_read:
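             /*
              * read(fd, NULL, 0) is valid and must still report errors such
              * as EBADF, but lock_user() would fail on it, so pass that case
              * straight through to the host.
              */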
8294         if (arg2 == 0 && arg3 == 0) {
8295             return get_errno(safe_read(arg1, 0, 0));
8296         } else {
8297             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8298                 return -TARGET_EFAULT;
8299             ret = get_errno(safe_read(arg1, p, arg3));
8300             if (ret >= 0 &&
8301                 fd_trans_host_to_target_data(arg1)) {
8302                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8303             }
8304             unlock_user(p, arg2, ret);
8305         }
8306         return ret;
8307     case TARGET_NR_write:
8308         if (arg2 == 0 && arg3 == 0) {
8309             return get_errno(safe_write(arg1, 0, 0));
8310         }
8311         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8312             return -TARGET_EFAULT;
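             /*
              * If a data translator is registered for this fd (for example a
              * netlink socket), convert the data in a private copy first so
              * the guest's buffer is left untouched.
              */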
8313         if (fd_trans_target_to_host_data(arg1)) {
8314             void *copy = g_malloc(arg3);
8315             memcpy(copy, p, arg3);
8316             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8317             if (ret >= 0) {
8318                 ret = get_errno(safe_write(arg1, copy, ret));
8319             }
8320             g_free(copy);
8321         } else {
8322             ret = get_errno(safe_write(arg1, p, arg3));
8323         }
8324         unlock_user(p, arg2, 0);
8325         return ret;
8326 
8327 #ifdef TARGET_NR_open
8328     case TARGET_NR_open:
8329         if (!(p = lock_user_string(arg1)))
8330             return -TARGET_EFAULT;
8331         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8332                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8333                                   arg3));
8334         fd_trans_unregister(ret);
8335         unlock_user(p, arg1, 0);
8336         return ret;
8337 #endif
8338     case TARGET_NR_openat:
8339         if (!(p = lock_user_string(arg2)))
8340             return -TARGET_EFAULT;
8341         ret = get_errno(do_openat(cpu_env, arg1, p,
8342                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8343                                   arg4));
8344         fd_trans_unregister(ret);
8345         unlock_user(p, arg2, 0);
8346         return ret;
8347 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8348     case TARGET_NR_name_to_handle_at:
8349         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8350         return ret;
8351 #endif
8352 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8353     case TARGET_NR_open_by_handle_at:
8354         ret = do_open_by_handle_at(arg1, arg2, arg3);
8355         fd_trans_unregister(ret);
8356         return ret;
8357 #endif
8358     case TARGET_NR_close:
8359         fd_trans_unregister(arg1);
8360         return get_errno(close(arg1));
8361 
8362     case TARGET_NR_brk:
8363         return do_brk(arg1);
8364 #ifdef TARGET_NR_fork
8365     case TARGET_NR_fork:
8366         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8367 #endif
8368 #ifdef TARGET_NR_waitpid
8369     case TARGET_NR_waitpid:
8370         {
8371             int status;
8372             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8373             if (!is_error(ret) && arg2 && ret
8374                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8375                 return -TARGET_EFAULT;
8376         }
8377         return ret;
8378 #endif
8379 #ifdef TARGET_NR_waitid
8380     case TARGET_NR_waitid:
8381         {
8382             siginfo_t info;
8383             info.si_pid = 0;
8384             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8385             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8386                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8387                     return -TARGET_EFAULT;
8388                 host_to_target_siginfo(p, &info);
8389                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8390             }
8391         }
8392         return ret;
8393 #endif
8394 #ifdef TARGET_NR_creat /* not on alpha */
8395     case TARGET_NR_creat:
8396         if (!(p = lock_user_string(arg1)))
8397             return -TARGET_EFAULT;
8398         ret = get_errno(creat(p, arg2));
8399         fd_trans_unregister(ret);
8400         unlock_user(p, arg1, 0);
8401         return ret;
8402 #endif
8403 #ifdef TARGET_NR_link
8404     case TARGET_NR_link:
8405         {
8406             void * p2;
8407             p = lock_user_string(arg1);
8408             p2 = lock_user_string(arg2);
8409             if (!p || !p2)
8410                 ret = -TARGET_EFAULT;
8411             else
8412                 ret = get_errno(link(p, p2));
8413             unlock_user(p2, arg2, 0);
8414             unlock_user(p, arg1, 0);
8415         }
8416         return ret;
8417 #endif
8418 #if defined(TARGET_NR_linkat)
8419     case TARGET_NR_linkat:
8420         {
8421             void * p2 = NULL;
8422             if (!arg2 || !arg4)
8423                 return -TARGET_EFAULT;
8424             p  = lock_user_string(arg2);
8425             p2 = lock_user_string(arg4);
8426             if (!p || !p2)
8427                 ret = -TARGET_EFAULT;
8428             else
8429                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8430             unlock_user(p, arg2, 0);
8431             unlock_user(p2, arg4, 0);
8432         }
8433         return ret;
8434 #endif
8435 #ifdef TARGET_NR_unlink
8436     case TARGET_NR_unlink:
8437         if (!(p = lock_user_string(arg1)))
8438             return -TARGET_EFAULT;
8439         ret = get_errno(unlink(p));
8440         unlock_user(p, arg1, 0);
8441         return ret;
8442 #endif
8443 #if defined(TARGET_NR_unlinkat)
8444     case TARGET_NR_unlinkat:
8445         if (!(p = lock_user_string(arg2)))
8446             return -TARGET_EFAULT;
8447         ret = get_errno(unlinkat(arg1, p, arg3));
8448         unlock_user(p, arg2, 0);
8449         return ret;
8450 #endif
8451     case TARGET_NR_execve:
8452         {
8453             char **argp, **envp;
8454             int argc, envc;
8455             abi_ulong gp;
8456             abi_ulong guest_argp;
8457             abi_ulong guest_envp;
8458             abi_ulong addr;
8459             char **q;
8460             int total_size = 0;
8461 
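                 /*
                  * Count the argv and envp entries so we can allocate host
                  * pointer arrays, then lock each guest string in place so
                  * host pointers can be passed to execve().
                  */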
8462             argc = 0;
8463             guest_argp = arg2;
8464             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8465                 if (get_user_ual(addr, gp))
8466                     return -TARGET_EFAULT;
8467                 if (!addr)
8468                     break;
8469                 argc++;
8470             }
8471             envc = 0;
8472             guest_envp = arg3;
8473             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8474                 if (get_user_ual(addr, gp))
8475                     return -TARGET_EFAULT;
8476                 if (!addr)
8477                     break;
8478                 envc++;
8479             }
8480 
8481             argp = g_new0(char *, argc + 1);
8482             envp = g_new0(char *, envc + 1);
8483 
8484             for (gp = guest_argp, q = argp; gp;
8485                   gp += sizeof(abi_ulong), q++) {
8486                 if (get_user_ual(addr, gp))
8487                     goto execve_efault;
8488                 if (!addr)
8489                     break;
8490                 if (!(*q = lock_user_string(addr)))
8491                     goto execve_efault;
8492                 total_size += strlen(*q) + 1;
8493             }
8494             *q = NULL;
8495 
8496             for (gp = guest_envp, q = envp; gp;
8497                   gp += sizeof(abi_ulong), q++) {
8498                 if (get_user_ual(addr, gp))
8499                     goto execve_efault;
8500                 if (!addr)
8501                     break;
8502                 if (!(*q = lock_user_string(addr)))
8503                     goto execve_efault;
8504                 total_size += strlen(*q) + 1;
8505             }
8506             *q = NULL;
8507 
8508             if (!(p = lock_user_string(arg1)))
8509                 goto execve_efault;
8510             /* Although execve() is not an interruptible syscall it is
8511              * a special case where we must use the safe_syscall wrapper:
8512              * if we allow a signal to happen before we make the host
8513              * syscall then we will 'lose' it, because at the point of
8514              * execve the process leaves QEMU's control. So we use the
8515              * safe syscall wrapper to ensure that we either take the
8516              * signal as a guest signal, or else it does not happen
8517              * before the execve completes and makes it the other
8518              * program's problem.
8519              */
8520             ret = get_errno(safe_execve(p, argp, envp));
8521             unlock_user(p, arg1, 0);
8522 
8523             goto execve_end;
8524 
8525         execve_efault:
8526             ret = -TARGET_EFAULT;
8527 
8528         execve_end:
8529             for (gp = guest_argp, q = argp; *q;
8530                   gp += sizeof(abi_ulong), q++) {
8531                 if (get_user_ual(addr, gp)
8532                     || !addr)
8533                     break;
8534                 unlock_user(*q, addr, 0);
8535             }
8536             for (gp = guest_envp, q = envp; *q;
8537                   gp += sizeof(abi_ulong), q++) {
8538                 if (get_user_ual(addr, gp)
8539                     || !addr)
8540                     break;
8541                 unlock_user(*q, addr, 0);
8542             }
8543 
8544             g_free(argp);
8545             g_free(envp);
8546         }
8547         return ret;
8548     case TARGET_NR_chdir:
8549         if (!(p = lock_user_string(arg1)))
8550             return -TARGET_EFAULT;
8551         ret = get_errno(chdir(p));
8552         unlock_user(p, arg1, 0);
8553         return ret;
8554 #ifdef TARGET_NR_time
8555     case TARGET_NR_time:
8556         {
8557             time_t host_time;
8558             ret = get_errno(time(&host_time));
8559             if (!is_error(ret)
8560                 && arg1
8561                 && put_user_sal(host_time, arg1))
8562                 return -TARGET_EFAULT;
8563         }
8564         return ret;
8565 #endif
8566 #ifdef TARGET_NR_mknod
8567     case TARGET_NR_mknod:
8568         if (!(p = lock_user_string(arg1)))
8569             return -TARGET_EFAULT;
8570         ret = get_errno(mknod(p, arg2, arg3));
8571         unlock_user(p, arg1, 0);
8572         return ret;
8573 #endif
8574 #if defined(TARGET_NR_mknodat)
8575     case TARGET_NR_mknodat:
8576         if (!(p = lock_user_string(arg2)))
8577             return -TARGET_EFAULT;
8578         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8579         unlock_user(p, arg2, 0);
8580         return ret;
8581 #endif
8582 #ifdef TARGET_NR_chmod
8583     case TARGET_NR_chmod:
8584         if (!(p = lock_user_string(arg1)))
8585             return -TARGET_EFAULT;
8586         ret = get_errno(chmod(p, arg2));
8587         unlock_user(p, arg1, 0);
8588         return ret;
8589 #endif
8590 #ifdef TARGET_NR_lseek
8591     case TARGET_NR_lseek:
8592         return get_errno(lseek(arg1, arg2, arg3));
8593 #endif
8594 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8595     /* Alpha specific */
8596     case TARGET_NR_getxpid:
8597         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8598         return get_errno(getpid());
8599 #endif
8600 #ifdef TARGET_NR_getpid
8601     case TARGET_NR_getpid:
8602         return get_errno(getpid());
8603 #endif
8604     case TARGET_NR_mount:
8605         {
8606             /* need to look at the data field */
8607             void *p2, *p3;
8608 
8609             if (arg1) {
8610                 p = lock_user_string(arg1);
8611                 if (!p) {
8612                     return -TARGET_EFAULT;
8613                 }
8614             } else {
8615                 p = NULL;
8616             }
8617 
8618             p2 = lock_user_string(arg2);
8619             if (!p2) {
8620                 if (arg1) {
8621                     unlock_user(p, arg1, 0);
8622                 }
8623                 return -TARGET_EFAULT;
8624             }
8625 
8626             if (arg3) {
8627                 p3 = lock_user_string(arg3);
8628                 if (!p3) {
8629                     if (arg1) {
8630                         unlock_user(p, arg1, 0);
8631                     }
8632                     unlock_user(p2, arg2, 0);
8633                     return -TARGET_EFAULT;
8634                 }
8635             } else {
8636                 p3 = NULL;
8637             }
8638 
8639             /* FIXME - arg5 should be locked, but it isn't clear how to
8640              * do that since it's not guaranteed to be a NULL-terminated
8641              * string.
8642              */
8643             if (!arg5) {
8644                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8645             } else {
8646                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8647             }
8648             ret = get_errno(ret);
8649 
8650             if (arg1) {
8651                 unlock_user(p, arg1, 0);
8652             }
8653             unlock_user(p2, arg2, 0);
8654             if (arg3) {
8655                 unlock_user(p3, arg3, 0);
8656             }
8657         }
8658         return ret;
8659 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8660 #if defined(TARGET_NR_umount)
8661     case TARGET_NR_umount:
8662 #endif
8663 #if defined(TARGET_NR_oldumount)
8664     case TARGET_NR_oldumount:
8665 #endif
8666         if (!(p = lock_user_string(arg1)))
8667             return -TARGET_EFAULT;
8668         ret = get_errno(umount(p));
8669         unlock_user(p, arg1, 0);
8670         return ret;
8671 #endif
8672 #ifdef TARGET_NR_stime /* not on alpha */
8673     case TARGET_NR_stime:
8674         {
8675             struct timespec ts;
8676             ts.tv_nsec = 0;
8677             if (get_user_sal(ts.tv_sec, arg1)) {
8678                 return -TARGET_EFAULT;
8679             }
8680             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8681         }
8682 #endif
8683 #ifdef TARGET_NR_alarm /* not on alpha */
8684     case TARGET_NR_alarm:
8685         return alarm(arg1);
8686 #endif
8687 #ifdef TARGET_NR_pause /* not on alpha */
8688     case TARGET_NR_pause:
8689         if (!block_signals()) {
8690             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8691         }
8692         return -TARGET_EINTR;
8693 #endif
8694 #ifdef TARGET_NR_utime
8695     case TARGET_NR_utime:
8696         {
8697             struct utimbuf tbuf, *host_tbuf;
8698             struct target_utimbuf *target_tbuf;
8699             if (arg2) {
8700                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8701                     return -TARGET_EFAULT;
8702                 tbuf.actime = tswapal(target_tbuf->actime);
8703                 tbuf.modtime = tswapal(target_tbuf->modtime);
8704                 unlock_user_struct(target_tbuf, arg2, 0);
8705                 host_tbuf = &tbuf;
8706             } else {
8707                 host_tbuf = NULL;
8708             }
8709             if (!(p = lock_user_string(arg1)))
8710                 return -TARGET_EFAULT;
8711             ret = get_errno(utime(p, host_tbuf));
8712             unlock_user(p, arg1, 0);
8713         }
8714         return ret;
8715 #endif
8716 #ifdef TARGET_NR_utimes
8717     case TARGET_NR_utimes:
8718         {
8719             struct timeval *tvp, tv[2];
8720             if (arg2) {
8721                 if (copy_from_user_timeval(&tv[0], arg2)
8722                     || copy_from_user_timeval(&tv[1],
8723                                               arg2 + sizeof(struct target_timeval)))
8724                     return -TARGET_EFAULT;
8725                 tvp = tv;
8726             } else {
8727                 tvp = NULL;
8728             }
8729             if (!(p = lock_user_string(arg1)))
8730                 return -TARGET_EFAULT;
8731             ret = get_errno(utimes(p, tvp));
8732             unlock_user(p, arg1, 0);
8733         }
8734         return ret;
8735 #endif
8736 #if defined(TARGET_NR_futimesat)
8737     case TARGET_NR_futimesat:
8738         {
8739             struct timeval *tvp, tv[2];
8740             if (arg3) {
8741                 if (copy_from_user_timeval(&tv[0], arg3)
8742                     || copy_from_user_timeval(&tv[1],
8743                                               arg3 + sizeof(struct target_timeval)))
8744                     return -TARGET_EFAULT;
8745                 tvp = tv;
8746             } else {
8747                 tvp = NULL;
8748             }
8749             if (!(p = lock_user_string(arg2))) {
8750                 return -TARGET_EFAULT;
8751             }
8752             ret = get_errno(futimesat(arg1, path(p), tvp));
8753             unlock_user(p, arg2, 0);
8754         }
8755         return ret;
8756 #endif
8757 #ifdef TARGET_NR_access
8758     case TARGET_NR_access:
8759         if (!(p = lock_user_string(arg1))) {
8760             return -TARGET_EFAULT;
8761         }
8762         ret = get_errno(access(path(p), arg2));
8763         unlock_user(p, arg1, 0);
8764         return ret;
8765 #endif
8766 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8767     case TARGET_NR_faccessat:
8768         if (!(p = lock_user_string(arg2))) {
8769             return -TARGET_EFAULT;
8770         }
8771         ret = get_errno(faccessat(arg1, p, arg3, 0));
8772         unlock_user(p, arg2, 0);
8773         return ret;
8774 #endif
8775 #ifdef TARGET_NR_nice /* not on alpha */
8776     case TARGET_NR_nice:
8777         return get_errno(nice(arg1));
8778 #endif
8779     case TARGET_NR_sync:
8780         sync();
8781         return 0;
8782 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8783     case TARGET_NR_syncfs:
8784         return get_errno(syncfs(arg1));
8785 #endif
8786     case TARGET_NR_kill:
8787         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8788 #ifdef TARGET_NR_rename
8789     case TARGET_NR_rename:
8790         {
8791             void *p2;
8792             p = lock_user_string(arg1);
8793             p2 = lock_user_string(arg2);
8794             if (!p || !p2)
8795                 ret = -TARGET_EFAULT;
8796             else
8797                 ret = get_errno(rename(p, p2));
8798             unlock_user(p2, arg2, 0);
8799             unlock_user(p, arg1, 0);
8800         }
8801         return ret;
8802 #endif
8803 #if defined(TARGET_NR_renameat)
8804     case TARGET_NR_renameat:
8805         {
8806             void *p2;
8807             p  = lock_user_string(arg2);
8808             p2 = lock_user_string(arg4);
8809             if (!p || !p2)
8810                 ret = -TARGET_EFAULT;
8811             else
8812                 ret = get_errno(renameat(arg1, p, arg3, p2));
8813             unlock_user(p2, arg4, 0);
8814             unlock_user(p, arg2, 0);
8815         }
8816         return ret;
8817 #endif
8818 #if defined(TARGET_NR_renameat2)
8819     case TARGET_NR_renameat2:
8820         {
8821             void *p2;
8822             p  = lock_user_string(arg2);
8823             p2 = lock_user_string(arg4);
8824             if (!p || !p2) {
8825                 ret = -TARGET_EFAULT;
8826             } else {
8827                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8828             }
8829             unlock_user(p2, arg4, 0);
8830             unlock_user(p, arg2, 0);
8831         }
8832         return ret;
8833 #endif
8834 #ifdef TARGET_NR_mkdir
8835     case TARGET_NR_mkdir:
8836         if (!(p = lock_user_string(arg1)))
8837             return -TARGET_EFAULT;
8838         ret = get_errno(mkdir(p, arg2));
8839         unlock_user(p, arg1, 0);
8840         return ret;
8841 #endif
8842 #if defined(TARGET_NR_mkdirat)
8843     case TARGET_NR_mkdirat:
8844         if (!(p = lock_user_string(arg2)))
8845             return -TARGET_EFAULT;
8846         ret = get_errno(mkdirat(arg1, p, arg3));
8847         unlock_user(p, arg2, 0);
8848         return ret;
8849 #endif
8850 #ifdef TARGET_NR_rmdir
8851     case TARGET_NR_rmdir:
8852         if (!(p = lock_user_string(arg1)))
8853             return -TARGET_EFAULT;
8854         ret = get_errno(rmdir(p));
8855         unlock_user(p, arg1, 0);
8856         return ret;
8857 #endif
8858     case TARGET_NR_dup:
8859         ret = get_errno(dup(arg1));
8860         if (ret >= 0) {
8861             fd_trans_dup(arg1, ret);
8862         }
8863         return ret;
8864 #ifdef TARGET_NR_pipe
8865     case TARGET_NR_pipe:
8866         return do_pipe(cpu_env, arg1, 0, 0);
8867 #endif
8868 #ifdef TARGET_NR_pipe2
8869     case TARGET_NR_pipe2:
8870         return do_pipe(cpu_env, arg1,
8871                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8872 #endif
8873     case TARGET_NR_times:
8874         {
8875             struct target_tms *tmsp;
8876             struct tms tms;
8877             ret = get_errno(times(&tms));
8878             if (arg1) {
8879                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8880                 if (!tmsp)
8881                     return -TARGET_EFAULT;
8882                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8883                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8884                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8885                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8886             }
8887             if (!is_error(ret))
8888                 ret = host_to_target_clock_t(ret);
8889         }
8890         return ret;
8891     case TARGET_NR_acct:
8892         if (arg1 == 0) {
8893             ret = get_errno(acct(NULL));
8894         } else {
8895             if (!(p = lock_user_string(arg1))) {
8896                 return -TARGET_EFAULT;
8897             }
8898             ret = get_errno(acct(path(p)));
8899             unlock_user(p, arg1, 0);
8900         }
8901         return ret;
8902 #ifdef TARGET_NR_umount2
8903     case TARGET_NR_umount2:
8904         if (!(p = lock_user_string(arg1)))
8905             return -TARGET_EFAULT;
8906         ret = get_errno(umount2(p, arg2));
8907         unlock_user(p, arg1, 0);
8908         return ret;
8909 #endif
8910     case TARGET_NR_ioctl:
8911         return do_ioctl(arg1, arg2, arg3);
8912 #ifdef TARGET_NR_fcntl
8913     case TARGET_NR_fcntl:
8914         return do_fcntl(arg1, arg2, arg3);
8915 #endif
8916     case TARGET_NR_setpgid:
8917         return get_errno(setpgid(arg1, arg2));
8918     case TARGET_NR_umask:
8919         return get_errno(umask(arg1));
8920     case TARGET_NR_chroot:
8921         if (!(p = lock_user_string(arg1)))
8922             return -TARGET_EFAULT;
8923         ret = get_errno(chroot(p));
8924         unlock_user(p, arg1, 0);
8925         return ret;
8926 #ifdef TARGET_NR_dup2
8927     case TARGET_NR_dup2:
8928         ret = get_errno(dup2(arg1, arg2));
8929         if (ret >= 0) {
8930             fd_trans_dup(arg1, arg2);
8931         }
8932         return ret;
8933 #endif
8934 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8935     case TARGET_NR_dup3:
8936     {
8937         int host_flags;
8938 
8939         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8940             return -TARGET_EINVAL;
8941         }
8942         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8943         ret = get_errno(dup3(arg1, arg2, host_flags));
8944         if (ret >= 0) {
8945             fd_trans_dup(arg1, arg2);
8946         }
8947         return ret;
8948     }
8949 #endif
8950 #ifdef TARGET_NR_getppid /* not on alpha */
8951     case TARGET_NR_getppid:
8952         return get_errno(getppid());
8953 #endif
8954 #ifdef TARGET_NR_getpgrp
8955     case TARGET_NR_getpgrp:
8956         return get_errno(getpgrp());
8957 #endif
8958     case TARGET_NR_setsid:
8959         return get_errno(setsid());
8960 #ifdef TARGET_NR_sigaction
8961     case TARGET_NR_sigaction:
8962         {
8963 #if defined(TARGET_ALPHA)
8964             struct target_sigaction act, oact, *pact = 0;
8965             struct target_old_sigaction *old_act;
8966             if (arg2) {
8967                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8968                     return -TARGET_EFAULT;
8969                 act._sa_handler = old_act->_sa_handler;
8970                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8971                 act.sa_flags = old_act->sa_flags;
8972                 act.sa_restorer = 0;
8973                 unlock_user_struct(old_act, arg2, 0);
8974                 pact = &act;
8975             }
8976             ret = get_errno(do_sigaction(arg1, pact, &oact));
8977             if (!is_error(ret) && arg3) {
8978                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8979                     return -TARGET_EFAULT;
8980                 old_act->_sa_handler = oact._sa_handler;
8981                 old_act->sa_mask = oact.sa_mask.sig[0];
8982                 old_act->sa_flags = oact.sa_flags;
8983                 unlock_user_struct(old_act, arg3, 1);
8984             }
8985 #elif defined(TARGET_MIPS)
8986             struct target_sigaction act, oact, *pact, *old_act;
8987 
8988             if (arg2) {
8989                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8990                     return -TARGET_EFAULT;
8991                 act._sa_handler = old_act->_sa_handler;
8992                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8993                 act.sa_flags = old_act->sa_flags;
8994                 unlock_user_struct(old_act, arg2, 0);
8995                 pact = &act;
8996             } else {
8997                 pact = NULL;
8998             }
8999 
9000             ret = get_errno(do_sigaction(arg1, pact, &oact));
9001 
9002             if (!is_error(ret) && arg3) {
9003                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9004                     return -TARGET_EFAULT;
9005                 old_act->_sa_handler = oact._sa_handler;
9006                 old_act->sa_flags = oact.sa_flags;
9007                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9008                 old_act->sa_mask.sig[1] = 0;
9009                 old_act->sa_mask.sig[2] = 0;
9010                 old_act->sa_mask.sig[3] = 0;
9011                 unlock_user_struct(old_act, arg3, 1);
9012             }
9013 #else
9014             struct target_old_sigaction *old_act;
9015             struct target_sigaction act, oact, *pact;
9016             if (arg2) {
9017                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9018                     return -TARGET_EFAULT;
9019                 act._sa_handler = old_act->_sa_handler;
9020                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9021                 act.sa_flags = old_act->sa_flags;
9022                 act.sa_restorer = old_act->sa_restorer;
9023 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9024                 act.ka_restorer = 0;
9025 #endif
9026                 unlock_user_struct(old_act, arg2, 0);
9027                 pact = &act;
9028             } else {
9029                 pact = NULL;
9030             }
9031             ret = get_errno(do_sigaction(arg1, pact, &oact));
9032             if (!is_error(ret) && arg3) {
9033                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9034                     return -TARGET_EFAULT;
9035                 old_act->_sa_handler = oact._sa_handler;
9036                 old_act->sa_mask = oact.sa_mask.sig[0];
9037                 old_act->sa_flags = oact.sa_flags;
9038                 old_act->sa_restorer = oact.sa_restorer;
9039                 unlock_user_struct(old_act, arg3, 1);
9040             }
9041 #endif
9042         }
9043         return ret;
9044 #endif
9045     case TARGET_NR_rt_sigaction:
9046         {
9047 #if defined(TARGET_ALPHA)
9048             /* For Alpha and SPARC this is a 5 argument syscall, with
9049              * a 'restorer' parameter which must be copied into the
9050              * sa_restorer field of the sigaction struct.
9051              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9052              * and arg5 is the sigsetsize.
9053              * Alpha also has a separate rt_sigaction struct that it uses
9054              * here; SPARC uses the usual sigaction struct.
9055              */
9056             struct target_rt_sigaction *rt_act;
9057             struct target_sigaction act, oact, *pact = 0;
9058 
9059             if (arg4 != sizeof(target_sigset_t)) {
9060                 return -TARGET_EINVAL;
9061             }
9062             if (arg2) {
9063                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
9064                     return -TARGET_EFAULT;
9065                 act._sa_handler = rt_act->_sa_handler;
9066                 act.sa_mask = rt_act->sa_mask;
9067                 act.sa_flags = rt_act->sa_flags;
9068                 act.sa_restorer = arg5;
9069                 unlock_user_struct(rt_act, arg2, 0);
9070                 pact = &act;
9071             }
9072             ret = get_errno(do_sigaction(arg1, pact, &oact));
9073             if (!is_error(ret) && arg3) {
9074                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
9075                     return -TARGET_EFAULT;
9076                 rt_act->_sa_handler = oact._sa_handler;
9077                 rt_act->sa_mask = oact.sa_mask;
9078                 rt_act->sa_flags = oact.sa_flags;
9079                 unlock_user_struct(rt_act, arg3, 1);
9080             }
9081 #else
9082 #ifdef TARGET_SPARC
9083             target_ulong restorer = arg4;
9084             target_ulong sigsetsize = arg5;
9085 #else
9086             target_ulong sigsetsize = arg4;
9087 #endif
9088             struct target_sigaction *act;
9089             struct target_sigaction *oact;
9090 
9091             if (sigsetsize != sizeof(target_sigset_t)) {
9092                 return -TARGET_EINVAL;
9093             }
9094             if (arg2) {
9095                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9096                     return -TARGET_EFAULT;
9097                 }
9098 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9099                 act->ka_restorer = restorer;
9100 #endif
9101             } else {
9102                 act = NULL;
9103             }
9104             if (arg3) {
9105                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9106                     ret = -TARGET_EFAULT;
9107                     goto rt_sigaction_fail;
9108                 }
9109             } else
9110                 oact = NULL;
9111             ret = get_errno(do_sigaction(arg1, act, oact));
9112 	rt_sigaction_fail:
9113             if (act)
9114                 unlock_user_struct(act, arg2, 0);
9115             if (oact)
9116                 unlock_user_struct(oact, arg3, 1);
9117 #endif
9118         }
9119         return ret;
9120 #ifdef TARGET_NR_sgetmask /* not on alpha */
9121     case TARGET_NR_sgetmask:
9122         {
9123             sigset_t cur_set;
9124             abi_ulong target_set;
9125             ret = do_sigprocmask(0, NULL, &cur_set);
9126             if (!ret) {
9127                 host_to_target_old_sigset(&target_set, &cur_set);
9128                 ret = target_set;
9129             }
9130         }
9131         return ret;
9132 #endif
9133 #ifdef TARGET_NR_ssetmask /* not on alpha */
9134     case TARGET_NR_ssetmask:
9135         {
9136             sigset_t set, oset;
9137             abi_ulong target_set = arg1;
9138             target_to_host_old_sigset(&set, &target_set);
9139             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9140             if (!ret) {
9141                 host_to_target_old_sigset(&target_set, &oset);
9142                 ret = target_set;
9143             }
9144         }
9145         return ret;
9146 #endif
9147 #ifdef TARGET_NR_sigprocmask
9148     case TARGET_NR_sigprocmask:
9149         {
9150 #if defined(TARGET_ALPHA)
9151             sigset_t set, oldset;
9152             abi_ulong mask;
9153             int how;
9154 
9155             switch (arg1) {
9156             case TARGET_SIG_BLOCK:
9157                 how = SIG_BLOCK;
9158                 break;
9159             case TARGET_SIG_UNBLOCK:
9160                 how = SIG_UNBLOCK;
9161                 break;
9162             case TARGET_SIG_SETMASK:
9163                 how = SIG_SETMASK;
9164                 break;
9165             default:
9166                 return -TARGET_EINVAL;
9167             }
9168             mask = arg2;
9169             target_to_host_old_sigset(&set, &mask);
9170 
9171             ret = do_sigprocmask(how, &set, &oldset);
9172             if (!is_error(ret)) {
9173                 host_to_target_old_sigset(&mask, &oldset);
9174                 ret = mask;
9175                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9176             }
9177 #else
9178             sigset_t set, oldset, *set_ptr;
9179             int how;
9180 
9181             if (arg2) {
9182                 switch (arg1) {
9183                 case TARGET_SIG_BLOCK:
9184                     how = SIG_BLOCK;
9185                     break;
9186                 case TARGET_SIG_UNBLOCK:
9187                     how = SIG_UNBLOCK;
9188                     break;
9189                 case TARGET_SIG_SETMASK:
9190                     how = SIG_SETMASK;
9191                     break;
9192                 default:
9193                     return -TARGET_EINVAL;
9194                 }
9195                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9196                     return -TARGET_EFAULT;
9197                 target_to_host_old_sigset(&set, p);
9198                 unlock_user(p, arg2, 0);
9199                 set_ptr = &set;
9200             } else {
9201                 how = 0;
9202                 set_ptr = NULL;
9203             }
9204             ret = do_sigprocmask(how, set_ptr, &oldset);
9205             if (!is_error(ret) && arg3) {
9206                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9207                     return -TARGET_EFAULT;
9208                 host_to_target_old_sigset(p, &oldset);
9209                 unlock_user(p, arg3, sizeof(target_sigset_t));
9210             }
9211 #endif
9212         }
9213         return ret;
9214 #endif
9215     case TARGET_NR_rt_sigprocmask:
9216         {
9217             int how = arg1;
9218             sigset_t set, oldset, *set_ptr;
9219 
9220             if (arg4 != sizeof(target_sigset_t)) {
9221                 return -TARGET_EINVAL;
9222             }
9223 
9224             if (arg2) {
9225                 switch(how) {
9226                 case TARGET_SIG_BLOCK:
9227                     how = SIG_BLOCK;
9228                     break;
9229                 case TARGET_SIG_UNBLOCK:
9230                     how = SIG_UNBLOCK;
9231                     break;
9232                 case TARGET_SIG_SETMASK:
9233                     how = SIG_SETMASK;
9234                     break;
9235                 default:
9236                     return -TARGET_EINVAL;
9237                 }
9238                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9239                     return -TARGET_EFAULT;
9240                 target_to_host_sigset(&set, p);
9241                 unlock_user(p, arg2, 0);
9242                 set_ptr = &set;
9243             } else {
9244                 how = 0;
9245                 set_ptr = NULL;
9246             }
9247             ret = do_sigprocmask(how, set_ptr, &oldset);
9248             if (!is_error(ret) && arg3) {
9249                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9250                     return -TARGET_EFAULT;
9251                 host_to_target_sigset(p, &oldset);
9252                 unlock_user(p, arg3, sizeof(target_sigset_t));
9253             }
9254         }
9255         return ret;
9256 #ifdef TARGET_NR_sigpending
9257     case TARGET_NR_sigpending:
9258         {
9259             sigset_t set;
9260             ret = get_errno(sigpending(&set));
9261             if (!is_error(ret)) {
9262                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9263                     return -TARGET_EFAULT;
9264                 host_to_target_old_sigset(p, &set);
9265                 unlock_user(p, arg1, sizeof(target_sigset_t));
9266             }
9267         }
9268         return ret;
9269 #endif
9270     case TARGET_NR_rt_sigpending:
9271         {
9272             sigset_t set;
9273 
9274             /* Yes, this check is >, not != like most. We follow the kernel's
9275              * logic and it does it like this because it implements
9276              * NR_sigpending through the same code path, and in that case
9277              * the old_sigset_t is smaller in size.
9278              */
9279             if (arg2 > sizeof(target_sigset_t)) {
9280                 return -TARGET_EINVAL;
9281             }
9282 
9283             ret = get_errno(sigpending(&set));
9284             if (!is_error(ret)) {
9285                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9286                     return -TARGET_EFAULT;
9287                 host_to_target_sigset(p, &set);
9288                 unlock_user(p, arg1, sizeof(target_sigset_t));
9289             }
9290         }
9291         return ret;
9292 #ifdef TARGET_NR_sigsuspend
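    /* Stash the requested mask in the TaskState; the signal delivery code
     * uses sigsuspend_mask and in_sigsuspend to handle the temporary mask.
     */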
9293     case TARGET_NR_sigsuspend:
9294         {
9295             TaskState *ts = cpu->opaque;
9296 #if defined(TARGET_ALPHA)
9297             abi_ulong mask = arg1;
9298             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9299 #else
9300             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9301                 return -TARGET_EFAULT;
9302             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9303             unlock_user(p, arg1, 0);
9304 #endif
9305             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9306                                                SIGSET_T_SIZE));
9307             if (ret != -TARGET_ERESTARTSYS) {
9308                 ts->in_sigsuspend = 1;
9309             }
9310         }
9311         return ret;
9312 #endif
9313     case TARGET_NR_rt_sigsuspend:
9314         {
9315             TaskState *ts = cpu->opaque;
9316 
9317             if (arg2 != sizeof(target_sigset_t)) {
9318                 return -TARGET_EINVAL;
9319             }
9320             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9321                 return -TARGET_EFAULT;
9322             target_to_host_sigset(&ts->sigsuspend_mask, p);
9323             unlock_user(p, arg1, 0);
9324             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9325                                                SIGSET_T_SIZE));
9326             if (ret != -TARGET_ERESTARTSYS) {
9327                 ts->in_sigsuspend = 1;
9328             }
9329         }
9330         return ret;
9331 #ifdef TARGET_NR_rt_sigtimedwait
9332     case TARGET_NR_rt_sigtimedwait:
9333         {
9334             sigset_t set;
9335             struct timespec uts, *puts;
9336             siginfo_t uinfo;
9337 
9338             if (arg4 != sizeof(target_sigset_t)) {
9339                 return -TARGET_EINVAL;
9340             }
9341 
9342             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9343                 return -TARGET_EFAULT;
9344             target_to_host_sigset(&set, p);
9345             unlock_user(p, arg1, 0);
9346             if (arg3) {
9347                 puts = &uts;
9348                 if (target_to_host_timespec(puts, arg3)) {
9349                     return -TARGET_EFAULT;
9350                 }
9351             } else {
9352                 puts = NULL;
9353             }
9354             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9355                                                  SIGSET_T_SIZE));
9356             if (!is_error(ret)) {
9357                 if (arg2) {
9358                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9359                                   0);
9360                     if (!p) {
9361                         return -TARGET_EFAULT;
9362                     }
9363                     host_to_target_siginfo(p, &uinfo);
9364                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9365                 }
9366                 ret = host_to_target_signal(ret);
9367             }
9368         }
9369         return ret;
9370 #endif
9371 #ifdef TARGET_NR_rt_sigtimedwait_time64
9372     case TARGET_NR_rt_sigtimedwait_time64:
9373         {
9374             sigset_t set;
9375             struct timespec uts, *puts;
9376             siginfo_t uinfo;
9377 
9378             if (arg4 != sizeof(target_sigset_t)) {
9379                 return -TARGET_EINVAL;
9380             }
9381 
9382             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9383             if (!p) {
9384                 return -TARGET_EFAULT;
9385             }
9386             target_to_host_sigset(&set, p);
9387             unlock_user(p, arg1, 0);
9388             if (arg3) {
9389                 puts = &uts;
9390                 if (target_to_host_timespec64(puts, arg3)) {
9391                     return -TARGET_EFAULT;
9392                 }
9393             } else {
9394                 puts = NULL;
9395             }
9396             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9397                                                  SIGSET_T_SIZE));
9398             if (!is_error(ret)) {
9399                 if (arg2) {
9400                     p = lock_user(VERIFY_WRITE, arg2,
9401                                   sizeof(target_siginfo_t), 0);
9402                     if (!p) {
9403                         return -TARGET_EFAULT;
9404                     }
9405                     host_to_target_siginfo(p, &uinfo);
9406                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9407                 }
9408                 ret = host_to_target_signal(ret);
9409             }
9410         }
9411         return ret;
9412 #endif
9413     case TARGET_NR_rt_sigqueueinfo:
9414         {
9415             siginfo_t uinfo;
9416 
9417             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9418             if (!p) {
9419                 return -TARGET_EFAULT;
9420             }
9421             target_to_host_siginfo(&uinfo, p);
9422             unlock_user(p, arg3, 0);
9423             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9424         }
9425         return ret;
9426     case TARGET_NR_rt_tgsigqueueinfo:
9427         {
9428             siginfo_t uinfo;
9429 
9430             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9431             if (!p) {
9432                 return -TARGET_EFAULT;
9433             }
9434             target_to_host_siginfo(&uinfo, p);
9435             unlock_user(p, arg4, 0);
9436             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9437         }
9438         return ret;
9439 #ifdef TARGET_NR_sigreturn
9440     case TARGET_NR_sigreturn:
9441         if (block_signals()) {
9442             return -TARGET_ERESTARTSYS;
9443         }
9444         return do_sigreturn(cpu_env);
9445 #endif
9446     case TARGET_NR_rt_sigreturn:
9447         if (block_signals()) {
9448             return -TARGET_ERESTARTSYS;
9449         }
9450         return do_rt_sigreturn(cpu_env);
9451     case TARGET_NR_sethostname:
9452         if (!(p = lock_user_string(arg1)))
9453             return -TARGET_EFAULT;
9454         ret = get_errno(sethostname(p, arg2));
9455         unlock_user(p, arg1, 0);
9456         return ret;
9457 #ifdef TARGET_NR_setrlimit
9458     case TARGET_NR_setrlimit:
9459         {
9460             int resource = target_to_host_resource(arg1);
9461             struct target_rlimit *target_rlim;
9462             struct rlimit rlim;
9463             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9464                 return -TARGET_EFAULT;
9465             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9466             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9467             unlock_user_struct(target_rlim, arg2, 0);
9468             /*
9469              * If we just passed through resource limit settings for memory then
9470              * they would also apply to QEMU's own allocations, and QEMU will
9471              * crash or hang or die if its allocations fail. Ideally we would
9472              * track the guest allocations in QEMU and apply the limits ourselves.
9473              * For now, just tell the guest the call succeeded but don't actually
9474              * limit anything.
9475              */
9476             if (resource != RLIMIT_AS &&
9477                 resource != RLIMIT_DATA &&
9478                 resource != RLIMIT_STACK) {
9479                 return get_errno(setrlimit(resource, &rlim));
9480             } else {
9481                 return 0;
9482             }
9483         }
9484 #endif
9485 #ifdef TARGET_NR_getrlimit
9486     case TARGET_NR_getrlimit:
9487         {
9488             int resource = target_to_host_resource(arg1);
9489             struct target_rlimit *target_rlim;
9490             struct rlimit rlim;
9491 
9492             ret = get_errno(getrlimit(resource, &rlim));
9493             if (!is_error(ret)) {
9494                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9495                     return -TARGET_EFAULT;
9496                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9497                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9498                 unlock_user_struct(target_rlim, arg2, 1);
9499             }
9500         }
9501         return ret;
9502 #endif
9503     case TARGET_NR_getrusage:
9504         {
9505             struct rusage rusage;
9506             ret = get_errno(getrusage(arg1, &rusage));
9507             if (!is_error(ret)) {
9508                 ret = host_to_target_rusage(arg2, &rusage);
9509             }
9510         }
9511         return ret;
9512 #if defined(TARGET_NR_gettimeofday)
9513     case TARGET_NR_gettimeofday:
9514         {
9515             struct timeval tv;
9516             struct timezone tz;
9517 
9518             ret = get_errno(gettimeofday(&tv, &tz));
9519             if (!is_error(ret)) {
9520                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9521                     return -TARGET_EFAULT;
9522                 }
9523                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9524                     return -TARGET_EFAULT;
9525                 }
9526             }
9527         }
9528         return ret;
9529 #endif
9530 #if defined(TARGET_NR_settimeofday)
9531     case TARGET_NR_settimeofday:
9532         {
9533             struct timeval tv, *ptv = NULL;
9534             struct timezone tz, *ptz = NULL;
9535 
9536             if (arg1) {
9537                 if (copy_from_user_timeval(&tv, arg1)) {
9538                     return -TARGET_EFAULT;
9539                 }
9540                 ptv = &tv;
9541             }
9542 
9543             if (arg2) {
9544                 if (copy_from_user_timezone(&tz, arg2)) {
9545                     return -TARGET_EFAULT;
9546                 }
9547                 ptz = &tz;
9548             }
9549 
9550             return get_errno(settimeofday(ptv, ptz));
9551         }
9552 #endif
9553 #if defined(TARGET_NR_select)
9554     case TARGET_NR_select:
9555 #if defined(TARGET_WANT_NI_OLD_SELECT)
9556         /* Some architectures used to implement old_select here,
9557          * but now return ENOSYS for it.
9558          */
9559         ret = -TARGET_ENOSYS;
9560 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9561         ret = do_old_select(arg1);
9562 #else
9563         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9564 #endif
9565         return ret;
9566 #endif
9567 #ifdef TARGET_NR_pselect6
9568     case TARGET_NR_pselect6:
9569         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9570 #endif
9571 #ifdef TARGET_NR_pselect6_time64
9572     case TARGET_NR_pselect6_time64:
9573         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9574 #endif
9575 #ifdef TARGET_NR_symlink
9576     case TARGET_NR_symlink:
9577         {
9578             void *p2;
9579             p = lock_user_string(arg1);
9580             p2 = lock_user_string(arg2);
9581             if (!p || !p2)
9582                 ret = -TARGET_EFAULT;
9583             else
9584                 ret = get_errno(symlink(p, p2));
9585             unlock_user(p2, arg2, 0);
9586             unlock_user(p, arg1, 0);
9587         }
9588         return ret;
9589 #endif
9590 #if defined(TARGET_NR_symlinkat)
9591     case TARGET_NR_symlinkat:
9592         {
9593             void *p2;
9594             p  = lock_user_string(arg1);
9595             p2 = lock_user_string(arg3);
9596             if (!p || !p2)
9597                 ret = -TARGET_EFAULT;
9598             else
9599                 ret = get_errno(symlinkat(p, arg2, p2));
9600             unlock_user(p2, arg3, 0);
9601             unlock_user(p, arg1, 0);
9602         }
9603         return ret;
9604 #endif
9605 #ifdef TARGET_NR_readlink
9606     case TARGET_NR_readlink:
9607         {
9608             void *p2;
9609             p = lock_user_string(arg1);
9610             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9611             if (!p || !p2) {
9612                 ret = -TARGET_EFAULT;
9613             } else if (!arg3) {
9614                 /* Short circuit this for the magic exe check. */
9615                 ret = -TARGET_EINVAL;
9616             } else if (is_proc_myself((const char *)p, "exe")) {
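                /* readlink on /proc/self/exe should report the emulated
                 * binary, not QEMU itself, so return the resolved exec_path.
                 */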
9617                 char real[PATH_MAX], *temp;
9618                 temp = realpath(exec_path, real);
9619                 /* Return value is # of bytes that we wrote to the buffer. */
9620                 if (temp == NULL) {
9621                     ret = get_errno(-1);
9622                 } else {
9623                     /* Don't worry about sign mismatch as earlier mapping
9624                      * logic would have thrown a bad address error. */
9625                     ret = MIN(strlen(real), arg3);
9626                     /* We cannot NUL terminate the string. */
9627                     memcpy(p2, real, ret);
9628                 }
9629             } else {
9630                 ret = get_errno(readlink(path(p), p2, arg3));
9631             }
9632             unlock_user(p2, arg2, ret);
9633             unlock_user(p, arg1, 0);
9634         }
9635         return ret;
9636 #endif
9637 #if defined(TARGET_NR_readlinkat)
9638     case TARGET_NR_readlinkat:
9639         {
9640             void *p2;
9641             p  = lock_user_string(arg2);
9642             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9643             if (!p || !p2) {
9644                 ret = -TARGET_EFAULT;
9645             } else if (is_proc_myself((const char *)p, "exe")) {
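                /* Same /proc/self/exe special case as readlink above. */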
9646                 char real[PATH_MAX], *temp;
9647                 temp = realpath(exec_path, real);
9648                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9649                 snprintf((char *)p2, arg4, "%s", real);
9650             } else {
9651                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9652             }
9653             unlock_user(p2, arg3, ret);
9654             unlock_user(p, arg2, 0);
9655         }
9656         return ret;
9657 #endif
9658 #ifdef TARGET_NR_swapon
9659     case TARGET_NR_swapon:
9660         if (!(p = lock_user_string(arg1)))
9661             return -TARGET_EFAULT;
9662         ret = get_errno(swapon(p, arg2));
9663         unlock_user(p, arg1, 0);
9664         return ret;
9665 #endif
9666     case TARGET_NR_reboot:
9667         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9668            /* arg4 is only used with RESTART2; it must be ignored in all other cases */
9669            p = lock_user_string(arg4);
9670            if (!p) {
9671                return -TARGET_EFAULT;
9672            }
9673            ret = get_errno(reboot(arg1, arg2, arg3, p));
9674            unlock_user(p, arg4, 0);
9675         } else {
9676            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9677         }
9678         return ret;
9679 #ifdef TARGET_NR_mmap
9680     case TARGET_NR_mmap:
9681 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9682     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9683     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9684     || defined(TARGET_S390X)
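        /* These targets use the old mmap calling convention: arg1 points to
         * a block of six values holding the real arguments.
         */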
9685         {
9686             abi_ulong *v;
9687             abi_ulong v1, v2, v3, v4, v5, v6;
9688             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9689                 return -TARGET_EFAULT;
9690             v1 = tswapal(v[0]);
9691             v2 = tswapal(v[1]);
9692             v3 = tswapal(v[2]);
9693             v4 = tswapal(v[3]);
9694             v5 = tswapal(v[4]);
9695             v6 = tswapal(v[5]);
9696             unlock_user(v, arg1, 0);
9697             ret = get_errno(target_mmap(v1, v2, v3,
9698                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9699                                         v5, v6));
9700         }
9701 #else
9702         ret = get_errno(target_mmap(arg1, arg2, arg3,
9703                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9704                                     arg5,
9705                                     arg6));
9706 #endif
9707         return ret;
9708 #endif
9709 #ifdef TARGET_NR_mmap2
9710     case TARGET_NR_mmap2:
9711 #ifndef MMAP_SHIFT
9712 #define MMAP_SHIFT 12
9713 #endif
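        /* The mmap2 offset argument is in units of 1 << MMAP_SHIFT
         * (4096 unless the target overrides it).
         */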
9714         ret = target_mmap(arg1, arg2, arg3,
9715                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9716                           arg5, arg6 << MMAP_SHIFT);
9717         return get_errno(ret);
9718 #endif
9719     case TARGET_NR_munmap:
9720         return get_errno(target_munmap(arg1, arg2));
9721     case TARGET_NR_mprotect:
9722         {
9723             TaskState *ts = cpu->opaque;
9724             /* Special hack to detect libc making the stack executable.  */
9725             if ((arg3 & PROT_GROWSDOWN)
9726                 && arg1 >= ts->info->stack_limit
9727                 && arg1 <= ts->info->start_stack) {
9728                 arg3 &= ~PROT_GROWSDOWN;
9729                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9730                 arg1 = ts->info->stack_limit;
9731             }
9732         }
9733         return get_errno(target_mprotect(arg1, arg2, arg3));
9734 #ifdef TARGET_NR_mremap
9735     case TARGET_NR_mremap:
9736         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9737 #endif
9738         /* ??? msync/mlock/munlock are broken for softmmu.  */
9739 #ifdef TARGET_NR_msync
9740     case TARGET_NR_msync:
9741         return get_errno(msync(g2h(arg1), arg2, arg3));
9742 #endif
9743 #ifdef TARGET_NR_mlock
9744     case TARGET_NR_mlock:
9745         return get_errno(mlock(g2h(arg1), arg2));
9746 #endif
9747 #ifdef TARGET_NR_munlock
9748     case TARGET_NR_munlock:
9749         return get_errno(munlock(g2h(arg1), arg2));
9750 #endif
9751 #ifdef TARGET_NR_mlockall
9752     case TARGET_NR_mlockall:
9753         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9754 #endif
9755 #ifdef TARGET_NR_munlockall
9756     case TARGET_NR_munlockall:
9757         return get_errno(munlockall());
9758 #endif
9759 #ifdef TARGET_NR_truncate
9760     case TARGET_NR_truncate:
9761         if (!(p = lock_user_string(arg1)))
9762             return -TARGET_EFAULT;
9763         ret = get_errno(truncate(p, arg2));
9764         unlock_user(p, arg1, 0);
9765         return ret;
9766 #endif
9767 #ifdef TARGET_NR_ftruncate
9768     case TARGET_NR_ftruncate:
9769         return get_errno(ftruncate(arg1, arg2));
9770 #endif
9771     case TARGET_NR_fchmod:
9772         return get_errno(fchmod(arg1, arg2));
9773 #if defined(TARGET_NR_fchmodat)
9774     case TARGET_NR_fchmodat:
9775         if (!(p = lock_user_string(arg2)))
9776             return -TARGET_EFAULT;
9777         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9778         unlock_user(p, arg2, 0);
9779         return ret;
9780 #endif
9781     case TARGET_NR_getpriority:
9782         /* Note that negative values are valid for getpriority, so we must
9783            differentiate based on errno settings.  */
9784         errno = 0;
9785         ret = getpriority(arg1, arg2);
9786         if (ret == -1 && errno != 0) {
9787             return -host_to_target_errno(errno);
9788         }
9789 #ifdef TARGET_ALPHA
9790         /* Return value is the unbiased priority.  Signal no error.  */
9791         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9792 #else
9793         /* Return value is a biased priority to avoid negative numbers.  */
9794         ret = 20 - ret;
9795 #endif
9796         return ret;
9797     case TARGET_NR_setpriority:
9798         return get_errno(setpriority(arg1, arg2, arg3));
9799 #ifdef TARGET_NR_statfs
9800     case TARGET_NR_statfs:
9801         if (!(p = lock_user_string(arg1))) {
9802             return -TARGET_EFAULT;
9803         }
9804         ret = get_errno(statfs(path(p), &stfs));
9805         unlock_user(p, arg1, 0);
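    /* TARGET_NR_fstatfs below jumps here to share the conversion into the
     * target statfs layout.
     */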
9806     convert_statfs:
9807         if (!is_error(ret)) {
9808             struct target_statfs *target_stfs;
9809 
9810             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9811                 return -TARGET_EFAULT;
9812             __put_user(stfs.f_type, &target_stfs->f_type);
9813             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9814             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9815             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9816             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9817             __put_user(stfs.f_files, &target_stfs->f_files);
9818             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9819             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9820             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9821             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9822             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9823 #ifdef _STATFS_F_FLAGS
9824             __put_user(stfs.f_flags, &target_stfs->f_flags);
9825 #else
9826             __put_user(0, &target_stfs->f_flags);
9827 #endif
9828             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9829             unlock_user_struct(target_stfs, arg2, 1);
9830         }
9831         return ret;
9832 #endif
9833 #ifdef TARGET_NR_fstatfs
9834     case TARGET_NR_fstatfs:
9835         ret = get_errno(fstatfs(arg1, &stfs));
9836         goto convert_statfs;
9837 #endif
9838 #ifdef TARGET_NR_statfs64
9839     case TARGET_NR_statfs64:
9840         if (!(p = lock_user_string(arg1))) {
9841             return -TARGET_EFAULT;
9842         }
9843         ret = get_errno(statfs(path(p), &stfs));
9844         unlock_user(p, arg1, 0);
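    /* TARGET_NR_fstatfs64 below jumps here to share the conversion. */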
9845     convert_statfs64:
9846         if (!is_error(ret)) {
9847             struct target_statfs64 *target_stfs;
9848 
9849             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9850                 return -TARGET_EFAULT;
9851             __put_user(stfs.f_type, &target_stfs->f_type);
9852             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9853             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9854             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9855             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9856             __put_user(stfs.f_files, &target_stfs->f_files);
9857             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9858             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9859             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9860             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9861             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9862 #ifdef _STATFS_F_FLAGS
9863             __put_user(stfs.f_flags, &target_stfs->f_flags);
9864 #else
9865             __put_user(0, &target_stfs->f_flags);
9866 #endif
9867             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9868             unlock_user_struct(target_stfs, arg3, 1);
9869         }
9870         return ret;
9871     case TARGET_NR_fstatfs64:
9872         ret = get_errno(fstatfs(arg1, &stfs));
9873         goto convert_statfs64;
9874 #endif
9875 #ifdef TARGET_NR_socketcall
9876     case TARGET_NR_socketcall:
9877         return do_socketcall(arg1, arg2);
9878 #endif
9879 #ifdef TARGET_NR_accept
9880     case TARGET_NR_accept:
9881         return do_accept4(arg1, arg2, arg3, 0);
9882 #endif
9883 #ifdef TARGET_NR_accept4
9884     case TARGET_NR_accept4:
9885         return do_accept4(arg1, arg2, arg3, arg4);
9886 #endif
9887 #ifdef TARGET_NR_bind
9888     case TARGET_NR_bind:
9889         return do_bind(arg1, arg2, arg3);
9890 #endif
9891 #ifdef TARGET_NR_connect
9892     case TARGET_NR_connect:
9893         return do_connect(arg1, arg2, arg3);
9894 #endif
9895 #ifdef TARGET_NR_getpeername
9896     case TARGET_NR_getpeername:
9897         return do_getpeername(arg1, arg2, arg3);
9898 #endif
9899 #ifdef TARGET_NR_getsockname
9900     case TARGET_NR_getsockname:
9901         return do_getsockname(arg1, arg2, arg3);
9902 #endif
9903 #ifdef TARGET_NR_getsockopt
9904     case TARGET_NR_getsockopt:
9905         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9906 #endif
9907 #ifdef TARGET_NR_listen
9908     case TARGET_NR_listen:
9909         return get_errno(listen(arg1, arg2));
9910 #endif
9911 #ifdef TARGET_NR_recv
9912     case TARGET_NR_recv:
9913         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9914 #endif
9915 #ifdef TARGET_NR_recvfrom
9916     case TARGET_NR_recvfrom:
9917         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9918 #endif
9919 #ifdef TARGET_NR_recvmsg
9920     case TARGET_NR_recvmsg:
9921         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9922 #endif
9923 #ifdef TARGET_NR_send
9924     case TARGET_NR_send:
9925         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9926 #endif
9927 #ifdef TARGET_NR_sendmsg
9928     case TARGET_NR_sendmsg:
9929         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9930 #endif
9931 #ifdef TARGET_NR_sendmmsg
9932     case TARGET_NR_sendmmsg:
9933         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9934 #endif
9935 #ifdef TARGET_NR_recvmmsg
9936     case TARGET_NR_recvmmsg:
9937         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9938 #endif
9939 #ifdef TARGET_NR_sendto
9940     case TARGET_NR_sendto:
9941         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9942 #endif
9943 #ifdef TARGET_NR_shutdown
9944     case TARGET_NR_shutdown:
9945         return get_errno(shutdown(arg1, arg2));
9946 #endif
9947 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9948     case TARGET_NR_getrandom:
9949         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9950         if (!p) {
9951             return -TARGET_EFAULT;
9952         }
9953         ret = get_errno(getrandom(p, arg2, arg3));
9954         unlock_user(p, arg1, ret);
9955         return ret;
9956 #endif
9957 #ifdef TARGET_NR_socket
9958     case TARGET_NR_socket:
9959         return do_socket(arg1, arg2, arg3);
9960 #endif
9961 #ifdef TARGET_NR_socketpair
9962     case TARGET_NR_socketpair:
9963         return do_socketpair(arg1, arg2, arg3, arg4);
9964 #endif
9965 #ifdef TARGET_NR_setsockopt
9966     case TARGET_NR_setsockopt:
9967         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9968 #endif
9969 #if defined(TARGET_NR_syslog)
9970     case TARGET_NR_syslog:
9971         {
9972             int len = arg2;
9973 
9974             switch (arg1) {
9975             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9976             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9977             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9978             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9979             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9980             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9981             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9982             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9983                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9984             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9985             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9986             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9987                 {
9988                     if (len < 0) {
9989                         return -TARGET_EINVAL;
9990                     }
9991                     if (len == 0) {
9992                         return 0;
9993                     }
9994                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9995                     if (!p) {
9996                         return -TARGET_EFAULT;
9997                     }
9998                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9999                     unlock_user(p, arg2, arg3);
10000                 }
10001                 return ret;
10002             default:
10003                 return -TARGET_EINVAL;
10004             }
10005         }
10006         break;
10007 #endif
10008     case TARGET_NR_setitimer:
10009         {
10010             struct itimerval value, ovalue, *pvalue;
10011 
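            /* The guest itimerval is two consecutive target_timevals:
             * it_interval followed by it_value.
             */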
10012             if (arg2) {
10013                 pvalue = &value;
10014                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10015                     || copy_from_user_timeval(&pvalue->it_value,
10016                                               arg2 + sizeof(struct target_timeval)))
10017                     return -TARGET_EFAULT;
10018             } else {
10019                 pvalue = NULL;
10020             }
10021             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10022             if (!is_error(ret) && arg3) {
10023                 if (copy_to_user_timeval(arg3,
10024                                          &ovalue.it_interval)
10025                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10026                                             &ovalue.it_value))
10027                     return -TARGET_EFAULT;
10028             }
10029         }
10030         return ret;
10031     case TARGET_NR_getitimer:
10032         {
10033             struct itimerval value;
10034 
10035             ret = get_errno(getitimer(arg1, &value));
10036             if (!is_error(ret) && arg2) {
10037                 if (copy_to_user_timeval(arg2,
10038                                          &value.it_interval)
10039                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10040                                             &value.it_value))
10041                     return -TARGET_EFAULT;
10042             }
10043         }
10044         return ret;
10045 #ifdef TARGET_NR_stat
10046     case TARGET_NR_stat:
10047         if (!(p = lock_user_string(arg1))) {
10048             return -TARGET_EFAULT;
10049         }
10050         ret = get_errno(stat(path(p), &st));
10051         unlock_user(p, arg1, 0);
10052         goto do_stat;
10053 #endif
10054 #ifdef TARGET_NR_lstat
10055     case TARGET_NR_lstat:
10056         if (!(p = lock_user_string(arg1))) {
10057             return -TARGET_EFAULT;
10058         }
10059         ret = get_errno(lstat(path(p), &st));
10060         unlock_user(p, arg1, 0);
10061         goto do_stat;
10062 #endif
10063 #ifdef TARGET_NR_fstat
10064     case TARGET_NR_fstat:
10065         {
10066             ret = get_errno(fstat(arg1, &st));
10067 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
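        /* TARGET_NR_stat and TARGET_NR_lstat jump here to share the
         * conversion into the target struct stat.
         */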
10068         do_stat:
10069 #endif
10070             if (!is_error(ret)) {
10071                 struct target_stat *target_st;
10072 
10073                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10074                     return -TARGET_EFAULT;
10075                 memset(target_st, 0, sizeof(*target_st));
10076                 __put_user(st.st_dev, &target_st->st_dev);
10077                 __put_user(st.st_ino, &target_st->st_ino);
10078                 __put_user(st.st_mode, &target_st->st_mode);
10079                 __put_user(st.st_uid, &target_st->st_uid);
10080                 __put_user(st.st_gid, &target_st->st_gid);
10081                 __put_user(st.st_nlink, &target_st->st_nlink);
10082                 __put_user(st.st_rdev, &target_st->st_rdev);
10083                 __put_user(st.st_size, &target_st->st_size);
10084                 __put_user(st.st_blksize, &target_st->st_blksize);
10085                 __put_user(st.st_blocks, &target_st->st_blocks);
10086                 __put_user(st.st_atime, &target_st->target_st_atime);
10087                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10088                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10089 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10090     defined(TARGET_STAT_HAVE_NSEC)
10091                 __put_user(st.st_atim.tv_nsec,
10092                            &target_st->target_st_atime_nsec);
10093                 __put_user(st.st_mtim.tv_nsec,
10094                            &target_st->target_st_mtime_nsec);
10095                 __put_user(st.st_ctim.tv_nsec,
10096                            &target_st->target_st_ctime_nsec);
10097 #endif
10098                 unlock_user_struct(target_st, arg2, 1);
10099             }
10100         }
10101         return ret;
10102 #endif
10103     case TARGET_NR_vhangup:
10104         return get_errno(vhangup());
10105 #ifdef TARGET_NR_syscall
10106     case TARGET_NR_syscall:
10107         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10108                           arg6, arg7, arg8, 0);
10109 #endif
10110 #if defined(TARGET_NR_wait4)
10111     case TARGET_NR_wait4:
10112         {
10113             int status;
10114             abi_long status_ptr = arg2;
10115             struct rusage rusage, *rusage_ptr;
10116             abi_ulong target_rusage = arg4;
10117             abi_long rusage_err;
10118             if (target_rusage)
10119                 rusage_ptr = &rusage;
10120             else
10121                 rusage_ptr = NULL;
10122             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10123             if (!is_error(ret)) {
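                /* Only write the status back if the guest supplied a pointer
                 * and a child was actually reaped (ret != 0).
                 */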
10124                 if (status_ptr && ret) {
10125                     status = host_to_target_waitstatus(status);
10126                     if (put_user_s32(status, status_ptr))
10127                         return -TARGET_EFAULT;
10128                 }
10129                 if (target_rusage) {
10130                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10131                     if (rusage_err) {
10132                         ret = rusage_err;
10133                     }
10134                 }
10135             }
10136         }
10137         return ret;
10138 #endif
10139 #ifdef TARGET_NR_swapoff
10140     case TARGET_NR_swapoff:
10141         if (!(p = lock_user_string(arg1)))
10142             return -TARGET_EFAULT;
10143         ret = get_errno(swapoff(p));
10144         unlock_user(p, arg1, 0);
10145         return ret;
10146 #endif
10147     case TARGET_NR_sysinfo:
10148         {
10149             struct target_sysinfo *target_value;
10150             struct sysinfo value;
10151             ret = get_errno(sysinfo(&value));
10152             if (!is_error(ret) && arg1)
10153             {
10154                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10155                     return -TARGET_EFAULT;
10156                 __put_user(value.uptime, &target_value->uptime);
10157                 __put_user(value.loads[0], &target_value->loads[0]);
10158                 __put_user(value.loads[1], &target_value->loads[1]);
10159                 __put_user(value.loads[2], &target_value->loads[2]);
10160                 __put_user(value.totalram, &target_value->totalram);
10161                 __put_user(value.freeram, &target_value->freeram);
10162                 __put_user(value.sharedram, &target_value->sharedram);
10163                 __put_user(value.bufferram, &target_value->bufferram);
10164                 __put_user(value.totalswap, &target_value->totalswap);
10165                 __put_user(value.freeswap, &target_value->freeswap);
10166                 __put_user(value.procs, &target_value->procs);
10167                 __put_user(value.totalhigh, &target_value->totalhigh);
10168                 __put_user(value.freehigh, &target_value->freehigh);
10169                 __put_user(value.mem_unit, &target_value->mem_unit);
10170                 unlock_user_struct(target_value, arg1, 1);
10171             }
10172         }
10173         return ret;
10174 #ifdef TARGET_NR_ipc
10175     case TARGET_NR_ipc:
10176         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10177 #endif
10178 #ifdef TARGET_NR_semget
10179     case TARGET_NR_semget:
10180         return get_errno(semget(arg1, arg2, arg3));
10181 #endif
10182 #ifdef TARGET_NR_semop
10183     case TARGET_NR_semop:
10184         return do_semtimedop(arg1, arg2, arg3, 0, false);
10185 #endif
10186 #ifdef TARGET_NR_semtimedop
10187     case TARGET_NR_semtimedop:
10188         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10189 #endif
10190 #ifdef TARGET_NR_semtimedop_time64
10191     case TARGET_NR_semtimedop_time64:
10192         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10193 #endif
10194 #ifdef TARGET_NR_semctl
10195     case TARGET_NR_semctl:
10196         return do_semctl(arg1, arg2, arg3, arg4);
10197 #endif
10198 #ifdef TARGET_NR_msgctl
10199     case TARGET_NR_msgctl:
10200         return do_msgctl(arg1, arg2, arg3);
10201 #endif
10202 #ifdef TARGET_NR_msgget
10203     case TARGET_NR_msgget:
10204         return get_errno(msgget(arg1, arg2));
10205 #endif
10206 #ifdef TARGET_NR_msgrcv
10207     case TARGET_NR_msgrcv:
10208         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10209 #endif
10210 #ifdef TARGET_NR_msgsnd
10211     case TARGET_NR_msgsnd:
10212         return do_msgsnd(arg1, arg2, arg3, arg4);
10213 #endif
10214 #ifdef TARGET_NR_shmget
10215     case TARGET_NR_shmget:
10216         return get_errno(shmget(arg1, arg2, arg3));
10217 #endif
10218 #ifdef TARGET_NR_shmctl
10219     case TARGET_NR_shmctl:
10220         return do_shmctl(arg1, arg2, arg3);
10221 #endif
10222 #ifdef TARGET_NR_shmat
10223     case TARGET_NR_shmat:
10224         return do_shmat(cpu_env, arg1, arg2, arg3);
10225 #endif
10226 #ifdef TARGET_NR_shmdt
10227     case TARGET_NR_shmdt:
10228         return do_shmdt(arg1);
10229 #endif
10230     case TARGET_NR_fsync:
10231         return get_errno(fsync(arg1));
10232     case TARGET_NR_clone:
10233         /* Linux manages to have three different orderings for its
10234          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10235          * match the kernel's CONFIG_CLONE_* settings.
10236          * Microblaze is further special in that it uses a sixth
10237          * implicit argument to clone for the TLS pointer.
10238          */
10239 #if defined(TARGET_MICROBLAZE)
10240         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10241 #elif defined(TARGET_CLONE_BACKWARDS)
10242         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10243 #elif defined(TARGET_CLONE_BACKWARDS2)
10244         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10245 #else
10246         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10247 #endif
10248         return ret;
10249 #ifdef __NR_exit_group
10250         /* new thread calls */
10251     case TARGET_NR_exit_group:
10252         preexit_cleanup(cpu_env, arg1);
10253         return get_errno(exit_group(arg1));
10254 #endif
10255     case TARGET_NR_setdomainname:
10256         if (!(p = lock_user_string(arg1)))
10257             return -TARGET_EFAULT;
10258         ret = get_errno(setdomainname(p, arg2));
10259         unlock_user(p, arg1, 0);
10260         return ret;
10261     case TARGET_NR_uname:
10262         /* No need to transcode because we use the Linux syscall directly. */
10263         {
10264             struct new_utsname * buf;
10265 
10266             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10267                 return -TARGET_EFAULT;
10268             ret = get_errno(sys_uname(buf));
10269             if (!is_error(ret)) {
10270                 /* Overwrite the native machine name with whatever is being
10271                    emulated. */
10272                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10273                           sizeof(buf->machine));
10274                 /* Allow the user to override the reported release.  */
10275                 if (qemu_uname_release && *qemu_uname_release) {
10276                     g_strlcpy(buf->release, qemu_uname_release,
10277                               sizeof(buf->release));
10278                 }
10279             }
10280             unlock_user_struct(buf, arg1, 1);
10281         }
10282         return ret;
10283 #ifdef TARGET_I386
10284     case TARGET_NR_modify_ldt:
10285         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10286 #if !defined(TARGET_X86_64)
10287     case TARGET_NR_vm86:
10288         return do_vm86(cpu_env, arg1, arg2);
10289 #endif
10290 #endif
10291 #if defined(TARGET_NR_adjtimex)
10292     case TARGET_NR_adjtimex:
10293         {
10294             struct timex host_buf;
10295 
10296             if (target_to_host_timex(&host_buf, arg1) != 0) {
10297                 return -TARGET_EFAULT;
10298             }
10299             ret = get_errno(adjtimex(&host_buf));
10300             if (!is_error(ret)) {
10301                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10302                     return -TARGET_EFAULT;
10303                 }
10304             }
10305         }
10306         return ret;
10307 #endif
10308 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10309     case TARGET_NR_clock_adjtime:
10310         {
10311             struct timex htx, *phtx = &htx;
10312 
10313             if (target_to_host_timex(phtx, arg2) != 0) {
10314                 return -TARGET_EFAULT;
10315             }
10316             ret = get_errno(clock_adjtime(arg1, phtx));
10317             if (!is_error(ret) && phtx) {
10318                 if (host_to_target_timex(arg2, phtx) != 0) {
10319                     return -TARGET_EFAULT;
10320                 }
10321             }
10322         }
10323         return ret;
10324 #endif
10325 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10326     case TARGET_NR_clock_adjtime64:
10327         {
10328             struct timex htx;
10329 
10330             if (target_to_host_timex64(&htx, arg2) != 0) {
10331                 return -TARGET_EFAULT;
10332             }
10333             ret = get_errno(clock_adjtime(arg1, &htx));
10334             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10335                     return -TARGET_EFAULT;
10336             }
10337         }
10338         return ret;
10339 #endif
10340     case TARGET_NR_getpgid:
10341         return get_errno(getpgid(arg1));
10342     case TARGET_NR_fchdir:
10343         return get_errno(fchdir(arg1));
10344     case TARGET_NR_personality:
10345         return get_errno(personality(arg1));
10346 #ifdef TARGET_NR__llseek /* Not on alpha */
10347     case TARGET_NR__llseek:
10348         {
10349             int64_t res;
10350 #if !defined(__NR_llseek)
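            /* The host has no llseek syscall; emulate it with a plain lseek
             * on the combined 64-bit offset.
             */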
10351             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10352             if (res == -1) {
10353                 ret = get_errno(res);
10354             } else {
10355                 ret = 0;
10356             }
10357 #else
10358             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10359 #endif
10360             if ((ret == 0) && put_user_s64(res, arg4)) {
10361                 return -TARGET_EFAULT;
10362             }
10363         }
10364         return ret;
10365 #endif
10366 #ifdef TARGET_NR_getdents
10367     case TARGET_NR_getdents:
10368 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10369 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
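        /* struct linux_dirent is laid out differently on the 64-bit host
         * than the 32-bit target expects, so read into a bounce buffer and
         * repack each record into the guest buffer.
         */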
10370         {
10371             struct target_dirent *target_dirp;
10372             struct linux_dirent *dirp;
10373             abi_long count = arg3;
10374 
10375             dirp = g_try_malloc(count);
10376             if (!dirp) {
10377                 return -TARGET_ENOMEM;
10378             }
10379 
10380             ret = get_errno(sys_getdents(arg1, dirp, count));
10381             if (!is_error(ret)) {
10382                 struct linux_dirent *de;
10383                 struct target_dirent *tde;
10384                 int len = ret;
10385                 int reclen, treclen;
10386                 int count1, tnamelen;
10387 
10388                 count1 = 0;
10389                 de = dirp;
10390                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10391                     return -TARGET_EFAULT;
10392                 tde = target_dirp;
10393                 while (len > 0) {
10394                     reclen = de->d_reclen;
10395                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10396                     assert(tnamelen >= 0);
10397                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10398                     assert(count1 + treclen <= count);
10399                     tde->d_reclen = tswap16(treclen);
10400                     tde->d_ino = tswapal(de->d_ino);
10401                     tde->d_off = tswapal(de->d_off);
10402                     memcpy(tde->d_name, de->d_name, tnamelen);
10403                     de = (struct linux_dirent *)((char *)de + reclen);
10404                     len -= reclen;
10405                     tde = (struct target_dirent *)((char *)tde + treclen);
10406                     count1 += treclen;
10407                 }
10408                 ret = count1;
10409                 unlock_user(target_dirp, arg2, ret);
10410             }
10411             g_free(dirp);
10412         }
10413 #else
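        /* Host and target dirent layouts match; just byte-swap the fields
         * in place.
         */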
10414         {
10415             struct linux_dirent *dirp;
10416             abi_long count = arg3;
10417 
10418             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10419                 return -TARGET_EFAULT;
10420             ret = get_errno(sys_getdents(arg1, dirp, count));
10421             if (!is_error(ret)) {
10422                 struct linux_dirent *de;
10423                 int len = ret;
10424                 int reclen;
10425                 de = dirp;
10426                 while (len > 0) {
10427                     reclen = de->d_reclen;
10428                     if (reclen > len)
10429                         break;
10430                     de->d_reclen = tswap16(reclen);
10431                     tswapls(&de->d_ino);
10432                     tswapls(&de->d_off);
10433                     de = (struct linux_dirent *)((char *)de + reclen);
10434                     len -= reclen;
10435                 }
10436             }
10437             unlock_user(dirp, arg2, ret);
10438         }
10439 #endif
10440 #else
10441         /* Implement getdents in terms of getdents64 */
10442         {
10443             struct linux_dirent64 *dirp;
10444             abi_long count = arg3;
10445 
10446             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10447             if (!dirp) {
10448                 return -TARGET_EFAULT;
10449             }
10450             ret = get_errno(sys_getdents64(arg1, dirp, count));
10451             if (!is_error(ret)) {
10452                 /* Convert the dirent64 structs to target dirent.  We do this
10453                  * in-place, since we can guarantee that a target_dirent is no
10454                  * larger than a dirent64; however this means we have to be
10455                  * careful to read everything before writing in the new format.
10456                  */
10457                 struct linux_dirent64 *de;
10458                 struct target_dirent *tde;
10459                 int len = ret;
10460                 int tlen = 0;
10461 
10462                 de = dirp;
10463                 tde = (struct target_dirent *)dirp;
10464                 while (len > 0) {
10465                     int namelen, treclen;
10466                     int reclen = de->d_reclen;
10467                     uint64_t ino = de->d_ino;
10468                     int64_t off = de->d_off;
10469                     uint8_t type = de->d_type;
10470 
10471                     namelen = strlen(de->d_name);
10472                     treclen = offsetof(struct target_dirent, d_name)
10473                         + namelen + 2;
10474                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10475 
10476                     memmove(tde->d_name, de->d_name, namelen + 1);
10477                     tde->d_ino = tswapal(ino);
10478                     tde->d_off = tswapal(off);
10479                     tde->d_reclen = tswap16(treclen);
10480                     /* The target_dirent type is in what was formerly a padding
10481                      * byte at the end of the structure:
10482                      */
10483                     *(((char *)tde) + treclen - 1) = type;
10484 
10485                     de = (struct linux_dirent64 *)((char *)de + reclen);
10486                     tde = (struct target_dirent *)((char *)tde + treclen);
10487                     len -= reclen;
10488                     tlen += treclen;
10489                 }
10490                 ret = tlen;
10491             }
10492             unlock_user(dirp, arg2, ret);
10493         }
10494 #endif
10495         return ret;
10496 #endif /* TARGET_NR_getdents */
10497 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
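    /* linux_dirent64 has the same layout for host and target, so only the
     * multi-byte fields need byte swapping in place.
     */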
10498     case TARGET_NR_getdents64:
10499         {
10500             struct linux_dirent64 *dirp;
10501             abi_long count = arg3;
10502             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10503                 return -TARGET_EFAULT;
10504             ret = get_errno(sys_getdents64(arg1, dirp, count));
10505             if (!is_error(ret)) {
10506                 struct linux_dirent64 *de;
10507                 int len = ret;
10508                 int reclen;
10509                 de = dirp;
10510                 while (len > 0) {
10511                     reclen = de->d_reclen;
10512                     if (reclen > len)
10513                         break;
10514                     de->d_reclen = tswap16(reclen);
10515                     tswap64s((uint64_t *)&de->d_ino);
10516                     tswap64s((uint64_t *)&de->d_off);
10517                     de = (struct linux_dirent64 *)((char *)de + reclen);
10518                     len -= reclen;
10519                 }
10520             }
10521             unlock_user(dirp, arg2, ret);
10522         }
10523         return ret;
10524 #endif /* TARGET_NR_getdents64 */
10525 #if defined(TARGET_NR__newselect)
10526     case TARGET_NR__newselect:
10527         return do_select(arg1, arg2, arg3, arg4, arg5);
10528 #endif
10529 #ifdef TARGET_NR_poll
10530     case TARGET_NR_poll:
10531         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10532 #endif
10533 #ifdef TARGET_NR_ppoll
10534     case TARGET_NR_ppoll:
10535         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10536 #endif
10537 #ifdef TARGET_NR_ppoll_time64
10538     case TARGET_NR_ppoll_time64:
10539         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10540 #endif
10541     case TARGET_NR_flock:
10542         /* NOTE: the flock constant seems to be the same for every
10543            Linux platform */
10544         return get_errno(safe_flock(arg1, arg2));
10545     case TARGET_NR_readv:
10546         {
10547             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10548             if (vec != NULL) {
10549                 ret = get_errno(safe_readv(arg1, vec, arg3));
10550                 unlock_iovec(vec, arg2, arg3, 1);
10551             } else {
10552                 ret = -host_to_target_errno(errno);
10553             }
10554         }
10555         return ret;
10556     case TARGET_NR_writev:
10557         {
10558             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10559             if (vec != NULL) {
10560                 ret = get_errno(safe_writev(arg1, vec, arg3));
10561                 unlock_iovec(vec, arg2, arg3, 0);
10562             } else {
10563                 ret = -host_to_target_errno(errno);
10564             }
10565         }
10566         return ret;
10567 #if defined(TARGET_NR_preadv)
10568     case TARGET_NR_preadv:
10569         {
10570             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10571             if (vec != NULL) {
10572                 unsigned long low, high;
10573 
10574                 target_to_host_low_high(arg4, arg5, &low, &high);
10575                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10576                 unlock_iovec(vec, arg2, arg3, 1);
10577             } else {
10578                 ret = -host_to_target_errno(errno);
10579            }
10580         }
10581         return ret;
10582 #endif
10583 #if defined(TARGET_NR_pwritev)
10584     case TARGET_NR_pwritev:
10585         {
10586             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10587             if (vec != NULL) {
10588                 unsigned long low, high;
10589 
10590                 target_to_host_low_high(arg4, arg5, &low, &high);
10591                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10592                 unlock_iovec(vec, arg2, arg3, 0);
10593             } else {
10594                 ret = -host_to_target_errno(errno);
10595             }
10596         }
10597         return ret;
10598 #endif
10599     case TARGET_NR_getsid:
10600         return get_errno(getsid(arg1));
10601 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10602     case TARGET_NR_fdatasync:
10603         return get_errno(fdatasync(arg1));
10604 #endif
10605     case TARGET_NR_sched_getaffinity:
10606         {
10607             unsigned int mask_size;
10608             unsigned long *mask;
10609 
10610             /*
10611              * sched_getaffinity needs multiples of ulong, so we have to
10612              * handle mismatches between the target and host ulong sizes.
10613              */
10614             if (arg2 & (sizeof(abi_ulong) - 1)) {
10615                 return -TARGET_EINVAL;
10616             }
10617             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10618 
10619             mask = alloca(mask_size);
10620             memset(mask, 0, mask_size);
10621             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10622 
10623             if (!is_error(ret)) {
10624                 if (ret > arg2) {
10625                     /* More data returned than the caller's buffer will fit.
10626                      * This only happens if sizeof(abi_long) < sizeof(long)
10627                      * and the caller passed us a buffer holding an odd number
10628                      * of abi_longs. If the host kernel is actually using the
10629                      * extra 4 bytes then fail EINVAL; otherwise we can just
10630                      * ignore them and only copy the interesting part.
10631                      */
10632                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10633                     if (numcpus > arg2 * 8) {
10634                         return -TARGET_EINVAL;
10635                     }
10636                     ret = arg2;
10637                 }
10638 
10639                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10640                     return -TARGET_EFAULT;
10641                 }
10642             }
10643         }
10644         return ret;
10645     case TARGET_NR_sched_setaffinity:
10646         {
10647             unsigned int mask_size;
10648             unsigned long *mask;
10649 
10650             /*
10651              * sched_setaffinity needs multiples of ulong, so we have to
10652              * handle mismatches between the target and host ulong sizes.
10653              */
10654             if (arg2 & (sizeof(abi_ulong) - 1)) {
10655                 return -TARGET_EINVAL;
10656             }
10657             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10658             mask = alloca(mask_size);
10659 
10660             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10661             if (ret) {
10662                 return ret;
10663             }
10664 
10665             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10666         }
10667     case TARGET_NR_getcpu:
10668         {
10669             unsigned cpu, node;
10670             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10671                                        arg2 ? &node : NULL,
10672                                        NULL));
10673             if (is_error(ret)) {
10674                 return ret;
10675             }
10676             if (arg1 && put_user_u32(cpu, arg1)) {
10677                 return -TARGET_EFAULT;
10678             }
10679             if (arg2 && put_user_u32(node, arg2)) {
10680                 return -TARGET_EFAULT;
10681             }
10682         }
10683         return ret;
10684     case TARGET_NR_sched_setparam:
10685         {
10686             struct sched_param *target_schp;
10687             struct sched_param schp;
10688 
10689             if (arg2 == 0) {
10690                 return -TARGET_EINVAL;
10691             }
10692             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10693                 return -TARGET_EFAULT;
10694             schp.sched_priority = tswap32(target_schp->sched_priority);
10695             unlock_user_struct(target_schp, arg2, 0);
10696             return get_errno(sched_setparam(arg1, &schp));
10697         }
10698     case TARGET_NR_sched_getparam:
10699         {
10700             struct sched_param *target_schp;
10701             struct sched_param schp;
10702 
10703             if (arg2 == 0) {
10704                 return -TARGET_EINVAL;
10705             }
10706             ret = get_errno(sched_getparam(arg1, &schp));
10707             if (!is_error(ret)) {
10708                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10709                     return -TARGET_EFAULT;
10710                 target_schp->sched_priority = tswap32(schp.sched_priority);
10711                 unlock_user_struct(target_schp, arg2, 1);
10712             }
10713         }
10714         return ret;
10715     case TARGET_NR_sched_setscheduler:
10716         {
10717             struct sched_param *target_schp;
10718             struct sched_param schp;
10719             if (arg3 == 0) {
10720                 return -TARGET_EINVAL;
10721             }
10722             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10723                 return -TARGET_EFAULT;
10724             schp.sched_priority = tswap32(target_schp->sched_priority);
10725             unlock_user_struct(target_schp, arg3, 0);
10726             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10727         }
10728     case TARGET_NR_sched_getscheduler:
10729         return get_errno(sched_getscheduler(arg1));
10730     case TARGET_NR_sched_yield:
10731         return get_errno(sched_yield());
10732     case TARGET_NR_sched_get_priority_max:
10733         return get_errno(sched_get_priority_max(arg1));
10734     case TARGET_NR_sched_get_priority_min:
10735         return get_errno(sched_get_priority_min(arg1));
10736 #ifdef TARGET_NR_sched_rr_get_interval
10737     case TARGET_NR_sched_rr_get_interval:
10738         {
10739             struct timespec ts;
10740             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10741             if (!is_error(ret)) {
10742                 ret = host_to_target_timespec(arg2, &ts);
10743             }
10744         }
10745         return ret;
10746 #endif
10747 #ifdef TARGET_NR_sched_rr_get_interval_time64
10748     case TARGET_NR_sched_rr_get_interval_time64:
10749         {
10750             struct timespec ts;
10751             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10752             if (!is_error(ret)) {
10753                 ret = host_to_target_timespec64(arg2, &ts);
10754             }
10755         }
10756         return ret;
10757 #endif
10758 #if defined(TARGET_NR_nanosleep)
10759     case TARGET_NR_nanosleep:
10760         {
10761             struct timespec req, rem;
10762             target_to_host_timespec(&req, arg1);
10763             ret = get_errno(safe_nanosleep(&req, &rem));
10764             if (is_error(ret) && arg2) {
10765                 host_to_target_timespec(arg2, &rem);
10766             }
10767         }
10768         return ret;
10769 #endif
10770     case TARGET_NR_prctl:
10771         switch (arg1) {
10772         case PR_GET_PDEATHSIG:
10773         {
10774             int deathsig;
10775             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10776             if (!is_error(ret) && arg2
10777                 && put_user_s32(deathsig, arg2)) {
10778                 return -TARGET_EFAULT;
10779             }
10780             return ret;
10781         }
10782 #ifdef PR_GET_NAME
10783         case PR_GET_NAME:
10784         {
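                  /* The task name is at most 16 bytes (the kernel's TASK_COMM_LEN). */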
10785             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10786             if (!name) {
10787                 return -TARGET_EFAULT;
10788             }
10789             ret = get_errno(prctl(arg1, (unsigned long)name,
10790                                   arg3, arg4, arg5));
10791             unlock_user(name, arg2, 16);
10792             return ret;
10793         }
10794         case PR_SET_NAME:
10795         {
10796             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10797             if (!name) {
10798                 return -TARGET_EFAULT;
10799             }
10800             ret = get_errno(prctl(arg1, (unsigned long)name,
10801                                   arg3, arg4, arg5));
10802             unlock_user(name, arg2, 0);
10803             return ret;
10804         }
10805 #endif
10806 #ifdef TARGET_MIPS
10807         case TARGET_PR_GET_FP_MODE:
10808         {
10809             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10810             ret = 0;
10811             if (env->CP0_Status & (1 << CP0St_FR)) {
10812                 ret |= TARGET_PR_FP_MODE_FR;
10813             }
10814             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10815                 ret |= TARGET_PR_FP_MODE_FRE;
10816             }
10817             return ret;
10818         }
10819         case TARGET_PR_SET_FP_MODE:
10820         {
10821             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10822             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10823             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10824             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10825             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10826 
10827             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10828                                             TARGET_PR_FP_MODE_FRE;
10829 
10830             /* If nothing to change, return right away, successfully.  */
10831             if (old_fr == new_fr && old_fre == new_fre) {
10832                 return 0;
10833             }
10834             /* Check the value is valid */
10835             if (arg2 & ~known_bits) {
10836                 return -TARGET_EOPNOTSUPP;
10837             }
10838             /* Setting FRE without FR is not supported.  */
10839             if (new_fre && !new_fr) {
10840                 return -TARGET_EOPNOTSUPP;
10841             }
10842             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10843                 /* FR1 is not supported */
10844                 return -TARGET_EOPNOTSUPP;
10845             }
10846             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10847                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10848                 /* cannot set FR=0 */
10849                 return -TARGET_EOPNOTSUPP;
10850             }
10851             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10852                 /* Cannot set FRE=1 */
10853                 return -TARGET_EOPNOTSUPP;
10854             }
10855 
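                  /*
                   * Changing FR changes how the 32 single-precision registers
                   * map onto the 64-bit FPRs, so shuffle the odd-numbered
                   * registers between the two layouts to preserve their
                   * values across the mode switch.
                   */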
10856             int i;
10857             fpr_t *fpr = env->active_fpu.fpr;
10858             for (i = 0; i < 32 ; i += 2) {
10859                 if (!old_fr && new_fr) {
10860                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10861                 } else if (old_fr && !new_fr) {
10862                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10863                 }
10864             }
10865 
10866             if (new_fr) {
10867                 env->CP0_Status |= (1 << CP0St_FR);
10868                 env->hflags |= MIPS_HFLAG_F64;
10869             } else {
10870                 env->CP0_Status &= ~(1 << CP0St_FR);
10871                 env->hflags &= ~MIPS_HFLAG_F64;
10872             }
10873             if (new_fre) {
10874                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10875                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10876                     env->hflags |= MIPS_HFLAG_FRE;
10877                 }
10878             } else {
10879                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10880                 env->hflags &= ~MIPS_HFLAG_FRE;
10881             }
10882 
10883             return 0;
10884         }
10885 #endif /* MIPS */
10886 #ifdef TARGET_AARCH64
10887         case TARGET_PR_SVE_SET_VL:
10888             /*
10889              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10890              * PR_SVE_VL_INHERIT.  Note the kernel definition
10891              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10892              * even though the current architectural maximum is VQ=16.
10893              */
10894             ret = -TARGET_EINVAL;
10895             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10896                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10897                 CPUARMState *env = cpu_env;
10898                 ARMCPU *cpu = env_archcpu(env);
10899                 uint32_t vq, old_vq;
10900 
10901                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10902                 vq = MAX(arg2 / 16, 1);
10903                 vq = MIN(vq, cpu->sve_max_vq);
10904 
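                      /*
                       * When the vector length shrinks, discard the state in
                       * the now-inaccessible high bits of the SVE registers;
                       * growing the length needs no fixup.
                       */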
10905                 if (vq < old_vq) {
10906                     aarch64_sve_narrow_vq(env, vq);
10907                 }
10908                 env->vfp.zcr_el[1] = vq - 1;
10909                 arm_rebuild_hflags(env);
10910                 ret = vq * 16;
10911             }
10912             return ret;
10913         case TARGET_PR_SVE_GET_VL:
10914             ret = -TARGET_EINVAL;
10915             {
10916                 ARMCPU *cpu = env_archcpu(cpu_env);
10917                 if (cpu_isar_feature(aa64_sve, cpu)) {
10918                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10919                 }
10920             }
10921             return ret;
10922         case TARGET_PR_PAC_RESET_KEYS:
10923             {
10924                 CPUARMState *env = cpu_env;
10925                 ARMCPU *cpu = env_archcpu(env);
10926 
10927                 if (arg3 || arg4 || arg5) {
10928                     return -TARGET_EINVAL;
10929                 }
10930                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10931                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10932                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10933                                TARGET_PR_PAC_APGAKEY);
10934                     int ret = 0;
10935                     Error *err = NULL;
10936 
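                          /* A key mask of zero means "reset all keys". */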
10937                     if (arg2 == 0) {
10938                         arg2 = all;
10939                     } else if (arg2 & ~all) {
10940                         return -TARGET_EINVAL;
10941                     }
10942                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10943                         ret |= qemu_guest_getrandom(&env->keys.apia,
10944                                                     sizeof(ARMPACKey), &err);
10945                     }
10946                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10947                         ret |= qemu_guest_getrandom(&env->keys.apib,
10948                                                     sizeof(ARMPACKey), &err);
10949                     }
10950                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10951                         ret |= qemu_guest_getrandom(&env->keys.apda,
10952                                                     sizeof(ARMPACKey), &err);
10953                     }
10954                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10955                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10956                                                     sizeof(ARMPACKey), &err);
10957                     }
10958                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10959                         ret |= qemu_guest_getrandom(&env->keys.apga,
10960                                                     sizeof(ARMPACKey), &err);
10961                     }
10962                     if (ret != 0) {
10963                         /*
10964                          * Some unknown failure in the crypto.  The best
10965                          * we can do is log it and fail the syscall.
10966                          * The real syscall cannot fail this way.
10967                          */
10968                         qemu_log_mask(LOG_UNIMP,
10969                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10970                                       error_get_pretty(err));
10971                         error_free(err);
10972                         return -TARGET_EIO;
10973                     }
10974                     return 0;
10975                 }
10976             }
10977             return -TARGET_EINVAL;
10978 #endif /* AARCH64 */
10979         case PR_GET_SECCOMP:
10980         case PR_SET_SECCOMP:
10981             /* Disable seccomp to prevent the target disabling syscalls we
10982              * need. */
10983             return -TARGET_EINVAL;
10984         default:
10985             /* Most prctl options have no pointer arguments */
10986             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10987         }
10988         break;
10989 #ifdef TARGET_NR_arch_prctl
10990     case TARGET_NR_arch_prctl:
10991         return do_arch_prctl(cpu_env, arg1, arg2);
10992 #endif
10993 #ifdef TARGET_NR_pread64
10994     case TARGET_NR_pread64:
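              /*
               * On ABIs that require 64-bit values in aligned register pairs
               * the offset starts one argument slot later, so shift the
               * argument registers down before splitting out the offset.
               */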
10995         if (regpairs_aligned(cpu_env, num)) {
10996             arg4 = arg5;
10997             arg5 = arg6;
10998         }
10999         if (arg2 == 0 && arg3 == 0) {
11000             /* Special-case NULL buffer and zero length, which should succeed */
11001             p = 0;
11002         } else {
11003             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11004             if (!p) {
11005                 return -TARGET_EFAULT;
11006             }
11007         }
11008         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11009         unlock_user(p, arg2, ret);
11010         return ret;
11011     case TARGET_NR_pwrite64:
11012         if (regpairs_aligned(cpu_env, num)) {
11013             arg4 = arg5;
11014             arg5 = arg6;
11015         }
11016         if (arg2 == 0 && arg3 == 0) {
11017             /* Special-case NULL buffer and zero length, which should succeed */
11018             p = 0;
11019         } else {
11020             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11021             if (!p) {
11022                 return -TARGET_EFAULT;
11023             }
11024         }
11025         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11026         unlock_user(p, arg2, 0);
11027         return ret;
11028 #endif
11029     case TARGET_NR_getcwd:
11030         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11031             return -TARGET_EFAULT;
11032         ret = get_errno(sys_getcwd1(p, arg2));
11033         unlock_user(p, arg1, ret);
11034         return ret;
11035     case TARGET_NR_capget:
11036     case TARGET_NR_capset:
11037     {
11038         struct target_user_cap_header *target_header;
11039         struct target_user_cap_data *target_data = NULL;
11040         struct __user_cap_header_struct header;
11041         struct __user_cap_data_struct data[2];
11042         struct __user_cap_data_struct *dataptr = NULL;
11043         int i, target_datalen;
11044         int data_items = 1;
11045 
11046         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11047             return -TARGET_EFAULT;
11048         }
11049         header.version = tswap32(target_header->version);
11050         header.pid = tswap32(target_header->pid);
11051 
11052         if (header.version != _LINUX_CAPABILITY_VERSION) {
11053             /* Versions 2 and up take a pointer to two user_data structs */
11054             data_items = 2;
11055         }
11056 
11057         target_datalen = sizeof(*target_data) * data_items;
11058 
11059         if (arg2) {
11060             if (num == TARGET_NR_capget) {
11061                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11062             } else {
11063                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11064             }
11065             if (!target_data) {
11066                 unlock_user_struct(target_header, arg1, 0);
11067                 return -TARGET_EFAULT;
11068             }
11069 
11070             if (num == TARGET_NR_capset) {
11071                 for (i = 0; i < data_items; i++) {
11072                     data[i].effective = tswap32(target_data[i].effective);
11073                     data[i].permitted = tswap32(target_data[i].permitted);
11074                     data[i].inheritable = tswap32(target_data[i].inheritable);
11075                 }
11076             }
11077 
11078             dataptr = data;
11079         }
11080 
11081         if (num == TARGET_NR_capget) {
11082             ret = get_errno(capget(&header, dataptr));
11083         } else {
11084             ret = get_errno(capset(&header, dataptr));
11085         }
11086 
11087         /* The kernel always updates version for both capget and capset */
11088         target_header->version = tswap32(header.version);
11089         unlock_user_struct(target_header, arg1, 1);
11090 
11091         if (arg2) {
11092             if (num == TARGET_NR_capget) {
11093                 for (i = 0; i < data_items; i++) {
11094                     target_data[i].effective = tswap32(data[i].effective);
11095                     target_data[i].permitted = tswap32(data[i].permitted);
11096                     target_data[i].inheritable = tswap32(data[i].inheritable);
11097                 }
11098                 unlock_user(target_data, arg2, target_datalen);
11099             } else {
11100                 unlock_user(target_data, arg2, 0);
11101             }
11102         }
11103         return ret;
11104     }
11105     case TARGET_NR_sigaltstack:
11106         return do_sigaltstack(arg1, arg2,
11107                               get_sp_from_cpustate((CPUArchState *)cpu_env));
11108 
11109 #ifdef CONFIG_SENDFILE
11110 #ifdef TARGET_NR_sendfile
11111     case TARGET_NR_sendfile:
11112     {
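              /*
               * sendfile() passes the optional offset as a native abi_long,
               * while sendfile64() below always uses a 64-bit value.
               */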
11113         off_t *offp = NULL;
11114         off_t off;
11115         if (arg3) {
11116             ret = get_user_sal(off, arg3);
11117             if (is_error(ret)) {
11118                 return ret;
11119             }
11120             offp = &off;
11121         }
11122         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11123         if (!is_error(ret) && arg3) {
11124             abi_long ret2 = put_user_sal(off, arg3);
11125             if (is_error(ret2)) {
11126                 ret = ret2;
11127             }
11128         }
11129         return ret;
11130     }
11131 #endif
11132 #ifdef TARGET_NR_sendfile64
11133     case TARGET_NR_sendfile64:
11134     {
11135         off_t *offp = NULL;
11136         off_t off;
11137         if (arg3) {
11138             ret = get_user_s64(off, arg3);
11139             if (is_error(ret)) {
11140                 return ret;
11141             }
11142             offp = &off;
11143         }
11144         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11145         if (!is_error(ret) && arg3) {
11146             abi_long ret2 = put_user_s64(off, arg3);
11147             if (is_error(ret2)) {
11148                 ret = ret2;
11149             }
11150         }
11151         return ret;
11152     }
11153 #endif
11154 #endif
11155 #ifdef TARGET_NR_vfork
11156     case TARGET_NR_vfork:
11157         return get_errno(do_fork(cpu_env,
11158                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11159                          0, 0, 0, 0));
11160 #endif
11161 #ifdef TARGET_NR_ugetrlimit
11162     case TARGET_NR_ugetrlimit:
11163     {
11164         struct rlimit rlim;
11165         int resource = target_to_host_resource(arg1);
11166         ret = get_errno(getrlimit(resource, &rlim));
11167         if (!is_error(ret)) {
11168             struct target_rlimit *target_rlim;
11169             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11170                 return -TARGET_EFAULT;
11171             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11172             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11173             unlock_user_struct(target_rlim, arg2, 1);
11174         }
11175         return ret;
11176     }
11177 #endif
11178 #ifdef TARGET_NR_truncate64
11179     case TARGET_NR_truncate64:
11180         if (!(p = lock_user_string(arg1)))
11181             return -TARGET_EFAULT;
11182         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11183         unlock_user(p, arg1, 0);
11184         return ret;
11185 #endif
11186 #ifdef TARGET_NR_ftruncate64
11187     case TARGET_NR_ftruncate64:
11188         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11189 #endif
11190 #ifdef TARGET_NR_stat64
11191     case TARGET_NR_stat64:
11192         if (!(p = lock_user_string(arg1))) {
11193             return -TARGET_EFAULT;
11194         }
11195         ret = get_errno(stat(path(p), &st));
11196         unlock_user(p, arg1, 0);
11197         if (!is_error(ret))
11198             ret = host_to_target_stat64(cpu_env, arg2, &st);
11199         return ret;
11200 #endif
11201 #ifdef TARGET_NR_lstat64
11202     case TARGET_NR_lstat64:
11203         if (!(p = lock_user_string(arg1))) {
11204             return -TARGET_EFAULT;
11205         }
11206         ret = get_errno(lstat(path(p), &st));
11207         unlock_user(p, arg1, 0);
11208         if (!is_error(ret))
11209             ret = host_to_target_stat64(cpu_env, arg2, &st);
11210         return ret;
11211 #endif
11212 #ifdef TARGET_NR_fstat64
11213     case TARGET_NR_fstat64:
11214         ret = get_errno(fstat(arg1, &st));
11215         if (!is_error(ret))
11216             ret = host_to_target_stat64(cpu_env, arg2, &st);
11217         return ret;
11218 #endif
11219 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11220 #ifdef TARGET_NR_fstatat64
11221     case TARGET_NR_fstatat64:
11222 #endif
11223 #ifdef TARGET_NR_newfstatat
11224     case TARGET_NR_newfstatat:
11225 #endif
11226         if (!(p = lock_user_string(arg2))) {
11227             return -TARGET_EFAULT;
11228         }
11229         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11230         unlock_user(p, arg2, 0);
11231         if (!is_error(ret))
11232             ret = host_to_target_stat64(cpu_env, arg3, &st);
11233         return ret;
11234 #endif
11235 #if defined(TARGET_NR_statx)
11236     case TARGET_NR_statx:
11237         {
11238             struct target_statx *target_stx;
11239             int dirfd = arg1;
11240             int flags = arg3;
11241 
11242             p = lock_user_string(arg2);
11243             if (p == NULL) {
11244                 return -TARGET_EFAULT;
11245             }
11246 #if defined(__NR_statx)
11247             {
11248                 /*
11249                  * It is assumed that struct statx is architecture independent.
11250                  */
11251                 struct target_statx host_stx;
11252                 int mask = arg4;
11253 
11254                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11255                 if (!is_error(ret)) {
11256                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11257                         unlock_user(p, arg2, 0);
11258                         return -TARGET_EFAULT;
11259                     }
11260                 }
11261 
11262                 if (ret != -TARGET_ENOSYS) {
11263                     unlock_user(p, arg2, 0);
11264                     return ret;
11265                 }
11266             }
11267 #endif
11268             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11269             unlock_user(p, arg2, 0);
11270 
11271             if (!is_error(ret)) {
11272                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11273                     return -TARGET_EFAULT;
11274                 }
11275                 memset(target_stx, 0, sizeof(*target_stx));
11276                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11277                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11278                 __put_user(st.st_ino, &target_stx->stx_ino);
11279                 __put_user(st.st_mode, &target_stx->stx_mode);
11280                 __put_user(st.st_uid, &target_stx->stx_uid);
11281                 __put_user(st.st_gid, &target_stx->stx_gid);
11282                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11283                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11284                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11285                 __put_user(st.st_size, &target_stx->stx_size);
11286                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11287                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11288                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11289                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11290                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11291                 unlock_user_struct(target_stx, arg5, 1);
11292             }
11293         }
11294         return ret;
11295 #endif
11296 #ifdef TARGET_NR_lchown
11297     case TARGET_NR_lchown:
11298         if (!(p = lock_user_string(arg1)))
11299             return -TARGET_EFAULT;
11300         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11301         unlock_user(p, arg1, 0);
11302         return ret;
11303 #endif
11304 #ifdef TARGET_NR_getuid
11305     case TARGET_NR_getuid:
11306         return get_errno(high2lowuid(getuid()));
11307 #endif
11308 #ifdef TARGET_NR_getgid
11309     case TARGET_NR_getgid:
11310         return get_errno(high2lowgid(getgid()));
11311 #endif
11312 #ifdef TARGET_NR_geteuid
11313     case TARGET_NR_geteuid:
11314         return get_errno(high2lowuid(geteuid()));
11315 #endif
11316 #ifdef TARGET_NR_getegid
11317     case TARGET_NR_getegid:
11318         return get_errno(high2lowgid(getegid()));
11319 #endif
11320     case TARGET_NR_setreuid:
11321         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11322     case TARGET_NR_setregid:
11323         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11324     case TARGET_NR_getgroups:
11325         {
11326             int gidsetsize = arg1;
11327             target_id *target_grouplist;
11328             gid_t *grouplist;
11329             int i;
11330 
11331             grouplist = alloca(gidsetsize * sizeof(gid_t));
11332             ret = get_errno(getgroups(gidsetsize, grouplist));
11333             if (gidsetsize == 0)
11334                 return ret;
11335             if (!is_error(ret)) {
11336                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11337                 if (!target_grouplist)
11338                     return -TARGET_EFAULT;
11339                 for (i = 0; i < ret; i++)
11340                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11341                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11342             }
11343         }
11344         return ret;
11345     case TARGET_NR_setgroups:
11346         {
11347             int gidsetsize = arg1;
11348             target_id *target_grouplist;
11349             gid_t *grouplist = NULL;
11350             int i;
11351             if (gidsetsize) {
11352                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11353                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11354                 if (!target_grouplist) {
11355                     return -TARGET_EFAULT;
11356                 }
11357                 for (i = 0; i < gidsetsize; i++) {
11358                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11359                 }
11360                 unlock_user(target_grouplist, arg2, 0);
11361             }
11362             return get_errno(setgroups(gidsetsize, grouplist));
11363         }
11364     case TARGET_NR_fchown:
11365         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11366 #if defined(TARGET_NR_fchownat)
11367     case TARGET_NR_fchownat:
11368         if (!(p = lock_user_string(arg2)))
11369             return -TARGET_EFAULT;
11370         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11371                                  low2highgid(arg4), arg5));
11372         unlock_user(p, arg2, 0);
11373         return ret;
11374 #endif
11375 #ifdef TARGET_NR_setresuid
11376     case TARGET_NR_setresuid:
11377         return get_errno(sys_setresuid(low2highuid(arg1),
11378                                        low2highuid(arg2),
11379                                        low2highuid(arg3)));
11380 #endif
11381 #ifdef TARGET_NR_getresuid
11382     case TARGET_NR_getresuid:
11383         {
11384             uid_t ruid, euid, suid;
11385             ret = get_errno(getresuid(&ruid, &euid, &suid));
11386             if (!is_error(ret)) {
11387                 if (put_user_id(high2lowuid(ruid), arg1)
11388                     || put_user_id(high2lowuid(euid), arg2)
11389                     || put_user_id(high2lowuid(suid), arg3))
11390                     return -TARGET_EFAULT;
11391             }
11392         }
11393         return ret;
11394 #endif
11395 #ifdef TARGET_NR_getresgid
11396     case TARGET_NR_setresgid:
11397         return get_errno(sys_setresgid(low2highgid(arg1),
11398                                        low2highgid(arg2),
11399                                        low2highgid(arg3)));
11400 #endif
11401 #ifdef TARGET_NR_getresgid
11402     case TARGET_NR_getresgid:
11403         {
11404             gid_t rgid, egid, sgid;
11405             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11406             if (!is_error(ret)) {
11407                 if (put_user_id(high2lowgid(rgid), arg1)
11408                     || put_user_id(high2lowgid(egid), arg2)
11409                     || put_user_id(high2lowgid(sgid), arg3))
11410                     return -TARGET_EFAULT;
11411             }
11412         }
11413         return ret;
11414 #endif
11415 #ifdef TARGET_NR_chown
11416     case TARGET_NR_chown:
11417         if (!(p = lock_user_string(arg1)))
11418             return -TARGET_EFAULT;
11419         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11420         unlock_user(p, arg1, 0);
11421         return ret;
11422 #endif
11423     case TARGET_NR_setuid:
11424         return get_errno(sys_setuid(low2highuid(arg1)));
11425     case TARGET_NR_setgid:
11426         return get_errno(sys_setgid(low2highgid(arg1)));
11427     case TARGET_NR_setfsuid:
11428         return get_errno(setfsuid(arg1));
11429     case TARGET_NR_setfsgid:
11430         return get_errno(setfsgid(arg1));
11431 
11432 #ifdef TARGET_NR_lchown32
11433     case TARGET_NR_lchown32:
11434         if (!(p = lock_user_string(arg1)))
11435             return -TARGET_EFAULT;
11436         ret = get_errno(lchown(p, arg2, arg3));
11437         unlock_user(p, arg1, 0);
11438         return ret;
11439 #endif
11440 #ifdef TARGET_NR_getuid32
11441     case TARGET_NR_getuid32:
11442         return get_errno(getuid());
11443 #endif
11444 
11445 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11446     /* Alpha specific */
11447     case TARGET_NR_getxuid:
11448         {
11449             uid_t euid;
11450             euid = geteuid();
11451             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11452         }
11453         return get_errno(getuid());
11454 #endif
11455 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11456     /* Alpha specific */
11457     case TARGET_NR_getxgid:
11458         {
11459             gid_t egid;
11460             egid = getegid();
11461             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11462         }
11463         return get_errno(getgid());
11464 #endif
11465 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11466     /* Alpha specific */
11467     case TARGET_NR_osf_getsysinfo:
11468         ret = -TARGET_EOPNOTSUPP;
11469         switch (arg1) {
11470           case TARGET_GSI_IEEE_FP_CONTROL:
11471             {
11472                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11473                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11474 
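                      /*
                       * The live exception status bits are kept in the fpcr,
                       * so fold them into the software completion word that
                       * is reported to the guest.
                       */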
11475                 swcr &= ~SWCR_STATUS_MASK;
11476                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11477 
11478                 if (put_user_u64(swcr, arg2))
11479                     return -TARGET_EFAULT;
11480                 ret = 0;
11481             }
11482             break;
11483 
11484           /* case GSI_IEEE_STATE_AT_SIGNAL:
11485              -- Not implemented in linux kernel.
11486              case GSI_UACPROC:
11487              -- Retrieves current unaligned access state; not much used.
11488              case GSI_PROC_TYPE:
11489              -- Retrieves implver information; surely not used.
11490              case GSI_GET_HWRPB:
11491              -- Grabs a copy of the HWRPB; surely not used.
11492           */
11493         }
11494         return ret;
11495 #endif
11496 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11497     /* Alpha specific */
11498     case TARGET_NR_osf_setsysinfo:
11499         ret = -TARGET_EOPNOTSUPP;
11500         switch (arg1) {
11501           case TARGET_SSI_IEEE_FP_CONTROL:
11502             {
11503                 uint64_t swcr, fpcr;
11504 
11505                 if (get_user_u64(swcr, arg2)) {
11506                     return -TARGET_EFAULT;
11507                 }
11508 
11509                 /*
11510                  * The kernel calls swcr_update_status to update the
11511                  * status bits from the fpcr at every point that it
11512                  * could be queried.  Therefore, we store the status
11513                  * bits only in FPCR.
11514                  */
11515                 ((CPUAlphaState *)cpu_env)->swcr
11516                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11517 
11518                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11519                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11520                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11521                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11522                 ret = 0;
11523             }
11524             break;
11525 
11526           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11527             {
11528                 uint64_t exc, fpcr, fex;
11529 
11530                 if (get_user_u64(exc, arg2)) {
11531                     return -TARGET_EFAULT;
11532                 }
11533                 exc &= SWCR_STATUS_MASK;
11534                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11535 
11536                 /* Old exceptions are not signaled.  */
11537                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11538                 fex = exc & ~fex;
11539                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11540                 fex &= ((CPUArchState *)cpu_env)->swcr;
11541 
11542                 /* Update the hardware fpcr.  */
11543                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11544                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11545 
11546                 if (fex) {
11547                     int si_code = TARGET_FPE_FLTUNK;
11548                     target_siginfo_t info;
11549 
11550                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11551                         si_code = TARGET_FPE_FLTUND;
11552                     }
11553                     if (fex & SWCR_TRAP_ENABLE_INE) {
11554                         si_code = TARGET_FPE_FLTRES;
11555                     }
11556                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11557                         si_code = TARGET_FPE_FLTUND;
11558                     }
11559                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11560                         si_code = TARGET_FPE_FLTOVF;
11561                     }
11562                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11563                         si_code = TARGET_FPE_FLTDIV;
11564                     }
11565                     if (fex & SWCR_TRAP_ENABLE_INV) {
11566                         si_code = TARGET_FPE_FLTINV;
11567                     }
11568 
11569                     info.si_signo = SIGFPE;
11570                     info.si_errno = 0;
11571                     info.si_code = si_code;
11572                     info._sifields._sigfault._addr
11573                         = ((CPUArchState *)cpu_env)->pc;
11574                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11575                                  QEMU_SI_FAULT, &info);
11576                 }
11577                 ret = 0;
11578             }
11579             break;
11580 
11581           /* case SSI_NVPAIRS:
11582              -- Used with SSIN_UACPROC to enable unaligned accesses.
11583              case SSI_IEEE_STATE_AT_SIGNAL:
11584              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11585              -- Not implemented in linux kernel
11586           */
11587         }
11588         return ret;
11589 #endif
11590 #ifdef TARGET_NR_osf_sigprocmask
11591     /* Alpha specific.  */
11592     case TARGET_NR_osf_sigprocmask:
11593         {
11594             abi_ulong mask;
11595             int how;
11596             sigset_t set, oldset;
11597 
11598             switch (arg1) {
11599             case TARGET_SIG_BLOCK:
11600                 how = SIG_BLOCK;
11601                 break;
11602             case TARGET_SIG_UNBLOCK:
11603                 how = SIG_UNBLOCK;
11604                 break;
11605             case TARGET_SIG_SETMASK:
11606                 how = SIG_SETMASK;
11607                 break;
11608             default:
11609                 return -TARGET_EINVAL;
11610             }
11611             mask = arg2;
11612             target_to_host_old_sigset(&set, &mask);
11613             ret = do_sigprocmask(how, &set, &oldset);
11614             if (!ret) {
11615                 host_to_target_old_sigset(&mask, &oldset);
11616                 ret = mask;
11617             }
11618         }
11619         return ret;
11620 #endif
11621 
11622 #ifdef TARGET_NR_getgid32
11623     case TARGET_NR_getgid32:
11624         return get_errno(getgid());
11625 #endif
11626 #ifdef TARGET_NR_geteuid32
11627     case TARGET_NR_geteuid32:
11628         return get_errno(geteuid());
11629 #endif
11630 #ifdef TARGET_NR_getegid32
11631     case TARGET_NR_getegid32:
11632         return get_errno(getegid());
11633 #endif
11634 #ifdef TARGET_NR_setreuid32
11635     case TARGET_NR_setreuid32:
11636         return get_errno(setreuid(arg1, arg2));
11637 #endif
11638 #ifdef TARGET_NR_setregid32
11639     case TARGET_NR_setregid32:
11640         return get_errno(setregid(arg1, arg2));
11641 #endif
11642 #ifdef TARGET_NR_getgroups32
11643     case TARGET_NR_getgroups32:
11644         {
11645             int gidsetsize = arg1;
11646             uint32_t *target_grouplist;
11647             gid_t *grouplist;
11648             int i;
11649 
11650             grouplist = alloca(gidsetsize * sizeof(gid_t));
11651             ret = get_errno(getgroups(gidsetsize, grouplist));
11652             if (gidsetsize == 0)
11653                 return ret;
11654             if (!is_error(ret)) {
11655                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11656                 if (!target_grouplist) {
11657                     return -TARGET_EFAULT;
11658                 }
11659                 for (i = 0; i < ret; i++)
11660                     target_grouplist[i] = tswap32(grouplist[i]);
11661                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11662             }
11663         }
11664         return ret;
11665 #endif
11666 #ifdef TARGET_NR_setgroups32
11667     case TARGET_NR_setgroups32:
11668         {
11669             int gidsetsize = arg1;
11670             uint32_t *target_grouplist;
11671             gid_t *grouplist;
11672             int i;
11673 
11674             grouplist = alloca(gidsetsize * sizeof(gid_t));
11675             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11676             if (!target_grouplist) {
11677                 return -TARGET_EFAULT;
11678             }
11679             for (i = 0; i < gidsetsize; i++)
11680                 grouplist[i] = tswap32(target_grouplist[i]);
11681             unlock_user(target_grouplist, arg2, 0);
11682             return get_errno(setgroups(gidsetsize, grouplist));
11683         }
11684 #endif
11685 #ifdef TARGET_NR_fchown32
11686     case TARGET_NR_fchown32:
11687         return get_errno(fchown(arg1, arg2, arg3));
11688 #endif
11689 #ifdef TARGET_NR_setresuid32
11690     case TARGET_NR_setresuid32:
11691         return get_errno(sys_setresuid(arg1, arg2, arg3));
11692 #endif
11693 #ifdef TARGET_NR_getresuid32
11694     case TARGET_NR_getresuid32:
11695         {
11696             uid_t ruid, euid, suid;
11697             ret = get_errno(getresuid(&ruid, &euid, &suid));
11698             if (!is_error(ret)) {
11699                 if (put_user_u32(ruid, arg1)
11700                     || put_user_u32(euid, arg2)
11701                     || put_user_u32(suid, arg3))
11702                     return -TARGET_EFAULT;
11703             }
11704         }
11705         return ret;
11706 #endif
11707 #ifdef TARGET_NR_setresgid32
11708     case TARGET_NR_setresgid32:
11709         return get_errno(sys_setresgid(arg1, arg2, arg3));
11710 #endif
11711 #ifdef TARGET_NR_getresgid32
11712     case TARGET_NR_getresgid32:
11713         {
11714             gid_t rgid, egid, sgid;
11715             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11716             if (!is_error(ret)) {
11717                 if (put_user_u32(rgid, arg1)
11718                     || put_user_u32(egid, arg2)
11719                     || put_user_u32(sgid, arg3))
11720                     return -TARGET_EFAULT;
11721             }
11722         }
11723         return ret;
11724 #endif
11725 #ifdef TARGET_NR_chown32
11726     case TARGET_NR_chown32:
11727         if (!(p = lock_user_string(arg1)))
11728             return -TARGET_EFAULT;
11729         ret = get_errno(chown(p, arg2, arg3));
11730         unlock_user(p, arg1, 0);
11731         return ret;
11732 #endif
11733 #ifdef TARGET_NR_setuid32
11734     case TARGET_NR_setuid32:
11735         return get_errno(sys_setuid(arg1));
11736 #endif
11737 #ifdef TARGET_NR_setgid32
11738     case TARGET_NR_setgid32:
11739         return get_errno(sys_setgid(arg1));
11740 #endif
11741 #ifdef TARGET_NR_setfsuid32
11742     case TARGET_NR_setfsuid32:
11743         return get_errno(setfsuid(arg1));
11744 #endif
11745 #ifdef TARGET_NR_setfsgid32
11746     case TARGET_NR_setfsgid32:
11747         return get_errno(setfsgid(arg1));
11748 #endif
11749 #ifdef TARGET_NR_mincore
11750     case TARGET_NR_mincore:
11751         {
11752             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11753             if (!a) {
11754                 return -TARGET_ENOMEM;
11755             }
11756             p = lock_user_string(arg3);
11757             if (!p) {
11758                 ret = -TARGET_EFAULT;
11759             } else {
11760                 ret = get_errno(mincore(a, arg2, p));
11761                 unlock_user(p, arg3, ret);
11762             }
11763             unlock_user(a, arg1, 0);
11764         }
11765         return ret;
11766 #endif
11767 #ifdef TARGET_NR_arm_fadvise64_64
11768     case TARGET_NR_arm_fadvise64_64:
11769         /* arm_fadvise64_64 looks like fadvise64_64 but
11770          * with different argument order: fd, advice, offset, len
11771          * rather than the usual fd, offset, len, advice.
11772          * Note that offset and len are both 64-bit so appear as
11773          * pairs of 32-bit registers.
11774          */
11775         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11776                             target_offset64(arg5, arg6), arg2);
11777         return -host_to_target_errno(ret);
11778 #endif
11779 
11780 #if TARGET_ABI_BITS == 32
11781 
11782 #ifdef TARGET_NR_fadvise64_64
11783     case TARGET_NR_fadvise64_64:
11784 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11785         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11786         ret = arg2;
11787         arg2 = arg3;
11788         arg3 = arg4;
11789         arg4 = arg5;
11790         arg5 = arg6;
11791         arg6 = ret;
11792 #else
11793         /* 6 args: fd, offset (high, low), len (high, low), advice */
11794         if (regpairs_aligned(cpu_env, num)) {
11795             /* offset is in (3,4), len in (5,6) and advice in 7 */
11796             arg2 = arg3;
11797             arg3 = arg4;
11798             arg4 = arg5;
11799             arg5 = arg6;
11800             arg6 = arg7;
11801         }
11802 #endif
11803         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11804                             target_offset64(arg4, arg5), arg6);
11805         return -host_to_target_errno(ret);
11806 #endif
11807 
11808 #ifdef TARGET_NR_fadvise64
11809     case TARGET_NR_fadvise64:
11810         /* 5 args: fd, offset (high, low), len, advice */
11811         if (regpairs_aligned(cpu_env, num)) {
11812             /* offset is in (3,4), len in 5 and advice in 6 */
11813             arg2 = arg3;
11814             arg3 = arg4;
11815             arg4 = arg5;
11816             arg5 = arg6;
11817         }
11818         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11819         return -host_to_target_errno(ret);
11820 #endif
11821 
11822 #else /* not a 32-bit ABI */
11823 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11824 #ifdef TARGET_NR_fadvise64_64
11825     case TARGET_NR_fadvise64_64:
11826 #endif
11827 #ifdef TARGET_NR_fadvise64
11828     case TARGET_NR_fadvise64:
11829 #endif
11830 #ifdef TARGET_S390X
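              /*
               * The s390x ABI numbers POSIX_FADV_DONTNEED and _NOREUSE as 6
               * and 7, so translate the guest's advice values to the host's
               * constants (and make the guest's 4 and 5 invalid).
               */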
11831         switch (arg4) {
11832         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11833         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11834         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11835         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11836         default: break;
11837         }
11838 #endif
11839         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11840 #endif
11841 #endif /* end of 64-bit ABI fadvise handling */
11842 
11843 #ifdef TARGET_NR_madvise
11844     case TARGET_NR_madvise:
11845         /* A straight passthrough may not be safe because qemu sometimes
11846            turns private file-backed mappings into anonymous mappings.
11847            This will break MADV_DONTNEED.
11848            This is a hint, so ignoring and returning success is ok.  */
11849         return 0;
11850 #endif
11851 #ifdef TARGET_NR_fcntl64
11852     case TARGET_NR_fcntl64:
11853     {
11854         int cmd;
11855         struct flock64 fl;
11856         from_flock64_fn *copyfrom = copy_from_user_flock64;
11857         to_flock64_fn *copyto = copy_to_user_flock64;
11858 
11859 #ifdef TARGET_ARM
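              /*
               * The old ARM OABI lays out struct flock64 with different
               * padding than EABI, so use the OABI copy helpers for
               * old-ABI guests.
               */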
11860         if (!((CPUARMState *)cpu_env)->eabi) {
11861             copyfrom = copy_from_user_oabi_flock64;
11862             copyto = copy_to_user_oabi_flock64;
11863         }
11864 #endif
11865 
11866         cmd = target_to_host_fcntl_cmd(arg2);
11867         if (cmd == -TARGET_EINVAL) {
11868             return cmd;
11869         }
11870 
11871         switch (arg2) {
11872         case TARGET_F_GETLK64:
11873             ret = copyfrom(&fl, arg3);
11874             if (ret) {
11875                 break;
11876             }
11877             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11878             if (ret == 0) {
11879                 ret = copyto(arg3, &fl);
11880             }
11881             break;
11882 
11883         case TARGET_F_SETLK64:
11884         case TARGET_F_SETLKW64:
11885             ret = copyfrom(&fl, arg3);
11886             if (ret) {
11887                 break;
11888             }
11889             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11890             break;
11891         default:
11892             ret = do_fcntl(arg1, arg2, arg3);
11893             break;
11894         }
11895         return ret;
11896     }
11897 #endif
11898 #ifdef TARGET_NR_cacheflush
11899     case TARGET_NR_cacheflush:
11900         /* self-modifying code is handled automatically, so nothing needed */
11901         return 0;
11902 #endif
11903 #ifdef TARGET_NR_getpagesize
11904     case TARGET_NR_getpagesize:
11905         return TARGET_PAGE_SIZE;
11906 #endif
11907     case TARGET_NR_gettid:
11908         return get_errno(sys_gettid());
11909 #ifdef TARGET_NR_readahead
11910     case TARGET_NR_readahead:
11911 #if TARGET_ABI_BITS == 32
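              /*
               * As with pread64 above, aligned-register-pair ABIs pass the
               * 64-bit offset one argument slot later.
               */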
11912         if (regpairs_aligned(cpu_env, num)) {
11913             arg2 = arg3;
11914             arg3 = arg4;
11915             arg4 = arg5;
11916         }
11917         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11918 #else
11919         ret = get_errno(readahead(arg1, arg2, arg3));
11920 #endif
11921         return ret;
11922 #endif
11923 #ifdef CONFIG_ATTR
11924 #ifdef TARGET_NR_setxattr
11925     case TARGET_NR_listxattr:
11926     case TARGET_NR_llistxattr:
11927     {
11928         void *p, *b = 0;
11929         if (arg2) {
11930             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11931             if (!b) {
11932                 return -TARGET_EFAULT;
11933             }
11934         }
11935         p = lock_user_string(arg1);
11936         if (p) {
11937             if (num == TARGET_NR_listxattr) {
11938                 ret = get_errno(listxattr(p, b, arg3));
11939             } else {
11940                 ret = get_errno(llistxattr(p, b, arg3));
11941             }
11942         } else {
11943             ret = -TARGET_EFAULT;
11944         }
11945         unlock_user(p, arg1, 0);
11946         unlock_user(b, arg2, arg3);
11947         return ret;
11948     }
11949     case TARGET_NR_flistxattr:
11950     {
11951         void *b = 0;
11952         if (arg2) {
11953             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11954             if (!b) {
11955                 return -TARGET_EFAULT;
11956             }
11957         }
11958         ret = get_errno(flistxattr(arg1, b, arg3));
11959         unlock_user(b, arg2, arg3);
11960         return ret;
11961     }
11962     case TARGET_NR_setxattr:
11963     case TARGET_NR_lsetxattr:
11964         {
11965             void *p, *n, *v = 0;
11966             if (arg3) {
11967                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11968                 if (!v) {
11969                     return -TARGET_EFAULT;
11970                 }
11971             }
11972             p = lock_user_string(arg1);
11973             n = lock_user_string(arg2);
11974             if (p && n) {
11975                 if (num == TARGET_NR_setxattr) {
11976                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11977                 } else {
11978                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11979                 }
11980             } else {
11981                 ret = -TARGET_EFAULT;
11982             }
11983             unlock_user(p, arg1, 0);
11984             unlock_user(n, arg2, 0);
11985             unlock_user(v, arg3, 0);
11986         }
11987         return ret;
11988     case TARGET_NR_fsetxattr:
11989         {
11990             void *n, *v = 0;
11991             if (arg3) {
11992                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11993                 if (!v) {
11994                     return -TARGET_EFAULT;
11995                 }
11996             }
11997             n = lock_user_string(arg2);
11998             if (n) {
11999                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12000             } else {
12001                 ret = -TARGET_EFAULT;
12002             }
12003             unlock_user(n, arg2, 0);
12004             unlock_user(v, arg3, 0);
12005         }
12006         return ret;
12007     case TARGET_NR_getxattr:
12008     case TARGET_NR_lgetxattr:
12009         {
12010             void *p, *n, *v = 0;
12011             if (arg3) {
12012                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12013                 if (!v) {
12014                     return -TARGET_EFAULT;
12015                 }
12016             }
12017             p = lock_user_string(arg1);
12018             n = lock_user_string(arg2);
12019             if (p && n) {
12020                 if (num == TARGET_NR_getxattr) {
12021                     ret = get_errno(getxattr(p, n, v, arg4));
12022                 } else {
12023                     ret = get_errno(lgetxattr(p, n, v, arg4));
12024                 }
12025             } else {
12026                 ret = -TARGET_EFAULT;
12027             }
12028             unlock_user(p, arg1, 0);
12029             unlock_user(n, arg2, 0);
12030             unlock_user(v, arg3, arg4);
12031         }
12032         return ret;
12033     case TARGET_NR_fgetxattr:
12034         {
12035             void *n, *v = 0;
12036             if (arg3) {
12037                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12038                 if (!v) {
12039                     return -TARGET_EFAULT;
12040                 }
12041             }
12042             n = lock_user_string(arg2);
12043             if (n) {
12044                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12045             } else {
12046                 ret = -TARGET_EFAULT;
12047             }
12048             unlock_user(n, arg2, 0);
12049             unlock_user(v, arg3, arg4);
12050         }
12051         return ret;
12052     case TARGET_NR_removexattr:
12053     case TARGET_NR_lremovexattr:
12054         {
12055             void *p, *n;
12056             p = lock_user_string(arg1);
12057             n = lock_user_string(arg2);
12058             if (p && n) {
12059                 if (num == TARGET_NR_removexattr) {
12060                     ret = get_errno(removexattr(p, n));
12061                 } else {
12062                     ret = get_errno(lremovexattr(p, n));
12063                 }
12064             } else {
12065                 ret = -TARGET_EFAULT;
12066             }
12067             unlock_user(p, arg1, 0);
12068             unlock_user(n, arg2, 0);
12069         }
12070         return ret;
12071     case TARGET_NR_fremovexattr:
12072         {
12073             void *n;
12074             n = lock_user_string(arg2);
12075             if (n) {
12076                 ret = get_errno(fremovexattr(arg1, n));
12077             } else {
12078                 ret = -TARGET_EFAULT;
12079             }
12080             unlock_user(n, arg2, 0);
12081         }
12082         return ret;
12083 #endif
12084 #endif /* CONFIG_ATTR */
12085 #ifdef TARGET_NR_set_thread_area
12086     case TARGET_NR_set_thread_area:
12087 #if defined(TARGET_MIPS)
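      /*
       * MIPS exposes the thread pointer to user space through the CP0
       * UserLocal register (readable with rdhwr), so storing arg1 there
       * is all that is needed.
       */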
12088       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12089       return 0;
12090 #elif defined(TARGET_CRIS)
12091       if (arg1 & 0xff) {
12092           ret = -TARGET_EINVAL;
12093       } else {
12094           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12095           ret = 0;
12096       }
12097       return ret;
12098 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12099       return do_set_thread_area(cpu_env, arg1);
12100 #elif defined(TARGET_M68K)
12101       {
12102           TaskState *ts = cpu->opaque;
12103           ts->tp_value = arg1;
12104           return 0;
12105       }
12106 #else
12107       return -TARGET_ENOSYS;
12108 #endif
12109 #endif
12110 #ifdef TARGET_NR_get_thread_area
12111     case TARGET_NR_get_thread_area:
12112 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12113         return do_get_thread_area(cpu_env, arg1);
12114 #elif defined(TARGET_M68K)
12115         {
12116             TaskState *ts = cpu->opaque;
12117             return ts->tp_value;
12118         }
12119 #else
12120         return -TARGET_ENOSYS;
12121 #endif
12122 #endif
12123 #ifdef TARGET_NR_getdomainname
12124     case TARGET_NR_getdomainname:
12125         return -TARGET_ENOSYS;
12126 #endif
12127 
12128 #ifdef TARGET_NR_clock_settime
12129     case TARGET_NR_clock_settime:
12130     {
12131         struct timespec ts;
12132 
12133         ret = target_to_host_timespec(&ts, arg2);
12134         if (!is_error(ret)) {
12135             ret = get_errno(clock_settime(arg1, &ts));
12136         }
12137         return ret;
12138     }
12139 #endif
12140 #ifdef TARGET_NR_clock_settime64
12141     case TARGET_NR_clock_settime64:
12142     {
12143         struct timespec ts;
12144 
12145         ret = target_to_host_timespec64(&ts, arg2);
12146         if (!is_error(ret)) {
12147             ret = get_errno(clock_settime(arg1, &ts));
12148         }
12149         return ret;
12150     }
12151 #endif
12152 #ifdef TARGET_NR_clock_gettime
12153     case TARGET_NR_clock_gettime:
12154     {
12155         struct timespec ts;
12156         ret = get_errno(clock_gettime(arg1, &ts));
12157         if (!is_error(ret)) {
12158             ret = host_to_target_timespec(arg2, &ts);
12159         }
12160         return ret;
12161     }
12162 #endif
12163 #ifdef TARGET_NR_clock_gettime64
12164     case TARGET_NR_clock_gettime64:
12165     {
12166         struct timespec ts;
12167         ret = get_errno(clock_gettime(arg1, &ts));
12168         if (!is_error(ret)) {
12169             ret = host_to_target_timespec64(arg2, &ts);
12170         }
12171         return ret;
12172     }
12173 #endif
12174 #ifdef TARGET_NR_clock_getres
12175     case TARGET_NR_clock_getres:
12176     {
12177         struct timespec ts;
12178         ret = get_errno(clock_getres(arg1, &ts));
12179         if (!is_error(ret)) {
12180             host_to_target_timespec(arg2, &ts);
12181         }
12182         return ret;
12183     }
12184 #endif
12185 #ifdef TARGET_NR_clock_getres_time64
12186     case TARGET_NR_clock_getres_time64:
12187     {
12188         struct timespec ts;
12189         ret = get_errno(clock_getres(arg1, &ts));
12190         if (!is_error(ret)) {
12191             host_to_target_timespec64(arg2, &ts);
12192         }
12193         return ret;
12194     }
12195 #endif
12196 #ifdef TARGET_NR_clock_nanosleep
12197     case TARGET_NR_clock_nanosleep:
12198     {
12199         struct timespec ts;
12200         if (target_to_host_timespec(&ts, arg3)) {
12201             return -TARGET_EFAULT;
12202         }
12203         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12204                                              &ts, arg4 ? &ts : NULL));
12205         /*
12206          * If the call is interrupted by a signal handler, it fails with
12207          * -TARGET_EINTR; in that case, if arg4 is not NULL and arg2 is not
12208          * TIMER_ABSTIME, the remaining unslept time is copied back to arg4.
12209          */
12210         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12211             host_to_target_timespec(arg4, &ts)) {
12212               return -TARGET_EFAULT;
12213         }
12214 
12215         return ret;
12216     }
12217 #endif
12218 #ifdef TARGET_NR_clock_nanosleep_time64
12219     case TARGET_NR_clock_nanosleep_time64:
12220     {
12221         struct timespec ts;
12222 
12223         if (target_to_host_timespec64(&ts, arg3)) {
12224             return -TARGET_EFAULT;
12225         }
12226 
12227         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12228                                              &ts, arg4 ? &ts : NULL));
12229 
12230         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12231             host_to_target_timespec64(arg4, &ts)) {
12232             return -TARGET_EFAULT;
12233         }
12234         return ret;
12235     }
12236 #endif
12237 
12238 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12239     case TARGET_NR_set_tid_address:
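        /*
         * Pass the host view of the guest address so that the kernel's
         * clear-and-wake of the child tid word at thread exit lands in
         * guest memory.
         */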
12240         return get_errno(set_tid_address((int *)g2h(arg1)));
12241 #endif
12242 
12243     case TARGET_NR_tkill:
12244         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12245 
12246     case TARGET_NR_tgkill:
12247         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12248                          target_to_host_signal(arg3)));
12249 
12250 #ifdef TARGET_NR_set_robust_list
12251     case TARGET_NR_set_robust_list:
12252     case TARGET_NR_get_robust_list:
12253         /* The ABI for supporting robust futexes has userspace pass
12254          * the kernel a pointer to a linked list which is updated by
12255          * userspace after the syscall; the list is walked by the kernel
12256          * when the thread exits. Since the linked list in QEMU guest
12257          * memory isn't a valid linked list for the host and we have
12258          * no way to reliably intercept the thread-death event, we can't
12259          * support these. Silently return ENOSYS so that guest userspace
12260          * falls back to a non-robust futex implementation (which should
12261          * be OK except in the corner case of the guest crashing while
12262          * holding a mutex that is shared with another process via
12263          * shared memory).
12264          */
12265         return -TARGET_ENOSYS;
12266 #endif
12267 
12268 #if defined(TARGET_NR_utimensat)
12269     case TARGET_NR_utimensat:
12270         {
12271             struct timespec *tsp, ts[2];
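            /*
             * arg3, when non-NULL, points to two guest timespecs (atime then
             * mtime); a NULL pointer means "set both to the current time".
             */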
12272             if (!arg3) {
12273                 tsp = NULL;
12274             } else {
12275                 if (target_to_host_timespec(ts, arg3)) {
12276                     return -TARGET_EFAULT;
12277                 }
12278                 if (target_to_host_timespec(ts + 1, arg3 +
12279                                             sizeof(struct target_timespec))) {
12280                     return -TARGET_EFAULT;
12281                 }
12282                 tsp = ts;
12283             }
12284             if (!arg2) {
12285                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12286             } else {
12287                 if (!(p = lock_user_string(arg2))) {
12288                     return -TARGET_EFAULT;
12289                 }
12290                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12291                 unlock_user(p, arg2, 0);
12292             }
12293         }
12294         return ret;
12295 #endif
12296 #ifdef TARGET_NR_utimensat_time64
12297     case TARGET_NR_utimensat_time64:
12298         {
12299             struct timespec *tsp, ts[2];
12300             if (!arg3) {
12301                 tsp = NULL;
12302             } else {
12303                 if (target_to_host_timespec64(ts, arg3)) {
12304                     return -TARGET_EFAULT;
12305                 }
12306                 if (target_to_host_timespec64(ts + 1, arg3 +
12307                                      sizeof(struct target__kernel_timespec))) {
12308                     return -TARGET_EFAULT;
12309                 }
12310                 tsp = ts;
12311             }
12312             if (!arg2) {
12313                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12314             } else {
12315                 p = lock_user_string(arg2);
12316                 if (!p) {
12317                     return -TARGET_EFAULT;
12318                 }
12319                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12320                 unlock_user(p, arg2, 0);
12321             }
12322         }
12323         return ret;
12324 #endif
12325 #ifdef TARGET_NR_futex
12326     case TARGET_NR_futex:
12327         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12328 #endif
12329 #ifdef TARGET_NR_futex_time64
12330     case TARGET_NR_futex_time64:
12331         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
12332 #endif
12333 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12334     case TARGET_NR_inotify_init:
12335         ret = get_errno(sys_inotify_init());
12336         if (ret >= 0) {
12337             fd_trans_register(ret, &target_inotify_trans);
12338         }
12339         return ret;
12340 #endif
12341 #ifdef CONFIG_INOTIFY1
12342 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12343     case TARGET_NR_inotify_init1:
12344         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12345                                           fcntl_flags_tbl)));
12346         if (ret >= 0) {
12347             fd_trans_register(ret, &target_inotify_trans);
12348         }
12349         return ret;
12350 #endif
12351 #endif
12352 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12353     case TARGET_NR_inotify_add_watch:
12354         p = lock_user_string(arg2);
12355         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12356         unlock_user(p, arg2, 0);
12357         return ret;
12358 #endif
12359 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12360     case TARGET_NR_inotify_rm_watch:
12361         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12362 #endif
12363 
12364 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12365     case TARGET_NR_mq_open:
12366         {
12367             struct mq_attr posix_mq_attr;
12368             struct mq_attr *pposix_mq_attr;
12369             int host_flags;
12370 
12371             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12372             pposix_mq_attr = NULL;
12373             if (arg4) {
12374                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12375                     return -TARGET_EFAULT;
12376                 }
12377                 pposix_mq_attr = &posix_mq_attr;
12378             }
12379             p = lock_user_string(arg1 - 1);
12380             if (!p) {
12381                 return -TARGET_EFAULT;
12382             }
12383             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12384             unlock_user(p, arg1, 0);
12385         }
12386         return ret;
12387 
12388     case TARGET_NR_mq_unlink:
12389         p = lock_user_string(arg1 - 1);
12390         if (!p) {
12391             return -TARGET_EFAULT;
12392         }
12393         ret = get_errno(mq_unlink(p));
12394         unlock_user(p, arg1, 0);
12395         return ret;
12396 
12397 #ifdef TARGET_NR_mq_timedsend
12398     case TARGET_NR_mq_timedsend:
12399         {
12400             struct timespec ts;
12401 
12402             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12403             if (arg5 != 0) {
12404                 if (target_to_host_timespec(&ts, arg5)) {
12405                     return -TARGET_EFAULT;
12406                 }
12407                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12408                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12409                     return -TARGET_EFAULT;
12410                 }
12411             } else {
12412                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12413             }
12414             unlock_user(p, arg2, arg3);
12415         }
12416         return ret;
12417 #endif
12418 #ifdef TARGET_NR_mq_timedsend_time64
12419     case TARGET_NR_mq_timedsend_time64:
12420         {
12421             struct timespec ts;
12422 
12423             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12424             if (arg5 != 0) {
12425                 if (target_to_host_timespec64(&ts, arg5)) {
12426                     return -TARGET_EFAULT;
12427                 }
12428                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12429                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12430                     return -TARGET_EFAULT;
12431                 }
12432             } else {
12433                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12434             }
12435             unlock_user(p, arg2, arg3);
12436         }
12437         return ret;
12438 #endif
12439 
12440 #ifdef TARGET_NR_mq_timedreceive
12441     case TARGET_NR_mq_timedreceive:
12442         {
12443             struct timespec ts;
12444             unsigned int prio;
12445 
12446             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12447             if (arg5 != 0) {
12448                 if (target_to_host_timespec(&ts, arg5)) {
12449                     return -TARGET_EFAULT;
12450                 }
12451                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12452                                                      &prio, &ts));
12453                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12454                     return -TARGET_EFAULT;
12455                 }
12456             } else {
12457                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12458                                                      &prio, NULL));
12459             }
12460             unlock_user(p, arg2, arg3);
12461             if (arg4 != 0)
12462                 put_user_u32(prio, arg4);
12463         }
12464         return ret;
12465 #endif
12466 #ifdef TARGET_NR_mq_timedreceive_time64
12467     case TARGET_NR_mq_timedreceive_time64:
12468         {
12469             struct timespec ts;
12470             unsigned int prio;
12471 
12472             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12473             if (arg5 != 0) {
12474                 if (target_to_host_timespec64(&ts, arg5)) {
12475                     return -TARGET_EFAULT;
12476                 }
12477                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12478                                                      &prio, &ts));
12479                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12480                     return -TARGET_EFAULT;
12481                 }
12482             } else {
12483                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12484                                                      &prio, NULL));
12485             }
12486             unlock_user(p, arg2, arg3);
12487             if (arg4 != 0) {
12488                 put_user_u32(prio, arg4);
12489             }
12490         }
12491         return ret;
12492 #endif
12493 
12494     /* Not implemented for now... */
12495 /*     case TARGET_NR_mq_notify: */
12496 /*         break; */
12497 
12498     case TARGET_NR_mq_getsetattr:
12499         {
12500             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12501             ret = 0;
12502             if (arg2 != 0) {
12503                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12504                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12505                                            &posix_mq_attr_out));
12506             } else if (arg3 != 0) {
12507                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12508             }
12509             if (ret == 0 && arg3 != 0) {
12510                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12511             }
12512         }
12513         return ret;
12514 #endif
12515 
12516 #ifdef CONFIG_SPLICE
12517 #ifdef TARGET_NR_tee
12518     case TARGET_NR_tee:
12519         {
12520             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12521         }
12522         return ret;
12523 #endif
12524 #ifdef TARGET_NR_splice
12525     case TARGET_NR_splice:
12526         {
12527             loff_t loff_in, loff_out;
12528             loff_t *ploff_in = NULL, *ploff_out = NULL;
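            /*
             * Each offset, when supplied, is read from guest memory, updated
             * by the host splice(), and then written back to the guest.
             */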
12529             if (arg2) {
12530                 if (get_user_u64(loff_in, arg2)) {
12531                     return -TARGET_EFAULT;
12532                 }
12533                 ploff_in = &loff_in;
12534             }
12535             if (arg4) {
12536                 if (get_user_u64(loff_out, arg4)) {
12537                     return -TARGET_EFAULT;
12538                 }
12539                 ploff_out = &loff_out;
12540             }
12541             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12542             if (arg2) {
12543                 if (put_user_u64(loff_in, arg2)) {
12544                     return -TARGET_EFAULT;
12545                 }
12546             }
12547             if (arg4) {
12548                 if (put_user_u64(loff_out, arg4)) {
12549                     return -TARGET_EFAULT;
12550                 }
12551             }
12552         }
12553         return ret;
12554 #endif
12555 #ifdef TARGET_NR_vmsplice
12556     case TARGET_NR_vmsplice:
12557         {
12558             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12559             if (vec != NULL) {
12560                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12561                 unlock_iovec(vec, arg2, arg3, 0);
12562             } else {
12563                 ret = -host_to_target_errno(errno);
12564             }
12565         }
12566         return ret;
12567 #endif
12568 #endif /* CONFIG_SPLICE */
12569 #ifdef CONFIG_EVENTFD
12570 #if defined(TARGET_NR_eventfd)
12571     case TARGET_NR_eventfd:
12572         ret = get_errno(eventfd(arg1, 0));
12573         if (ret >= 0) {
12574             fd_trans_register(ret, &target_eventfd_trans);
12575         }
12576         return ret;
12577 #endif
12578 #if defined(TARGET_NR_eventfd2)
12579     case TARGET_NR_eventfd2:
12580     {
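        /*
         * Only the O_NONBLOCK and O_CLOEXEC bits (which EFD_NONBLOCK and
         * EFD_CLOEXEC share) may differ between guest and host, so remap
         * just those and pass the remaining flag bits through unchanged.
         */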
12581         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12582         if (arg2 & TARGET_O_NONBLOCK) {
12583             host_flags |= O_NONBLOCK;
12584         }
12585         if (arg2 & TARGET_O_CLOEXEC) {
12586             host_flags |= O_CLOEXEC;
12587         }
12588         ret = get_errno(eventfd(arg1, host_flags));
12589         if (ret >= 0) {
12590             fd_trans_register(ret, &target_eventfd_trans);
12591         }
12592         return ret;
12593     }
12594 #endif
12595 #endif /* CONFIG_EVENTFD  */
12596 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12597     case TARGET_NR_fallocate:
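        /*
         * On 32-bit ABIs the 64-bit offset and length each arrive split
         * across two registers and are reassembled with target_offset64().
         */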
12598 #if TARGET_ABI_BITS == 32
12599         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12600                                   target_offset64(arg5, arg6)));
12601 #else
12602         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12603 #endif
12604         return ret;
12605 #endif
12606 #if defined(CONFIG_SYNC_FILE_RANGE)
12607 #if defined(TARGET_NR_sync_file_range)
12608     case TARGET_NR_sync_file_range:
12609 #if TARGET_ABI_BITS == 32
12610 #if defined(TARGET_MIPS)
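        /*
         * MIPS o32 aligns 64-bit arguments to even register pairs, which
         * inserts a padding slot after the fd and shifts the offset, length
         * and flags arguments up by one.
         */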
12611         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12612                                         target_offset64(arg5, arg6), arg7));
12613 #else
12614         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12615                                         target_offset64(arg4, arg5), arg6));
12616 #endif /* !TARGET_MIPS */
12617 #else
12618         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12619 #endif
12620         return ret;
12621 #endif
12622 #if defined(TARGET_NR_sync_file_range2) || \
12623     defined(TARGET_NR_arm_sync_file_range)
12624 #if defined(TARGET_NR_sync_file_range2)
12625     case TARGET_NR_sync_file_range2:
12626 #endif
12627 #if defined(TARGET_NR_arm_sync_file_range)
12628     case TARGET_NR_arm_sync_file_range:
12629 #endif
12630         /* This is like sync_file_range but the arguments are reordered */
12631 #if TARGET_ABI_BITS == 32
12632         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12633                                         target_offset64(arg5, arg6), arg2));
12634 #else
12635         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12636 #endif
12637         return ret;
12638 #endif
12639 #endif
12640 #if defined(TARGET_NR_signalfd4)
12641     case TARGET_NR_signalfd4:
12642         return do_signalfd4(arg1, arg2, arg4);
12643 #endif
12644 #if defined(TARGET_NR_signalfd)
12645     case TARGET_NR_signalfd:
12646         return do_signalfd4(arg1, arg2, 0);
12647 #endif
12648 #if defined(CONFIG_EPOLL)
12649 #if defined(TARGET_NR_epoll_create)
12650     case TARGET_NR_epoll_create:
12651         return get_errno(epoll_create(arg1));
12652 #endif
12653 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12654     case TARGET_NR_epoll_create1:
12655         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12656 #endif
12657 #if defined(TARGET_NR_epoll_ctl)
12658     case TARGET_NR_epoll_ctl:
12659     {
12660         struct epoll_event ep;
12661         struct epoll_event *epp = 0;
12662         if (arg4) {
12663             if (arg2 != EPOLL_CTL_DEL) {
12664                 struct target_epoll_event *target_ep;
12665                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12666                     return -TARGET_EFAULT;
12667                 }
12668                 ep.events = tswap32(target_ep->events);
12669                 /*
12670                  * The epoll_data_t union is just opaque data to the kernel,
12671                  * so we transfer all 64 bits across and need not worry what
12672                  * actual data type it is.
12673                  */
12674                 ep.data.u64 = tswap64(target_ep->data.u64);
12675                 unlock_user_struct(target_ep, arg4, 0);
12676             }
12677             /*
12678              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12679              * non-null pointer, even though this argument is ignored.
12680              */
12682             epp = &ep;
12683         }
12684         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12685     }
12686 #endif
12687 
12688 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12689 #if defined(TARGET_NR_epoll_wait)
12690     case TARGET_NR_epoll_wait:
12691 #endif
12692 #if defined(TARGET_NR_epoll_pwait)
12693     case TARGET_NR_epoll_pwait:
12694 #endif
12695     {
12696         struct target_epoll_event *target_ep;
12697         struct epoll_event *ep;
12698         int epfd = arg1;
12699         int maxevents = arg3;
12700         int timeout = arg4;
12701 
12702         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12703             return -TARGET_EINVAL;
12704         }
12705 
12706         target_ep = lock_user(VERIFY_WRITE, arg2,
12707                               maxevents * sizeof(struct target_epoll_event), 1);
12708         if (!target_ep) {
12709             return -TARGET_EFAULT;
12710         }
12711 
12712         ep = g_try_new(struct epoll_event, maxevents);
12713         if (!ep) {
12714             unlock_user(target_ep, arg2, 0);
12715             return -TARGET_ENOMEM;
12716         }
12717 
12718         switch (num) {
12719 #if defined(TARGET_NR_epoll_pwait)
12720         case TARGET_NR_epoll_pwait:
12721         {
12722             target_sigset_t *target_set;
12723             sigset_t _set, *set = &_set;
12724 
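            /* arg5/arg6 carry the optional guest sigmask and its size. */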
12725             if (arg5) {
12726                 if (arg6 != sizeof(target_sigset_t)) {
12727                     ret = -TARGET_EINVAL;
12728                     break;
12729                 }
12730 
12731                 target_set = lock_user(VERIFY_READ, arg5,
12732                                        sizeof(target_sigset_t), 1);
12733                 if (!target_set) {
12734                     ret = -TARGET_EFAULT;
12735                     break;
12736                 }
12737                 target_to_host_sigset(set, target_set);
12738                 unlock_user(target_set, arg5, 0);
12739             } else {
12740                 set = NULL;
12741             }
12742 
12743             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12744                                              set, SIGSET_T_SIZE));
12745             break;
12746         }
12747 #endif
12748 #if defined(TARGET_NR_epoll_wait)
12749         case TARGET_NR_epoll_wait:
12750             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12751                                              NULL, 0));
12752             break;
12753 #endif
12754         default:
12755             ret = -TARGET_ENOSYS;
12756         }
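        /*
         * On success ret is the number of ready events; swap each returned
         * epoll_event back to guest byte order before copying the array out.
         */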
12757         if (!is_error(ret)) {
12758             int i;
12759             for (i = 0; i < ret; i++) {
12760                 target_ep[i].events = tswap32(ep[i].events);
12761                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12762             }
12763             unlock_user(target_ep, arg2,
12764                         ret * sizeof(struct target_epoll_event));
12765         } else {
12766             unlock_user(target_ep, arg2, 0);
12767         }
12768         g_free(ep);
12769         return ret;
12770     }
12771 #endif
12772 #endif
12773 #ifdef TARGET_NR_prlimit64
12774     case TARGET_NR_prlimit64:
12775     {
12776         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12777         struct target_rlimit64 *target_rnew, *target_rold;
12778         struct host_rlimit64 rnew, rold, *rnewp = 0;
12779         int resource = target_to_host_resource(arg2);
12780 
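        /*
         * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
         * forwarded to the host (rnewp stays NULL), presumably because they
         * would constrain the QEMU process itself rather than just the guest.
         */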
12781         if (arg3 && (resource != RLIMIT_AS &&
12782                      resource != RLIMIT_DATA &&
12783                      resource != RLIMIT_STACK)) {
12784             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12785                 return -TARGET_EFAULT;
12786             }
12787             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12788             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12789             unlock_user_struct(target_rnew, arg3, 0);
12790             rnewp = &rnew;
12791         }
12792 
12793         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12794         if (!is_error(ret) && arg4) {
12795             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12796                 return -TARGET_EFAULT;
12797             }
12798             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12799             target_rold->rlim_max = tswap64(rold.rlim_max);
12800             unlock_user_struct(target_rold, arg4, 1);
12801         }
12802         return ret;
12803     }
12804 #endif
12805 #ifdef TARGET_NR_gethostname
12806     case TARGET_NR_gethostname:
12807     {
12808         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12809         if (name) {
12810             ret = get_errno(gethostname(name, arg2));
12811             unlock_user(name, arg1, arg2);
12812         } else {
12813             ret = -TARGET_EFAULT;
12814         }
12815         return ret;
12816     }
12817 #endif
12818 #ifdef TARGET_NR_atomic_cmpxchg_32
12819     case TARGET_NR_atomic_cmpxchg_32:
12820     {
12821         /* should use start_exclusive from main.c */
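        /*
         * Guest compare-and-swap: arg2 is the expected old value, arg1 the
         * new value and arg6 the guest address; the value actually read is
         * returned.  Note the read/compare/write below is not atomic with
         * respect to other threads.
         */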
12822         abi_ulong mem_value;
12823         if (get_user_u32(mem_value, arg6)) {
12824             target_siginfo_t info;
12825             info.si_signo = SIGSEGV;
12826             info.si_errno = 0;
12827             info.si_code = TARGET_SEGV_MAPERR;
12828             info._sifields._sigfault._addr = arg6;
12829             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12830                          QEMU_SI_FAULT, &info);
12831             ret = 0xdeadbeef;
12832 
12833         }
12834         if (mem_value == arg2)
12835             put_user_u32(arg1, arg6);
12836         return mem_value;
12837     }
12838 #endif
12839 #ifdef TARGET_NR_atomic_barrier
12840     case TARGET_NR_atomic_barrier:
12841         /* Like the kernel implementation and the QEMU ARM barrier,
12842            treat this as a no-op. */
12843         return 0;
12844 #endif
12845 
12846 #ifdef TARGET_NR_timer_create
12847     case TARGET_NR_timer_create:
12848     {
12849         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12850 
12851         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12852 
12853         int clkid = arg1;
12854         int timer_index = next_free_host_timer();
12855 
12856         if (timer_index < 0) {
12857             ret = -TARGET_EAGAIN;
12858         } else {
12859             timer_t *phtimer = g_posix_timers + timer_index;
12860 
12861             if (arg2) {
12862                 phost_sevp = &host_sevp;
12863                 ret = target_to_host_sigevent(phost_sevp, arg2);
12864                 if (ret != 0) {
12865                     return ret;
12866                 }
12867             }
12868 
12869             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12870             if (ret) {
12871                 phtimer = NULL;
12872             } else {
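                /*
                 * Hand the guest a timer id that encodes the slot index
                 * together with TIMER_MAGIC; get_timer_id() checks and strips
                 * the magic again on later timer_* syscalls.
                 */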
12873                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12874                     return -TARGET_EFAULT;
12875                 }
12876             }
12877         }
12878         return ret;
12879     }
12880 #endif
12881 
12882 #ifdef TARGET_NR_timer_settime
12883     case TARGET_NR_timer_settime:
12884     {
12885         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12886          * struct itimerspec * old_value */
12887         target_timer_t timerid = get_timer_id(arg1);
12888 
12889         if (timerid < 0) {
12890             ret = timerid;
12891         } else if (arg3 == 0) {
12892             ret = -TARGET_EINVAL;
12893         } else {
12894             timer_t htimer = g_posix_timers[timerid];
12895             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12896 
12897             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12898                 return -TARGET_EFAULT;
12899             }
12900             ret = get_errno(
12901                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12902             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12903                 return -TARGET_EFAULT;
12904             }
12905         }
12906         return ret;
12907     }
12908 #endif
12909 
12910 #ifdef TARGET_NR_timer_settime64
12911     case TARGET_NR_timer_settime64:
12912     {
12913         target_timer_t timerid = get_timer_id(arg1);
12914 
12915         if (timerid < 0) {
12916             ret = timerid;
12917         } else if (arg3 == 0) {
12918             ret = -TARGET_EINVAL;
12919         } else {
12920             timer_t htimer = g_posix_timers[timerid];
12921             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12922 
12923             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12924                 return -TARGET_EFAULT;
12925             }
12926             ret = get_errno(
12927                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12928             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12929                 return -TARGET_EFAULT;
12930             }
12931         }
12932         return ret;
12933     }
12934 #endif
12935 
12936 #ifdef TARGET_NR_timer_gettime
12937     case TARGET_NR_timer_gettime:
12938     {
12939         /* args: timer_t timerid, struct itimerspec *curr_value */
12940         target_timer_t timerid = get_timer_id(arg1);
12941 
12942         if (timerid < 0) {
12943             ret = timerid;
12944         } else if (!arg2) {
12945             ret = -TARGET_EFAULT;
12946         } else {
12947             timer_t htimer = g_posix_timers[timerid];
12948             struct itimerspec hspec;
12949             ret = get_errno(timer_gettime(htimer, &hspec));
12950 
12951             if (host_to_target_itimerspec(arg2, &hspec)) {
12952                 ret = -TARGET_EFAULT;
12953             }
12954         }
12955         return ret;
12956     }
12957 #endif
12958 
12959 #ifdef TARGET_NR_timer_gettime64
12960     case TARGET_NR_timer_gettime64:
12961     {
12962         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12963         target_timer_t timerid = get_timer_id(arg1);
12964 
12965         if (timerid < 0) {
12966             ret = timerid;
12967         } else if (!arg2) {
12968             ret = -TARGET_EFAULT;
12969         } else {
12970             timer_t htimer = g_posix_timers[timerid];
12971             struct itimerspec hspec;
12972             ret = get_errno(timer_gettime(htimer, &hspec));
12973 
12974             if (host_to_target_itimerspec64(arg2, &hspec)) {
12975                 ret = -TARGET_EFAULT;
12976             }
12977         }
12978         return ret;
12979     }
12980 #endif
12981 
12982 #ifdef TARGET_NR_timer_getoverrun
12983     case TARGET_NR_timer_getoverrun:
12984     {
12985         /* args: timer_t timerid */
12986         target_timer_t timerid = get_timer_id(arg1);
12987 
12988         if (timerid < 0) {
12989             ret = timerid;
12990         } else {
12991             timer_t htimer = g_posix_timers[timerid];
12992             ret = get_errno(timer_getoverrun(htimer));
12993         }
12994         return ret;
12995     }
12996 #endif
12997 
12998 #ifdef TARGET_NR_timer_delete
12999     case TARGET_NR_timer_delete:
13000     {
13001         /* args: timer_t timerid */
13002         target_timer_t timerid = get_timer_id(arg1);
13003 
13004         if (timerid < 0) {
13005             ret = timerid;
13006         } else {
13007             timer_t htimer = g_posix_timers[timerid];
13008             ret = get_errno(timer_delete(htimer));
13009             g_posix_timers[timerid] = 0;
13010         }
13011         return ret;
13012     }
13013 #endif
13014 
13015 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13016     case TARGET_NR_timerfd_create:
13017         return get_errno(timerfd_create(arg1,
13018                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13019 #endif
13020 
13021 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13022     case TARGET_NR_timerfd_gettime:
13023         {
13024             struct itimerspec its_curr;
13025 
13026             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13027 
13028             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13029                 return -TARGET_EFAULT;
13030             }
13031         }
13032         return ret;
13033 #endif
13034 
13035 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13036     case TARGET_NR_timerfd_gettime64:
13037         {
13038             struct itimerspec its_curr;
13039 
13040             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13041 
13042             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13043                 return -TARGET_EFAULT;
13044             }
13045         }
13046         return ret;
13047 #endif
13048 
13049 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13050     case TARGET_NR_timerfd_settime:
13051         {
13052             struct itimerspec its_new, its_old, *p_new;
13053 
13054             if (arg3) {
13055                 if (target_to_host_itimerspec(&its_new, arg3)) {
13056                     return -TARGET_EFAULT;
13057                 }
13058                 p_new = &its_new;
13059             } else {
13060                 p_new = NULL;
13061             }
13062 
13063             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13064 
13065             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13066                 return -TARGET_EFAULT;
13067             }
13068         }
13069         return ret;
13070 #endif
13071 
13072 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13073     case TARGET_NR_timerfd_settime64:
13074         {
13075             struct itimerspec its_new, its_old, *p_new;
13076 
13077             if (arg3) {
13078                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13079                     return -TARGET_EFAULT;
13080                 }
13081                 p_new = &its_new;
13082             } else {
13083                 p_new = NULL;
13084             }
13085 
13086             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13087 
13088             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13089                 return -TARGET_EFAULT;
13090             }
13091         }
13092         return ret;
13093 #endif
13094 
13095 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13096     case TARGET_NR_ioprio_get:
13097         return get_errno(ioprio_get(arg1, arg2));
13098 #endif
13099 
13100 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13101     case TARGET_NR_ioprio_set:
13102         return get_errno(ioprio_set(arg1, arg2, arg3));
13103 #endif
13104 
13105 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13106     case TARGET_NR_setns:
13107         return get_errno(setns(arg1, arg2));
13108 #endif
13109 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13110     case TARGET_NR_unshare:
13111         return get_errno(unshare(arg1));
13112 #endif
13113 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13114     case TARGET_NR_kcmp:
13115         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13116 #endif
13117 #ifdef TARGET_NR_swapcontext
13118     case TARGET_NR_swapcontext:
13119         /* PowerPC specific.  */
13120         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13121 #endif
13122 #ifdef TARGET_NR_memfd_create
13123     case TARGET_NR_memfd_create:
13124         p = lock_user_string(arg1);
13125         if (!p) {
13126             return -TARGET_EFAULT;
13127         }
13128         ret = get_errno(memfd_create(p, arg2));
13129         fd_trans_unregister(ret);
13130         unlock_user(p, arg1, 0);
13131         return ret;
13132 #endif
13133 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13134     case TARGET_NR_membarrier:
13135         return get_errno(membarrier(arg1, arg2));
13136 #endif
13137 
13138 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13139     case TARGET_NR_copy_file_range:
13140         {
13141             loff_t inoff, outoff;
13142             loff_t *pinoff = NULL, *poutoff = NULL;
13143 
13144             if (arg2) {
13145                 if (get_user_u64(inoff, arg2)) {
13146                     return -TARGET_EFAULT;
13147                 }
13148                 pinoff = &inoff;
13149             }
13150             if (arg4) {
13151                 if (get_user_u64(outoff, arg4)) {
13152                     return -TARGET_EFAULT;
13153                 }
13154                 poutoff = &outoff;
13155             }
13156             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13157                                                  arg5, arg6));
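            /* Write the updated offsets back only if data was actually copied. */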
13158             if (!is_error(ret) && ret > 0) {
13159                 if (arg2) {
13160                     if (put_user_u64(inoff, arg2)) {
13161                         return -TARGET_EFAULT;
13162                     }
13163                 }
13164                 if (arg4) {
13165                     if (put_user_u64(outoff, arg4)) {
13166                         return -TARGET_EFAULT;
13167                     }
13168                 }
13169             }
13170         }
13171         return ret;
13172 #endif
13173 
13174     default:
13175         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13176         return -TARGET_ENOSYS;
13177     }
13178     return ret;
13179 }
13180 
13181 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13182                     abi_long arg2, abi_long arg3, abi_long arg4,
13183                     abi_long arg5, abi_long arg6, abi_long arg7,
13184                     abi_long arg8)
13185 {
13186     CPUState *cpu = env_cpu(cpu_env);
13187     abi_long ret;
13188 
13189 #ifdef DEBUG_ERESTARTSYS
13190     /* Debug-only code for exercising the syscall-restart code paths
13191      * in the per-architecture cpu main loops: restart every syscall
13192      * the guest makes once before letting it through.
13193      */
13194     {
13195         static bool flag;
13196         flag = !flag;
13197         if (flag) {
13198             return -TARGET_ERESTARTSYS;
13199         }
13200     }
13201 #endif
13202 
13203     record_syscall_start(cpu, num, arg1,
13204                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13205 
13206     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13207         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13208     }
13209 
13210     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13211                       arg5, arg6, arg7, arg8);
13212 
13213     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13214         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13215                           arg3, arg4, arg5, arg6);
13216     }
13217 
13218     record_syscall_return(cpu, num, ret);
13219     return ret;
13220 }
13221