xref: /openbmc/qemu/linux-user/syscall.c (revision 22db1213)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "qemu/guest-random.h"
131 #include "qemu/selfmap.h"
132 #include "user/syscall-trace.h"
133 #include "qapi/error.h"
134 #include "fd-trans.h"
135 #include "tcg/tcg.h"
136 
137 #ifndef CLONE_IO
138 #define CLONE_IO                0x80000000      /* Clone io context */
139 #endif
140 
141 /* We can't directly call the host clone syscall, because this will
142  * badly confuse libc (breaking mutexes, for example). So we must
143  * divide clone flags into:
144  *  * flag combinations that look like pthread_create()
145  *  * flag combinations that look like fork()
146  *  * flags we can implement within QEMU itself
147  *  * flags we can't support and will return an error for
148  */
149 /* For thread creation, all these flags must be present; for
150  * fork, none must be present.
151  */
152 #define CLONE_THREAD_FLAGS                              \
153     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
154      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
155 
156 /* These flags are ignored:
157  * CLONE_DETACHED is now ignored by the kernel;
158  * CLONE_IO is just an optimisation hint to the I/O scheduler
159  */
160 #define CLONE_IGNORED_FLAGS                     \
161     (CLONE_DETACHED | CLONE_IO)
162 
163 /* Flags for fork which we can implement within QEMU itself */
164 #define CLONE_OPTIONAL_FORK_FLAGS               \
165     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
166      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
167 
168 /* Flags for thread creation which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
170     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
171      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
172 
173 #define CLONE_INVALID_FORK_FLAGS                                        \
174     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
175 
176 #define CLONE_INVALID_THREAD_FLAGS                                      \
177     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
178        CLONE_IGNORED_FLAGS))
179 
180 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181  * have almost all been allocated. We cannot support any of
182  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184  * The checks against the invalid thread masks above will catch these.
185  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
186  */
187 
188 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189  * once. This exercises the codepaths for restart.
190  */
191 //#define DEBUG_ERESTARTSYS
192 
193 //#include <linux/msdos_fs.h>
194 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
195 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
196 
197 #undef _syscall0
198 #undef _syscall1
199 #undef _syscall2
200 #undef _syscall3
201 #undef _syscall4
202 #undef _syscall5
203 #undef _syscall6
204 
205 #define _syscall0(type,name)		\
206 static type name (void)			\
207 {					\
208 	return syscall(__NR_##name);	\
209 }
210 
211 #define _syscall1(type,name,type1,arg1)		\
212 static type name (type1 arg1)			\
213 {						\
214 	return syscall(__NR_##name, arg1);	\
215 }
216 
217 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
218 static type name (type1 arg1,type2 arg2)		\
219 {							\
220 	return syscall(__NR_##name, arg1, arg2);	\
221 }
222 
223 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
224 static type name (type1 arg1,type2 arg2,type3 arg3)		\
225 {								\
226 	return syscall(__NR_##name, arg1, arg2, arg3);		\
227 }
228 
229 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
231 {										\
232 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
233 }
234 
235 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236 		  type5,arg5)							\
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
238 {										\
239 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
240 }
241 
242 
243 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
244 		  type5,arg5,type6,arg6)					\
245 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
246                   type6 arg6)							\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
249 }
250 
251 
252 #define __NR_sys_uname __NR_uname
253 #define __NR_sys_getcwd1 __NR_getcwd
254 #define __NR_sys_getdents __NR_getdents
255 #define __NR_sys_getdents64 __NR_getdents64
256 #define __NR_sys_getpriority __NR_getpriority
257 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259 #define __NR_sys_syslog __NR_syslog
260 #if defined(__NR_futex)
261 # define __NR_sys_futex __NR_futex
262 #endif
263 #if defined(__NR_futex_time64)
264 # define __NR_sys_futex_time64 __NR_futex_time64
265 #endif
266 #define __NR_sys_inotify_init __NR_inotify_init
267 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269 #define __NR_sys_statx __NR_statx
270 
271 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272 #define __NR__llseek __NR_lseek
273 #endif
274 
275 /* Newer kernel ports have llseek() instead of _llseek() */
276 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277 #define TARGET_NR__llseek TARGET_NR_llseek
278 #endif
279 
280 #define __NR_sys_gettid __NR_gettid
281 _syscall0(int, sys_gettid)
282 
283 /* For the 64-bit guest on 32-bit host case we must emulate
284  * getdents using getdents64, because otherwise the host
285  * might hand us back more dirent records than we can fit
286  * into the guest buffer after structure format conversion.
287  * Otherwise we emulate getdents with getdents if the host has it.
288  */
289 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
290 #define EMULATE_GETDENTS_WITH_GETDENTS
291 #endif
292 
293 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
294 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
295 #endif
296 #if (defined(TARGET_NR_getdents) && \
297       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
298     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
299 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
300 #endif
301 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
302 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
303           loff_t *, res, uint, wh);
304 #endif
305 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
306 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
307           siginfo_t *, uinfo)
308 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
309 #ifdef __NR_exit_group
310 _syscall1(int,exit_group,int,error_code)
311 #endif
312 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
313 _syscall1(int,set_tid_address,int *,tidptr)
314 #endif
315 #if defined(__NR_futex)
316 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
317           const struct timespec *,timeout,int *,uaddr2,int,val3)
318 #endif
319 #if defined(__NR_futex_time64)
320 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
321           const struct timespec *,timeout,int *,uaddr2,int,val3)
322 #endif
323 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
324 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
325           unsigned long *, user_mask_ptr);
326 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
327 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
328           unsigned long *, user_mask_ptr);
329 #define __NR_sys_getcpu __NR_getcpu
330 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
331 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
332           void *, arg);
333 _syscall2(int, capget, struct __user_cap_header_struct *, header,
334           struct __user_cap_data_struct *, data);
335 _syscall2(int, capset, struct __user_cap_header_struct *, header,
336           struct __user_cap_data_struct *, data);
337 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
338 _syscall2(int, ioprio_get, int, which, int, who)
339 #endif
340 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
341 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
342 #endif
343 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
344 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
345 #endif
346 
347 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
348 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
349           unsigned long, idx1, unsigned long, idx2)
350 #endif
351 
352 /*
353  * It is assumed that struct statx is architecture independent.
354  */
355 #if defined(TARGET_NR_statx) && defined(__NR_statx)
356 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
357           unsigned int, mask, struct target_statx *, statxbuf)
358 #endif
359 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
360 _syscall2(int, membarrier, int, cmd, int, flags)
361 #endif
362 
363 static bitmask_transtbl fcntl_flags_tbl[] = {
364   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
365   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
366   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
367   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
368   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
369   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
370   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
371   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
372   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
373   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
374   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
375   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
376   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
377 #if defined(O_DIRECT)
378   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
379 #endif
380 #if defined(O_NOATIME)
381   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
382 #endif
383 #if defined(O_CLOEXEC)
384   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
385 #endif
386 #if defined(O_PATH)
387   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
388 #endif
389 #if defined(O_TMPFILE)
390   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
391 #endif
392   /* Don't terminate the list prematurely on 64-bit host+guest.  */
393 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
394   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
395 #endif
396   { 0, 0, 0, 0 }
397 };
398 
399 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
400 
401 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
402 #if defined(__NR_utimensat)
403 #define __NR_sys_utimensat __NR_utimensat
404 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
405           const struct timespec *,tsp,int,flags)
406 #else
407 static int sys_utimensat(int dirfd, const char *pathname,
408                          const struct timespec times[2], int flags)
409 {
410     errno = ENOSYS;
411     return -1;
412 }
413 #endif
414 #endif /* TARGET_NR_utimensat */
415 
416 #ifdef TARGET_NR_renameat2
417 #if defined(__NR_renameat2)
418 #define __NR_sys_renameat2 __NR_renameat2
419 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
420           const char *, new, unsigned int, flags)
421 #else
422 static int sys_renameat2(int oldfd, const char *old,
423                          int newfd, const char *new, int flags)
424 {
425     if (flags == 0) {
426         return renameat(oldfd, old, newfd, new);
427     }
428     errno = ENOSYS;
429     return -1;
430 }
431 #endif
432 #endif /* TARGET_NR_renameat2 */
433 
434 #ifdef CONFIG_INOTIFY
435 #include <sys/inotify.h>
436 
437 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
438 static int sys_inotify_init(void)
439 {
440   return (inotify_init());
441 }
442 #endif
443 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
444 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
445 {
446   return (inotify_add_watch(fd, pathname, mask));
447 }
448 #endif
449 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
450 static int sys_inotify_rm_watch(int fd, int32_t wd)
451 {
452   return (inotify_rm_watch(fd, wd));
453 }
454 #endif
455 #ifdef CONFIG_INOTIFY1
456 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
457 static int sys_inotify_init1(int flags)
458 {
459   return (inotify_init1(flags));
460 }
461 #endif
462 #endif
463 #else
464 /* Userspace can usually survive runtime without inotify */
465 #undef TARGET_NR_inotify_init
466 #undef TARGET_NR_inotify_init1
467 #undef TARGET_NR_inotify_add_watch
468 #undef TARGET_NR_inotify_rm_watch
469 #endif /* CONFIG_INOTIFY  */
470 
471 #if defined(TARGET_NR_prlimit64)
472 #ifndef __NR_prlimit64
473 # define __NR_prlimit64 -1
474 #endif
475 #define __NR_sys_prlimit64 __NR_prlimit64
476 /* The glibc rlimit structure may not be that used by the underlying syscall */
477 struct host_rlimit64 {
478     uint64_t rlim_cur;
479     uint64_t rlim_max;
480 };
481 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
482           const struct host_rlimit64 *, new_limit,
483           struct host_rlimit64 *, old_limit)
484 #endif
485 
486 
487 #if defined(TARGET_NR_timer_create)
488 /* Maximum of 32 active POSIX timers allowed at any one time. */
489 static timer_t g_posix_timers[32] = { 0, } ;
490 
491 static inline int next_free_host_timer(void)
492 {
493     int k ;
494     /* FIXME: Does finding the next free slot require a lock? */
495     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
496         if (g_posix_timers[k] == 0) {
497             g_posix_timers[k] = (timer_t) 1;
498             return k;
499         }
500     }
501     return -1;
502 }
503 #endif
504 
505 #define ERRNO_TABLE_SIZE 1200
506 
507 /* target_to_host_errno_table[] is initialized from
508  * host_to_target_errno_table[] in syscall_init(). */
509 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
510 };
511 
512 /*
513  * This list is the union of errno values overridden in asm-<arch>/errno.h
514  * minus the errnos that are not actually generic to all archs.
515  */
516 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
517     [EAGAIN]		= TARGET_EAGAIN,
518     [EIDRM]		= TARGET_EIDRM,
519     [ECHRNG]		= TARGET_ECHRNG,
520     [EL2NSYNC]		= TARGET_EL2NSYNC,
521     [EL3HLT]		= TARGET_EL3HLT,
522     [EL3RST]		= TARGET_EL3RST,
523     [ELNRNG]		= TARGET_ELNRNG,
524     [EUNATCH]		= TARGET_EUNATCH,
525     [ENOCSI]		= TARGET_ENOCSI,
526     [EL2HLT]		= TARGET_EL2HLT,
527     [EDEADLK]		= TARGET_EDEADLK,
528     [ENOLCK]		= TARGET_ENOLCK,
529     [EBADE]		= TARGET_EBADE,
530     [EBADR]		= TARGET_EBADR,
531     [EXFULL]		= TARGET_EXFULL,
532     [ENOANO]		= TARGET_ENOANO,
533     [EBADRQC]		= TARGET_EBADRQC,
534     [EBADSLT]		= TARGET_EBADSLT,
535     [EBFONT]		= TARGET_EBFONT,
536     [ENOSTR]		= TARGET_ENOSTR,
537     [ENODATA]		= TARGET_ENODATA,
538     [ETIME]		= TARGET_ETIME,
539     [ENOSR]		= TARGET_ENOSR,
540     [ENONET]		= TARGET_ENONET,
541     [ENOPKG]		= TARGET_ENOPKG,
542     [EREMOTE]		= TARGET_EREMOTE,
543     [ENOLINK]		= TARGET_ENOLINK,
544     [EADV]		= TARGET_EADV,
545     [ESRMNT]		= TARGET_ESRMNT,
546     [ECOMM]		= TARGET_ECOMM,
547     [EPROTO]		= TARGET_EPROTO,
548     [EDOTDOT]		= TARGET_EDOTDOT,
549     [EMULTIHOP]		= TARGET_EMULTIHOP,
550     [EBADMSG]		= TARGET_EBADMSG,
551     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
552     [EOVERFLOW]		= TARGET_EOVERFLOW,
553     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
554     [EBADFD]		= TARGET_EBADFD,
555     [EREMCHG]		= TARGET_EREMCHG,
556     [ELIBACC]		= TARGET_ELIBACC,
557     [ELIBBAD]		= TARGET_ELIBBAD,
558     [ELIBSCN]		= TARGET_ELIBSCN,
559     [ELIBMAX]		= TARGET_ELIBMAX,
560     [ELIBEXEC]		= TARGET_ELIBEXEC,
561     [EILSEQ]		= TARGET_EILSEQ,
562     [ENOSYS]		= TARGET_ENOSYS,
563     [ELOOP]		= TARGET_ELOOP,
564     [ERESTART]		= TARGET_ERESTART,
565     [ESTRPIPE]		= TARGET_ESTRPIPE,
566     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
567     [EUSERS]		= TARGET_EUSERS,
568     [ENOTSOCK]		= TARGET_ENOTSOCK,
569     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
570     [EMSGSIZE]		= TARGET_EMSGSIZE,
571     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
572     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
573     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
574     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
575     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
576     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
577     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
578     [EADDRINUSE]	= TARGET_EADDRINUSE,
579     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
580     [ENETDOWN]		= TARGET_ENETDOWN,
581     [ENETUNREACH]	= TARGET_ENETUNREACH,
582     [ENETRESET]		= TARGET_ENETRESET,
583     [ECONNABORTED]	= TARGET_ECONNABORTED,
584     [ECONNRESET]	= TARGET_ECONNRESET,
585     [ENOBUFS]		= TARGET_ENOBUFS,
586     [EISCONN]		= TARGET_EISCONN,
587     [ENOTCONN]		= TARGET_ENOTCONN,
588     [EUCLEAN]		= TARGET_EUCLEAN,
589     [ENOTNAM]		= TARGET_ENOTNAM,
590     [ENAVAIL]		= TARGET_ENAVAIL,
591     [EISNAM]		= TARGET_EISNAM,
592     [EREMOTEIO]		= TARGET_EREMOTEIO,
593     [EDQUOT]            = TARGET_EDQUOT,
594     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
595     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
596     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
597     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
598     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
599     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
600     [EALREADY]		= TARGET_EALREADY,
601     [EINPROGRESS]	= TARGET_EINPROGRESS,
602     [ESTALE]		= TARGET_ESTALE,
603     [ECANCELED]		= TARGET_ECANCELED,
604     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
605     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
606 #ifdef ENOKEY
607     [ENOKEY]		= TARGET_ENOKEY,
608 #endif
609 #ifdef EKEYEXPIRED
610     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
611 #endif
612 #ifdef EKEYREVOKED
613     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
614 #endif
615 #ifdef EKEYREJECTED
616     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
617 #endif
618 #ifdef EOWNERDEAD
619     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
620 #endif
621 #ifdef ENOTRECOVERABLE
622     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
623 #endif
624 #ifdef ENOMSG
625     [ENOMSG]            = TARGET_ENOMSG,
626 #endif
627 #ifdef ERKFILL
628     [ERFKILL]           = TARGET_ERFKILL,
629 #endif
630 #ifdef EHWPOISON
631     [EHWPOISON]         = TARGET_EHWPOISON,
632 #endif
633 };
634 
635 static inline int host_to_target_errno(int err)
636 {
637     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
638         host_to_target_errno_table[err]) {
639         return host_to_target_errno_table[err];
640     }
641     return err;
642 }
643 
644 static inline int target_to_host_errno(int err)
645 {
646     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
647         target_to_host_errno_table[err]) {
648         return target_to_host_errno_table[err];
649     }
650     return err;
651 }
652 
653 static inline abi_long get_errno(abi_long ret)
654 {
655     if (ret == -1)
656         return -host_to_target_errno(errno);
657     else
658         return ret;
659 }
660 
661 const char *target_strerror(int err)
662 {
663     if (err == TARGET_ERESTARTSYS) {
664         return "To be restarted";
665     }
666     if (err == TARGET_QEMU_ESIGRETURN) {
667         return "Successful exit from sigreturn";
668     }
669 
670     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
671         return NULL;
672     }
673     return strerror(target_to_host_errno(err));
674 }
675 
676 #define safe_syscall0(type, name) \
677 static type safe_##name(void) \
678 { \
679     return safe_syscall(__NR_##name); \
680 }
681 
682 #define safe_syscall1(type, name, type1, arg1) \
683 static type safe_##name(type1 arg1) \
684 { \
685     return safe_syscall(__NR_##name, arg1); \
686 }
687 
688 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
689 static type safe_##name(type1 arg1, type2 arg2) \
690 { \
691     return safe_syscall(__NR_##name, arg1, arg2); \
692 }
693 
694 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
695 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
696 { \
697     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
698 }
699 
700 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
701     type4, arg4) \
702 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
703 { \
704     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
705 }
706 
707 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
708     type4, arg4, type5, arg5) \
709 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
710     type5 arg5) \
711 { \
712     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
713 }
714 
715 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
716     type4, arg4, type5, arg5, type6, arg6) \
717 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
718     type5 arg5, type6 arg6) \
719 { \
720     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
721 }
722 
723 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
724 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
725 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
726               int, flags, mode_t, mode)
727 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
728 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
729               struct rusage *, rusage)
730 #endif
731 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
732               int, options, struct rusage *, rusage)
733 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
734 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
735     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
736 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
737               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
738 #endif
739 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
740 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
741               struct timespec *, tsp, const sigset_t *, sigmask,
742               size_t, sigsetsize)
743 #endif
744 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
745               int, maxevents, int, timeout, const sigset_t *, sigmask,
746               size_t, sigsetsize)
747 #if defined(__NR_futex)
748 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
749               const struct timespec *,timeout,int *,uaddr2,int,val3)
750 #endif
751 #if defined(__NR_futex_time64)
752 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
753               const struct timespec *,timeout,int *,uaddr2,int,val3)
754 #endif
755 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
756 safe_syscall2(int, kill, pid_t, pid, int, sig)
757 safe_syscall2(int, tkill, int, tid, int, sig)
758 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
759 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
760 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
761 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
762               unsigned long, pos_l, unsigned long, pos_h)
763 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
764               unsigned long, pos_l, unsigned long, pos_h)
765 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
766               socklen_t, addrlen)
767 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
768               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
769 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
770               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
771 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
772 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
773 safe_syscall2(int, flock, int, fd, int, operation)
774 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
775 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
776               const struct timespec *, uts, size_t, sigsetsize)
777 #endif
778 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
779               int, flags)
780 #if defined(TARGET_NR_nanosleep)
781 safe_syscall2(int, nanosleep, const struct timespec *, req,
782               struct timespec *, rem)
783 #endif
784 #if defined(TARGET_NR_clock_nanosleep) || \
785     defined(TARGET_NR_clock_nanosleep_time64)
786 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
787               const struct timespec *, req, struct timespec *, rem)
788 #endif
789 #ifdef __NR_ipc
790 #ifdef __s390x__
791 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
792               void *, ptr)
793 #else
794 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
795               void *, ptr, long, fifth)
796 #endif
797 #endif
798 #ifdef __NR_msgsnd
799 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
800               int, flags)
801 #endif
802 #ifdef __NR_msgrcv
803 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
804               long, msgtype, int, flags)
805 #endif
806 #ifdef __NR_semtimedop
807 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
808               unsigned, nsops, const struct timespec *, timeout)
809 #endif
810 #if defined(TARGET_NR_mq_timedsend) || \
811     defined(TARGET_NR_mq_timedsend_time64)
812 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
813               size_t, len, unsigned, prio, const struct timespec *, timeout)
814 #endif
815 #if defined(TARGET_NR_mq_timedreceive) || \
816     defined(TARGET_NR_mq_timedreceive_time64)
817 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
818               size_t, len, unsigned *, prio, const struct timespec *, timeout)
819 #endif
820 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
821 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
822               int, outfd, loff_t *, poutoff, size_t, length,
823               unsigned int, flags)
824 #endif
825 
826 /* We do ioctl like this rather than via safe_syscall3 to preserve the
827  * "third argument might be integer or pointer or not present" behaviour of
828  * the libc function.
829  */
830 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
831 /* Similarly for fcntl. Note that callers must always:
832  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
833  *  use the flock64 struct rather than unsuffixed flock
834  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
835  */
836 #ifdef __NR_fcntl64
837 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
838 #else
839 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
840 #endif
841 
842 static inline int host_to_target_sock_type(int host_type)
843 {
844     int target_type;
845 
846     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
847     case SOCK_DGRAM:
848         target_type = TARGET_SOCK_DGRAM;
849         break;
850     case SOCK_STREAM:
851         target_type = TARGET_SOCK_STREAM;
852         break;
853     default:
854         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
855         break;
856     }
857 
858 #if defined(SOCK_CLOEXEC)
859     if (host_type & SOCK_CLOEXEC) {
860         target_type |= TARGET_SOCK_CLOEXEC;
861     }
862 #endif
863 
864 #if defined(SOCK_NONBLOCK)
865     if (host_type & SOCK_NONBLOCK) {
866         target_type |= TARGET_SOCK_NONBLOCK;
867     }
868 #endif
869 
870     return target_type;
871 }
872 
873 static abi_ulong target_brk;
874 static abi_ulong target_original_brk;
875 static abi_ulong brk_page;
876 
877 void target_set_brk(abi_ulong new_brk)
878 {
879     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
880     brk_page = HOST_PAGE_ALIGN(target_brk);
881 }
882 
883 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
884 #define DEBUGF_BRK(message, args...)
885 
886 /* do_brk() must return target values and target errnos. */
887 abi_long do_brk(abi_ulong new_brk)
888 {
889     abi_long mapped_addr;
890     abi_ulong new_alloc_size;
891 
892     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
893 
894     if (!new_brk) {
895         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
896         return target_brk;
897     }
898     if (new_brk < target_original_brk) {
899         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
900                    target_brk);
901         return target_brk;
902     }
903 
904     /* If the new brk is less than the highest page reserved to the
905      * target heap allocation, set it and we're almost done...  */
906     if (new_brk <= brk_page) {
907         /* Heap contents are initialized to zero, as for anonymous
908          * mapped pages.  */
909         if (new_brk > target_brk) {
910             memset(g2h(target_brk), 0, new_brk - target_brk);
911         }
912 	target_brk = new_brk;
913         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
914 	return target_brk;
915     }
916 
917     /* We need to allocate more memory after the brk... Note that
918      * we don't use MAP_FIXED because that will map over the top of
919      * any existing mapping (like the one with the host libc or qemu
920      * itself); instead we treat "mapped but at wrong address" as
921      * a failure and unmap again.
922      */
923     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
924     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
925                                         PROT_READ|PROT_WRITE,
926                                         MAP_ANON|MAP_PRIVATE, 0, 0));
927 
928     if (mapped_addr == brk_page) {
929         /* Heap contents are initialized to zero, as for anonymous
930          * mapped pages.  Technically the new pages are already
931          * initialized to zero since they *are* anonymous mapped
932          * pages, however we have to take care with the contents that
933          * come from the remaining part of the previous page: it may
934          * contains garbage data due to a previous heap usage (grown
935          * then shrunken).  */
936         memset(g2h(target_brk), 0, brk_page - target_brk);
937 
938         target_brk = new_brk;
939         brk_page = HOST_PAGE_ALIGN(target_brk);
940         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
941             target_brk);
942         return target_brk;
943     } else if (mapped_addr != -1) {
944         /* Mapped but at wrong address, meaning there wasn't actually
945          * enough space for this brk.
946          */
947         target_munmap(mapped_addr, new_alloc_size);
948         mapped_addr = -1;
949         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
950     }
951     else {
952         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
953     }
954 
955 #if defined(TARGET_ALPHA)
956     /* We (partially) emulate OSF/1 on Alpha, which requires we
957        return a proper errno, not an unchanged brk value.  */
958     return -TARGET_ENOMEM;
959 #endif
960     /* For everything else, return the previous break. */
961     return target_brk;
962 }
963 
964 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
965     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
966 static inline abi_long copy_from_user_fdset(fd_set *fds,
967                                             abi_ulong target_fds_addr,
968                                             int n)
969 {
970     int i, nw, j, k;
971     abi_ulong b, *target_fds;
972 
973     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
974     if (!(target_fds = lock_user(VERIFY_READ,
975                                  target_fds_addr,
976                                  sizeof(abi_ulong) * nw,
977                                  1)))
978         return -TARGET_EFAULT;
979 
980     FD_ZERO(fds);
981     k = 0;
982     for (i = 0; i < nw; i++) {
983         /* grab the abi_ulong */
984         __get_user(b, &target_fds[i]);
985         for (j = 0; j < TARGET_ABI_BITS; j++) {
986             /* check the bit inside the abi_ulong */
987             if ((b >> j) & 1)
988                 FD_SET(k, fds);
989             k++;
990         }
991     }
992 
993     unlock_user(target_fds, target_fds_addr, 0);
994 
995     return 0;
996 }
997 
998 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
999                                                  abi_ulong target_fds_addr,
1000                                                  int n)
1001 {
1002     if (target_fds_addr) {
1003         if (copy_from_user_fdset(fds, target_fds_addr, n))
1004             return -TARGET_EFAULT;
1005         *fds_ptr = fds;
1006     } else {
1007         *fds_ptr = NULL;
1008     }
1009     return 0;
1010 }
1011 
1012 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1013                                           const fd_set *fds,
1014                                           int n)
1015 {
1016     int i, nw, j, k;
1017     abi_long v;
1018     abi_ulong *target_fds;
1019 
1020     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1021     if (!(target_fds = lock_user(VERIFY_WRITE,
1022                                  target_fds_addr,
1023                                  sizeof(abi_ulong) * nw,
1024                                  0)))
1025         return -TARGET_EFAULT;
1026 
1027     k = 0;
1028     for (i = 0; i < nw; i++) {
1029         v = 0;
1030         for (j = 0; j < TARGET_ABI_BITS; j++) {
1031             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1032             k++;
1033         }
1034         __put_user(v, &target_fds[i]);
1035     }
1036 
1037     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1038 
1039     return 0;
1040 }
1041 #endif
1042 
1043 #if defined(__alpha__)
1044 #define HOST_HZ 1024
1045 #else
1046 #define HOST_HZ 100
1047 #endif
1048 
1049 static inline abi_long host_to_target_clock_t(long ticks)
1050 {
1051 #if HOST_HZ == TARGET_HZ
1052     return ticks;
1053 #else
1054     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1055 #endif
1056 }
1057 
1058 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1059                                              const struct rusage *rusage)
1060 {
1061     struct target_rusage *target_rusage;
1062 
1063     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1064         return -TARGET_EFAULT;
1065     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1066     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1067     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1068     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1069     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1070     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1071     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1072     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1073     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1074     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1075     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1076     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1077     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1078     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1079     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1080     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1081     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1082     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1083     unlock_user_struct(target_rusage, target_addr, 1);
1084 
1085     return 0;
1086 }
1087 
1088 #ifdef TARGET_NR_setrlimit
1089 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1090 {
1091     abi_ulong target_rlim_swap;
1092     rlim_t result;
1093 
1094     target_rlim_swap = tswapal(target_rlim);
1095     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1096         return RLIM_INFINITY;
1097 
1098     result = target_rlim_swap;
1099     if (target_rlim_swap != (rlim_t)result)
1100         return RLIM_INFINITY;
1101 
1102     return result;
1103 }
1104 #endif
1105 
1106 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1107 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1108 {
1109     abi_ulong target_rlim_swap;
1110     abi_ulong result;
1111 
1112     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1113         target_rlim_swap = TARGET_RLIM_INFINITY;
1114     else
1115         target_rlim_swap = rlim;
1116     result = tswapal(target_rlim_swap);
1117 
1118     return result;
1119 }
1120 #endif
1121 
1122 static inline int target_to_host_resource(int code)
1123 {
1124     switch (code) {
1125     case TARGET_RLIMIT_AS:
1126         return RLIMIT_AS;
1127     case TARGET_RLIMIT_CORE:
1128         return RLIMIT_CORE;
1129     case TARGET_RLIMIT_CPU:
1130         return RLIMIT_CPU;
1131     case TARGET_RLIMIT_DATA:
1132         return RLIMIT_DATA;
1133     case TARGET_RLIMIT_FSIZE:
1134         return RLIMIT_FSIZE;
1135     case TARGET_RLIMIT_LOCKS:
1136         return RLIMIT_LOCKS;
1137     case TARGET_RLIMIT_MEMLOCK:
1138         return RLIMIT_MEMLOCK;
1139     case TARGET_RLIMIT_MSGQUEUE:
1140         return RLIMIT_MSGQUEUE;
1141     case TARGET_RLIMIT_NICE:
1142         return RLIMIT_NICE;
1143     case TARGET_RLIMIT_NOFILE:
1144         return RLIMIT_NOFILE;
1145     case TARGET_RLIMIT_NPROC:
1146         return RLIMIT_NPROC;
1147     case TARGET_RLIMIT_RSS:
1148         return RLIMIT_RSS;
1149     case TARGET_RLIMIT_RTPRIO:
1150         return RLIMIT_RTPRIO;
1151     case TARGET_RLIMIT_SIGPENDING:
1152         return RLIMIT_SIGPENDING;
1153     case TARGET_RLIMIT_STACK:
1154         return RLIMIT_STACK;
1155     default:
1156         return code;
1157     }
1158 }
1159 
1160 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1161                                               abi_ulong target_tv_addr)
1162 {
1163     struct target_timeval *target_tv;
1164 
1165     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1166         return -TARGET_EFAULT;
1167     }
1168 
1169     __get_user(tv->tv_sec, &target_tv->tv_sec);
1170     __get_user(tv->tv_usec, &target_tv->tv_usec);
1171 
1172     unlock_user_struct(target_tv, target_tv_addr, 0);
1173 
1174     return 0;
1175 }
1176 
1177 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1178                                             const struct timeval *tv)
1179 {
1180     struct target_timeval *target_tv;
1181 
1182     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1183         return -TARGET_EFAULT;
1184     }
1185 
1186     __put_user(tv->tv_sec, &target_tv->tv_sec);
1187     __put_user(tv->tv_usec, &target_tv->tv_usec);
1188 
1189     unlock_user_struct(target_tv, target_tv_addr, 1);
1190 
1191     return 0;
1192 }
1193 
1194 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1195 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1196                                                 abi_ulong target_tv_addr)
1197 {
1198     struct target__kernel_sock_timeval *target_tv;
1199 
1200     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1201         return -TARGET_EFAULT;
1202     }
1203 
1204     __get_user(tv->tv_sec, &target_tv->tv_sec);
1205     __get_user(tv->tv_usec, &target_tv->tv_usec);
1206 
1207     unlock_user_struct(target_tv, target_tv_addr, 0);
1208 
1209     return 0;
1210 }
1211 #endif
1212 
1213 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1214                                               const struct timeval *tv)
1215 {
1216     struct target__kernel_sock_timeval *target_tv;
1217 
1218     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1219         return -TARGET_EFAULT;
1220     }
1221 
1222     __put_user(tv->tv_sec, &target_tv->tv_sec);
1223     __put_user(tv->tv_usec, &target_tv->tv_usec);
1224 
1225     unlock_user_struct(target_tv, target_tv_addr, 1);
1226 
1227     return 0;
1228 }
1229 
1230 #if defined(TARGET_NR_futex) || \
1231     defined(TARGET_NR_rt_sigtimedwait) || \
1232     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1233     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1234     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1235     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1236     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1237     defined(TARGET_NR_timer_settime) || \
1238     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1239 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1240                                                abi_ulong target_addr)
1241 {
1242     struct target_timespec *target_ts;
1243 
1244     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1245         return -TARGET_EFAULT;
1246     }
1247     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1248     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1249     unlock_user_struct(target_ts, target_addr, 0);
1250     return 0;
1251 }
1252 #endif
1253 
1254 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1255     defined(TARGET_NR_timer_settime64) || \
1256     defined(TARGET_NR_mq_timedsend_time64) || \
1257     defined(TARGET_NR_mq_timedreceive_time64) || \
1258     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1259     defined(TARGET_NR_clock_nanosleep_time64) || \
1260     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1261     defined(TARGET_NR_utimensat) || \
1262     defined(TARGET_NR_utimensat_time64) || \
1263     defined(TARGET_NR_semtimedop_time64) || \
1264     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1265 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1266                                                  abi_ulong target_addr)
1267 {
1268     struct target__kernel_timespec *target_ts;
1269 
1270     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1271         return -TARGET_EFAULT;
1272     }
1273     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1274     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1275     /* in 32bit mode, this drops the padding */
1276     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1277     unlock_user_struct(target_ts, target_addr, 0);
1278     return 0;
1279 }
1280 #endif
1281 
1282 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1283                                                struct timespec *host_ts)
1284 {
1285     struct target_timespec *target_ts;
1286 
1287     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1288         return -TARGET_EFAULT;
1289     }
1290     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1291     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1292     unlock_user_struct(target_ts, target_addr, 1);
1293     return 0;
1294 }
1295 
1296 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1297                                                  struct timespec *host_ts)
1298 {
1299     struct target__kernel_timespec *target_ts;
1300 
1301     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1302         return -TARGET_EFAULT;
1303     }
1304     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1305     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1306     unlock_user_struct(target_ts, target_addr, 1);
1307     return 0;
1308 }
1309 
1310 #if defined(TARGET_NR_gettimeofday)
1311 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1312                                              struct timezone *tz)
1313 {
1314     struct target_timezone *target_tz;
1315 
1316     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1317         return -TARGET_EFAULT;
1318     }
1319 
1320     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1321     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1322 
1323     unlock_user_struct(target_tz, target_tz_addr, 1);
1324 
1325     return 0;
1326 }
1327 #endif
1328 
1329 #if defined(TARGET_NR_settimeofday)
1330 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1331                                                abi_ulong target_tz_addr)
1332 {
1333     struct target_timezone *target_tz;
1334 
1335     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1336         return -TARGET_EFAULT;
1337     }
1338 
1339     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1340     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1341 
1342     unlock_user_struct(target_tz, target_tz_addr, 0);
1343 
1344     return 0;
1345 }
1346 #endif
1347 
1348 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1349 #include <mqueue.h>
1350 
1351 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1352                                               abi_ulong target_mq_attr_addr)
1353 {
1354     struct target_mq_attr *target_mq_attr;
1355 
1356     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1357                           target_mq_attr_addr, 1))
1358         return -TARGET_EFAULT;
1359 
1360     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1361     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1362     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1363     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1364 
1365     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1366 
1367     return 0;
1368 }
1369 
1370 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1371                                             const struct mq_attr *attr)
1372 {
1373     struct target_mq_attr *target_mq_attr;
1374 
1375     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1376                           target_mq_attr_addr, 0))
1377         return -TARGET_EFAULT;
1378 
1379     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1380     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1381     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1382     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1383 
1384     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1385 
1386     return 0;
1387 }
1388 #endif
1389 
1390 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1391 /* do_select() must return target values and target errnos. */
1392 static abi_long do_select(int n,
1393                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1394                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1395 {
1396     fd_set rfds, wfds, efds;
1397     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1398     struct timeval tv;
1399     struct timespec ts, *ts_ptr;
1400     abi_long ret;
1401 
1402     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1403     if (ret) {
1404         return ret;
1405     }
1406     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1407     if (ret) {
1408         return ret;
1409     }
1410     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1411     if (ret) {
1412         return ret;
1413     }
1414 
1415     if (target_tv_addr) {
1416         if (copy_from_user_timeval(&tv, target_tv_addr))
1417             return -TARGET_EFAULT;
1418         ts.tv_sec = tv.tv_sec;
1419         ts.tv_nsec = tv.tv_usec * 1000;
1420         ts_ptr = &ts;
1421     } else {
1422         ts_ptr = NULL;
1423     }
1424 
1425     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1426                                   ts_ptr, NULL));
1427 
1428     if (!is_error(ret)) {
1429         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1430             return -TARGET_EFAULT;
1431         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1432             return -TARGET_EFAULT;
1433         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1434             return -TARGET_EFAULT;
1435 
1436         if (target_tv_addr) {
1437             tv.tv_sec = ts.tv_sec;
1438             tv.tv_usec = ts.tv_nsec / 1000;
1439             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1440                 return -TARGET_EFAULT;
1441             }
1442         }
1443     }
1444 
1445     return ret;
1446 }
1447 
1448 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1449 static abi_long do_old_select(abi_ulong arg1)
1450 {
1451     struct target_sel_arg_struct *sel;
1452     abi_ulong inp, outp, exp, tvp;
1453     long nsel;
1454 
1455     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1456         return -TARGET_EFAULT;
1457     }
1458 
1459     nsel = tswapal(sel->n);
1460     inp = tswapal(sel->inp);
1461     outp = tswapal(sel->outp);
1462     exp = tswapal(sel->exp);
1463     tvp = tswapal(sel->tvp);
1464 
1465     unlock_user_struct(sel, arg1, 0);
1466 
1467     return do_select(nsel, inp, outp, exp, tvp);
1468 }
1469 #endif
1470 #endif
1471 
1472 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1473 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1474                             abi_long arg4, abi_long arg5, abi_long arg6,
1475                             bool time64)
1476 {
1477     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1478     fd_set rfds, wfds, efds;
1479     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1480     struct timespec ts, *ts_ptr;
1481     abi_long ret;
1482 
1483     /*
1484      * The 6th arg is actually two args smashed together,
1485      * so we cannot use the C library.
1486      */
1487     sigset_t set;
1488     struct {
1489         sigset_t *set;
1490         size_t size;
1491     } sig, *sig_ptr;
1492 
1493     abi_ulong arg_sigset, arg_sigsize, *arg7;
1494     target_sigset_t *target_sigset;
1495 
1496     n = arg1;
1497     rfd_addr = arg2;
1498     wfd_addr = arg3;
1499     efd_addr = arg4;
1500     ts_addr = arg5;
1501 
1502     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1503     if (ret) {
1504         return ret;
1505     }
1506     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1507     if (ret) {
1508         return ret;
1509     }
1510     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1511     if (ret) {
1512         return ret;
1513     }
1514 
1515     /*
1516      * This takes a timespec, and not a timeval, so we cannot
1517      * use the do_select() helper ...
1518      */
1519     if (ts_addr) {
1520         if (time64) {
1521             if (target_to_host_timespec64(&ts, ts_addr)) {
1522                 return -TARGET_EFAULT;
1523             }
1524         } else {
1525             if (target_to_host_timespec(&ts, ts_addr)) {
1526                 return -TARGET_EFAULT;
1527             }
1528         }
1529             ts_ptr = &ts;
1530     } else {
1531         ts_ptr = NULL;
1532     }
1533 
1534     /* Extract the two packed args for the sigset */
1535     if (arg6) {
1536         sig_ptr = &sig;
1537         sig.size = SIGSET_T_SIZE;
1538 
1539         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1540         if (!arg7) {
1541             return -TARGET_EFAULT;
1542         }
1543         arg_sigset = tswapal(arg7[0]);
1544         arg_sigsize = tswapal(arg7[1]);
1545         unlock_user(arg7, arg6, 0);
1546 
1547         if (arg_sigset) {
1548             sig.set = &set;
1549             if (arg_sigsize != sizeof(*target_sigset)) {
1550                 /* Like the kernel, we enforce correct size sigsets */
1551                 return -TARGET_EINVAL;
1552             }
1553             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1554                                       sizeof(*target_sigset), 1);
1555             if (!target_sigset) {
1556                 return -TARGET_EFAULT;
1557             }
1558             target_to_host_sigset(&set, target_sigset);
1559             unlock_user(target_sigset, arg_sigset, 0);
1560         } else {
1561             sig.set = NULL;
1562         }
1563     } else {
1564         sig_ptr = NULL;
1565     }
1566 
1567     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1568                                   ts_ptr, sig_ptr));
1569 
1570     if (!is_error(ret)) {
1571         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1572             return -TARGET_EFAULT;
1573         }
1574         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1575             return -TARGET_EFAULT;
1576         }
1577         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1578             return -TARGET_EFAULT;
1579         }
1580         if (time64) {
1581             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1582                 return -TARGET_EFAULT;
1583             }
1584         } else {
1585             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1586                 return -TARGET_EFAULT;
1587             }
1588         }
1589     }
1590     return ret;
1591 }
1592 #endif
1593 
1594 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1595     defined(TARGET_NR_ppoll_time64)
1596 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1597                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1598 {
1599     struct target_pollfd *target_pfd;
1600     unsigned int nfds = arg2;
1601     struct pollfd *pfd;
1602     unsigned int i;
1603     abi_long ret;
1604 
1605     pfd = NULL;
1606     target_pfd = NULL;
1607     if (nfds) {
1608         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1609             return -TARGET_EINVAL;
1610         }
1611         target_pfd = lock_user(VERIFY_WRITE, arg1,
1612                                sizeof(struct target_pollfd) * nfds, 1);
1613         if (!target_pfd) {
1614             return -TARGET_EFAULT;
1615         }
1616 
1617         pfd = alloca(sizeof(struct pollfd) * nfds);
1618         for (i = 0; i < nfds; i++) {
1619             pfd[i].fd = tswap32(target_pfd[i].fd);
1620             pfd[i].events = tswap16(target_pfd[i].events);
1621         }
1622     }
1623     if (ppoll) {
1624         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1625         target_sigset_t *target_set;
1626         sigset_t _set, *set = &_set;
1627 
1628         if (arg3) {
1629             if (time64) {
1630                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1631                     unlock_user(target_pfd, arg1, 0);
1632                     return -TARGET_EFAULT;
1633                 }
1634             } else {
1635                 if (target_to_host_timespec(timeout_ts, arg3)) {
1636                     unlock_user(target_pfd, arg1, 0);
1637                     return -TARGET_EFAULT;
1638                 }
1639             }
1640         } else {
1641             timeout_ts = NULL;
1642         }
1643 
1644         if (arg4) {
1645             if (arg5 != sizeof(target_sigset_t)) {
1646                 unlock_user(target_pfd, arg1, 0);
1647                 return -TARGET_EINVAL;
1648             }
1649 
1650             target_set = lock_user(VERIFY_READ, arg4,
1651                                    sizeof(target_sigset_t), 1);
1652             if (!target_set) {
1653                 unlock_user(target_pfd, arg1, 0);
1654                 return -TARGET_EFAULT;
1655             }
1656             target_to_host_sigset(set, target_set);
1657         } else {
1658             set = NULL;
1659         }
1660 
1661         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1662                                    set, SIGSET_T_SIZE));
1663 
1664         if (!is_error(ret) && arg3) {
1665             if (time64) {
1666                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1667                     return -TARGET_EFAULT;
1668                 }
1669             } else {
1670                 if (host_to_target_timespec(arg3, timeout_ts)) {
1671                     return -TARGET_EFAULT;
1672                 }
1673             }
1674         }
1675         if (arg4) {
1676             unlock_user(target_set, arg4, 0);
1677         }
1678     } else {
1679           struct timespec ts, *pts;
1680 
1681           if (arg3 >= 0) {
1682               /* Convert ms to secs, ns */
1683               ts.tv_sec = arg3 / 1000;
1684               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1685               pts = &ts;
1686           } else {
1687               /* -ve poll() timeout means "infinite" */
1688               pts = NULL;
1689           }
1690           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1691     }
1692 
1693     if (!is_error(ret)) {
1694         for (i = 0; i < nfds; i++) {
1695             target_pfd[i].revents = tswap16(pfd[i].revents);
1696         }
1697     }
1698     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1699     return ret;
1700 }
1701 #endif
1702 
1703 static abi_long do_pipe2(int host_pipe[], int flags)
1704 {
1705 #ifdef CONFIG_PIPE2
1706     return pipe2(host_pipe, flags);
1707 #else
1708     return -ENOSYS;
1709 #endif
1710 }
1711 
1712 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1713                         int flags, int is_pipe2)
1714 {
1715     int host_pipe[2];
1716     abi_long ret;
1717     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1718 
1719     if (is_error(ret))
1720         return get_errno(ret);
1721 
1722     /* Several targets have special calling conventions for the original
1723        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1724     if (!is_pipe2) {
1725 #if defined(TARGET_ALPHA)
1726         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1727         return host_pipe[0];
1728 #elif defined(TARGET_MIPS)
1729         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1730         return host_pipe[0];
1731 #elif defined(TARGET_SH4)
1732         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1733         return host_pipe[0];
1734 #elif defined(TARGET_SPARC)
1735         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1736         return host_pipe[0];
1737 #endif
1738     }
1739 
1740     if (put_user_s32(host_pipe[0], pipedes)
1741         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1742         return -TARGET_EFAULT;
1743     return get_errno(ret);
1744 }
1745 
1746 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1747                                               abi_ulong target_addr,
1748                                               socklen_t len)
1749 {
1750     struct target_ip_mreqn *target_smreqn;
1751 
1752     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1753     if (!target_smreqn)
1754         return -TARGET_EFAULT;
1755     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1756     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1757     if (len == sizeof(struct target_ip_mreqn))
1758         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1759     unlock_user(target_smreqn, target_addr, 0);
1760 
1761     return 0;
1762 }
1763 
1764 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1765                                                abi_ulong target_addr,
1766                                                socklen_t len)
1767 {
1768     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1769     sa_family_t sa_family;
1770     struct target_sockaddr *target_saddr;
1771 
1772     if (fd_trans_target_to_host_addr(fd)) {
1773         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1774     }
1775 
1776     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1777     if (!target_saddr)
1778         return -TARGET_EFAULT;
1779 
1780     sa_family = tswap16(target_saddr->sa_family);
1781 
1782     /* Oops. The caller might send a incomplete sun_path; sun_path
1783      * must be terminated by \0 (see the manual page), but
1784      * unfortunately it is quite common to specify sockaddr_un
1785      * length as "strlen(x->sun_path)" while it should be
1786      * "strlen(...) + 1". We'll fix that here if needed.
1787      * Linux kernel has a similar feature.
1788      */
1789 
1790     if (sa_family == AF_UNIX) {
1791         if (len < unix_maxlen && len > 0) {
1792             char *cp = (char*)target_saddr;
1793 
1794             if ( cp[len-1] && !cp[len] )
1795                 len++;
1796         }
1797         if (len > unix_maxlen)
1798             len = unix_maxlen;
1799     }
1800 
1801     memcpy(addr, target_saddr, len);
1802     addr->sa_family = sa_family;
1803     if (sa_family == AF_NETLINK) {
1804         struct sockaddr_nl *nladdr;
1805 
1806         nladdr = (struct sockaddr_nl *)addr;
1807         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1808         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1809     } else if (sa_family == AF_PACKET) {
1810 	struct target_sockaddr_ll *lladdr;
1811 
1812 	lladdr = (struct target_sockaddr_ll *)addr;
1813 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1814 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1815     }
1816     unlock_user(target_saddr, target_addr, 0);
1817 
1818     return 0;
1819 }
1820 
1821 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1822                                                struct sockaddr *addr,
1823                                                socklen_t len)
1824 {
1825     struct target_sockaddr *target_saddr;
1826 
1827     if (len == 0) {
1828         return 0;
1829     }
1830     assert(addr);
1831 
1832     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1833     if (!target_saddr)
1834         return -TARGET_EFAULT;
1835     memcpy(target_saddr, addr, len);
1836     if (len >= offsetof(struct target_sockaddr, sa_family) +
1837         sizeof(target_saddr->sa_family)) {
1838         target_saddr->sa_family = tswap16(addr->sa_family);
1839     }
1840     if (addr->sa_family == AF_NETLINK &&
1841         len >= sizeof(struct target_sockaddr_nl)) {
1842         struct target_sockaddr_nl *target_nl =
1843                (struct target_sockaddr_nl *)target_saddr;
1844         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1845         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1846     } else if (addr->sa_family == AF_PACKET) {
1847         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1848         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1849         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1850     } else if (addr->sa_family == AF_INET6 &&
1851                len >= sizeof(struct target_sockaddr_in6)) {
1852         struct target_sockaddr_in6 *target_in6 =
1853                (struct target_sockaddr_in6 *)target_saddr;
1854         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1855     }
1856     unlock_user(target_saddr, target_addr, len);
1857 
1858     return 0;
1859 }
1860 
1861 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1862                                            struct target_msghdr *target_msgh)
1863 {
1864     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1865     abi_long msg_controllen;
1866     abi_ulong target_cmsg_addr;
1867     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1868     socklen_t space = 0;
1869 
1870     msg_controllen = tswapal(target_msgh->msg_controllen);
1871     if (msg_controllen < sizeof (struct target_cmsghdr))
1872         goto the_end;
1873     target_cmsg_addr = tswapal(target_msgh->msg_control);
1874     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1875     target_cmsg_start = target_cmsg;
1876     if (!target_cmsg)
1877         return -TARGET_EFAULT;
1878 
1879     while (cmsg && target_cmsg) {
1880         void *data = CMSG_DATA(cmsg);
1881         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1882 
1883         int len = tswapal(target_cmsg->cmsg_len)
1884             - sizeof(struct target_cmsghdr);
1885 
1886         space += CMSG_SPACE(len);
1887         if (space > msgh->msg_controllen) {
1888             space -= CMSG_SPACE(len);
1889             /* This is a QEMU bug, since we allocated the payload
1890              * area ourselves (unlike overflow in host-to-target
1891              * conversion, which is just the guest giving us a buffer
1892              * that's too small). It can't happen for the payload types
1893              * we currently support; if it becomes an issue in future
1894              * we would need to improve our allocation strategy to
1895              * something more intelligent than "twice the size of the
1896              * target buffer we're reading from".
1897              */
1898             qemu_log_mask(LOG_UNIMP,
1899                           ("Unsupported ancillary data %d/%d: "
1900                            "unhandled msg size\n"),
1901                           tswap32(target_cmsg->cmsg_level),
1902                           tswap32(target_cmsg->cmsg_type));
1903             break;
1904         }
1905 
1906         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1907             cmsg->cmsg_level = SOL_SOCKET;
1908         } else {
1909             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1910         }
1911         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1912         cmsg->cmsg_len = CMSG_LEN(len);
1913 
1914         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1915             int *fd = (int *)data;
1916             int *target_fd = (int *)target_data;
1917             int i, numfds = len / sizeof(int);
1918 
1919             for (i = 0; i < numfds; i++) {
1920                 __get_user(fd[i], target_fd + i);
1921             }
1922         } else if (cmsg->cmsg_level == SOL_SOCKET
1923                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1924             struct ucred *cred = (struct ucred *)data;
1925             struct target_ucred *target_cred =
1926                 (struct target_ucred *)target_data;
1927 
1928             __get_user(cred->pid, &target_cred->pid);
1929             __get_user(cred->uid, &target_cred->uid);
1930             __get_user(cred->gid, &target_cred->gid);
1931         } else {
1932             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1933                           cmsg->cmsg_level, cmsg->cmsg_type);
1934             memcpy(data, target_data, len);
1935         }
1936 
1937         cmsg = CMSG_NXTHDR(msgh, cmsg);
1938         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1939                                          target_cmsg_start);
1940     }
1941     unlock_user(target_cmsg, target_cmsg_addr, 0);
1942  the_end:
1943     msgh->msg_controllen = space;
1944     return 0;
1945 }
1946 
1947 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1948                                            struct msghdr *msgh)
1949 {
1950     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1951     abi_long msg_controllen;
1952     abi_ulong target_cmsg_addr;
1953     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1954     socklen_t space = 0;
1955 
1956     msg_controllen = tswapal(target_msgh->msg_controllen);
1957     if (msg_controllen < sizeof (struct target_cmsghdr))
1958         goto the_end;
1959     target_cmsg_addr = tswapal(target_msgh->msg_control);
1960     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1961     target_cmsg_start = target_cmsg;
1962     if (!target_cmsg)
1963         return -TARGET_EFAULT;
1964 
1965     while (cmsg && target_cmsg) {
1966         void *data = CMSG_DATA(cmsg);
1967         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1968 
1969         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1970         int tgt_len, tgt_space;
1971 
1972         /* We never copy a half-header but may copy half-data;
1973          * this is Linux's behaviour in put_cmsg(). Note that
1974          * truncation here is a guest problem (which we report
1975          * to the guest via the CTRUNC bit), unlike truncation
1976          * in target_to_host_cmsg, which is a QEMU bug.
1977          */
1978         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1979             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1980             break;
1981         }
1982 
1983         if (cmsg->cmsg_level == SOL_SOCKET) {
1984             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1985         } else {
1986             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1987         }
1988         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1989 
1990         /* Payload types which need a different size of payload on
1991          * the target must adjust tgt_len here.
1992          */
1993         tgt_len = len;
1994         switch (cmsg->cmsg_level) {
1995         case SOL_SOCKET:
1996             switch (cmsg->cmsg_type) {
1997             case SO_TIMESTAMP:
1998                 tgt_len = sizeof(struct target_timeval);
1999                 break;
2000             default:
2001                 break;
2002             }
2003             break;
2004         default:
2005             break;
2006         }
2007 
2008         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
2009             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
2010             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
2011         }
2012 
2013         /* We must now copy-and-convert len bytes of payload
2014          * into tgt_len bytes of destination space. Bear in mind
2015          * that in both source and destination we may be dealing
2016          * with a truncated value!
2017          */
2018         switch (cmsg->cmsg_level) {
2019         case SOL_SOCKET:
2020             switch (cmsg->cmsg_type) {
2021             case SCM_RIGHTS:
2022             {
2023                 int *fd = (int *)data;
2024                 int *target_fd = (int *)target_data;
2025                 int i, numfds = tgt_len / sizeof(int);
2026 
2027                 for (i = 0; i < numfds; i++) {
2028                     __put_user(fd[i], target_fd + i);
2029                 }
2030                 break;
2031             }
2032             case SO_TIMESTAMP:
2033             {
2034                 struct timeval *tv = (struct timeval *)data;
2035                 struct target_timeval *target_tv =
2036                     (struct target_timeval *)target_data;
2037 
2038                 if (len != sizeof(struct timeval) ||
2039                     tgt_len != sizeof(struct target_timeval)) {
2040                     goto unimplemented;
2041                 }
2042 
2043                 /* copy struct timeval to target */
2044                 __put_user(tv->tv_sec, &target_tv->tv_sec);
2045                 __put_user(tv->tv_usec, &target_tv->tv_usec);
2046                 break;
2047             }
2048             case SCM_CREDENTIALS:
2049             {
2050                 struct ucred *cred = (struct ucred *)data;
2051                 struct target_ucred *target_cred =
2052                     (struct target_ucred *)target_data;
2053 
2054                 __put_user(cred->pid, &target_cred->pid);
2055                 __put_user(cred->uid, &target_cred->uid);
2056                 __put_user(cred->gid, &target_cred->gid);
2057                 break;
2058             }
2059             default:
2060                 goto unimplemented;
2061             }
2062             break;
2063 
2064         case SOL_IP:
2065             switch (cmsg->cmsg_type) {
2066             case IP_TTL:
2067             {
2068                 uint32_t *v = (uint32_t *)data;
2069                 uint32_t *t_int = (uint32_t *)target_data;
2070 
2071                 if (len != sizeof(uint32_t) ||
2072                     tgt_len != sizeof(uint32_t)) {
2073                     goto unimplemented;
2074                 }
2075                 __put_user(*v, t_int);
2076                 break;
2077             }
2078             case IP_RECVERR:
2079             {
2080                 struct errhdr_t {
2081                    struct sock_extended_err ee;
2082                    struct sockaddr_in offender;
2083                 };
2084                 struct errhdr_t *errh = (struct errhdr_t *)data;
2085                 struct errhdr_t *target_errh =
2086                     (struct errhdr_t *)target_data;
2087 
2088                 if (len != sizeof(struct errhdr_t) ||
2089                     tgt_len != sizeof(struct errhdr_t)) {
2090                     goto unimplemented;
2091                 }
2092                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2093                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2094                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2095                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2096                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2097                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2098                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2099                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2100                     (void *) &errh->offender, sizeof(errh->offender));
2101                 break;
2102             }
2103             default:
2104                 goto unimplemented;
2105             }
2106             break;
2107 
2108         case SOL_IPV6:
2109             switch (cmsg->cmsg_type) {
2110             case IPV6_HOPLIMIT:
2111             {
2112                 uint32_t *v = (uint32_t *)data;
2113                 uint32_t *t_int = (uint32_t *)target_data;
2114 
2115                 if (len != sizeof(uint32_t) ||
2116                     tgt_len != sizeof(uint32_t)) {
2117                     goto unimplemented;
2118                 }
2119                 __put_user(*v, t_int);
2120                 break;
2121             }
2122             case IPV6_RECVERR:
2123             {
2124                 struct errhdr6_t {
2125                    struct sock_extended_err ee;
2126                    struct sockaddr_in6 offender;
2127                 };
2128                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2129                 struct errhdr6_t *target_errh =
2130                     (struct errhdr6_t *)target_data;
2131 
2132                 if (len != sizeof(struct errhdr6_t) ||
2133                     tgt_len != sizeof(struct errhdr6_t)) {
2134                     goto unimplemented;
2135                 }
2136                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2137                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2138                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2139                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2140                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2141                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2142                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2143                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2144                     (void *) &errh->offender, sizeof(errh->offender));
2145                 break;
2146             }
2147             default:
2148                 goto unimplemented;
2149             }
2150             break;
2151 
2152         default:
2153         unimplemented:
2154             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2155                           cmsg->cmsg_level, cmsg->cmsg_type);
2156             memcpy(target_data, data, MIN(len, tgt_len));
2157             if (tgt_len > len) {
2158                 memset(target_data + len, 0, tgt_len - len);
2159             }
2160         }
2161 
2162         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2163         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2164         if (msg_controllen < tgt_space) {
2165             tgt_space = msg_controllen;
2166         }
2167         msg_controllen -= tgt_space;
2168         space += tgt_space;
2169         cmsg = CMSG_NXTHDR(msgh, cmsg);
2170         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2171                                          target_cmsg_start);
2172     }
2173     unlock_user(target_cmsg, target_cmsg_addr, space);
2174  the_end:
2175     target_msgh->msg_controllen = tswapal(space);
2176     return 0;
2177 }
2178 
2179 /* do_setsockopt() Must return target values and target errnos. */
2180 static abi_long do_setsockopt(int sockfd, int level, int optname,
2181                               abi_ulong optval_addr, socklen_t optlen)
2182 {
2183     abi_long ret;
2184     int val;
2185     struct ip_mreqn *ip_mreq;
2186     struct ip_mreq_source *ip_mreq_source;
2187 
2188     switch(level) {
2189     case SOL_TCP:
2190     case SOL_UDP:
2191         /* TCP and UDP options all take an 'int' value.  */
2192         if (optlen < sizeof(uint32_t))
2193             return -TARGET_EINVAL;
2194 
2195         if (get_user_u32(val, optval_addr))
2196             return -TARGET_EFAULT;
2197         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2198         break;
2199     case SOL_IP:
2200         switch(optname) {
2201         case IP_TOS:
2202         case IP_TTL:
2203         case IP_HDRINCL:
2204         case IP_ROUTER_ALERT:
2205         case IP_RECVOPTS:
2206         case IP_RETOPTS:
2207         case IP_PKTINFO:
2208         case IP_MTU_DISCOVER:
2209         case IP_RECVERR:
2210         case IP_RECVTTL:
2211         case IP_RECVTOS:
2212 #ifdef IP_FREEBIND
2213         case IP_FREEBIND:
2214 #endif
2215         case IP_MULTICAST_TTL:
2216         case IP_MULTICAST_LOOP:
2217             val = 0;
2218             if (optlen >= sizeof(uint32_t)) {
2219                 if (get_user_u32(val, optval_addr))
2220                     return -TARGET_EFAULT;
2221             } else if (optlen >= 1) {
2222                 if (get_user_u8(val, optval_addr))
2223                     return -TARGET_EFAULT;
2224             }
2225             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2226             break;
2227         case IP_ADD_MEMBERSHIP:
2228         case IP_DROP_MEMBERSHIP:
2229             if (optlen < sizeof (struct target_ip_mreq) ||
2230                 optlen > sizeof (struct target_ip_mreqn))
2231                 return -TARGET_EINVAL;
2232 
2233             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2234             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2235             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2236             break;
2237 
2238         case IP_BLOCK_SOURCE:
2239         case IP_UNBLOCK_SOURCE:
2240         case IP_ADD_SOURCE_MEMBERSHIP:
2241         case IP_DROP_SOURCE_MEMBERSHIP:
2242             if (optlen != sizeof (struct target_ip_mreq_source))
2243                 return -TARGET_EINVAL;
2244 
2245             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2246             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2247             unlock_user (ip_mreq_source, optval_addr, 0);
2248             break;
2249 
2250         default:
2251             goto unimplemented;
2252         }
2253         break;
2254     case SOL_IPV6:
2255         switch (optname) {
2256         case IPV6_MTU_DISCOVER:
2257         case IPV6_MTU:
2258         case IPV6_V6ONLY:
2259         case IPV6_RECVPKTINFO:
2260         case IPV6_UNICAST_HOPS:
2261         case IPV6_MULTICAST_HOPS:
2262         case IPV6_MULTICAST_LOOP:
2263         case IPV6_RECVERR:
2264         case IPV6_RECVHOPLIMIT:
2265         case IPV6_2292HOPLIMIT:
2266         case IPV6_CHECKSUM:
2267         case IPV6_ADDRFORM:
2268         case IPV6_2292PKTINFO:
2269         case IPV6_RECVTCLASS:
2270         case IPV6_RECVRTHDR:
2271         case IPV6_2292RTHDR:
2272         case IPV6_RECVHOPOPTS:
2273         case IPV6_2292HOPOPTS:
2274         case IPV6_RECVDSTOPTS:
2275         case IPV6_2292DSTOPTS:
2276         case IPV6_TCLASS:
2277         case IPV6_ADDR_PREFERENCES:
2278 #ifdef IPV6_RECVPATHMTU
2279         case IPV6_RECVPATHMTU:
2280 #endif
2281 #ifdef IPV6_TRANSPARENT
2282         case IPV6_TRANSPARENT:
2283 #endif
2284 #ifdef IPV6_FREEBIND
2285         case IPV6_FREEBIND:
2286 #endif
2287 #ifdef IPV6_RECVORIGDSTADDR
2288         case IPV6_RECVORIGDSTADDR:
2289 #endif
2290             val = 0;
2291             if (optlen < sizeof(uint32_t)) {
2292                 return -TARGET_EINVAL;
2293             }
2294             if (get_user_u32(val, optval_addr)) {
2295                 return -TARGET_EFAULT;
2296             }
2297             ret = get_errno(setsockopt(sockfd, level, optname,
2298                                        &val, sizeof(val)));
2299             break;
2300         case IPV6_PKTINFO:
2301         {
2302             struct in6_pktinfo pki;
2303 
2304             if (optlen < sizeof(pki)) {
2305                 return -TARGET_EINVAL;
2306             }
2307 
2308             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2309                 return -TARGET_EFAULT;
2310             }
2311 
2312             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2313 
2314             ret = get_errno(setsockopt(sockfd, level, optname,
2315                                        &pki, sizeof(pki)));
2316             break;
2317         }
2318         case IPV6_ADD_MEMBERSHIP:
2319         case IPV6_DROP_MEMBERSHIP:
2320         {
2321             struct ipv6_mreq ipv6mreq;
2322 
2323             if (optlen < sizeof(ipv6mreq)) {
2324                 return -TARGET_EINVAL;
2325             }
2326 
2327             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2328                 return -TARGET_EFAULT;
2329             }
2330 
2331             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2332 
2333             ret = get_errno(setsockopt(sockfd, level, optname,
2334                                        &ipv6mreq, sizeof(ipv6mreq)));
2335             break;
2336         }
2337         default:
2338             goto unimplemented;
2339         }
2340         break;
2341     case SOL_ICMPV6:
2342         switch (optname) {
2343         case ICMPV6_FILTER:
2344         {
2345             struct icmp6_filter icmp6f;
2346 
2347             if (optlen > sizeof(icmp6f)) {
2348                 optlen = sizeof(icmp6f);
2349             }
2350 
2351             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2352                 return -TARGET_EFAULT;
2353             }
2354 
2355             for (val = 0; val < 8; val++) {
2356                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2357             }
2358 
2359             ret = get_errno(setsockopt(sockfd, level, optname,
2360                                        &icmp6f, optlen));
2361             break;
2362         }
2363         default:
2364             goto unimplemented;
2365         }
2366         break;
2367     case SOL_RAW:
2368         switch (optname) {
2369         case ICMP_FILTER:
2370         case IPV6_CHECKSUM:
2371             /* those take an u32 value */
2372             if (optlen < sizeof(uint32_t)) {
2373                 return -TARGET_EINVAL;
2374             }
2375 
2376             if (get_user_u32(val, optval_addr)) {
2377                 return -TARGET_EFAULT;
2378             }
2379             ret = get_errno(setsockopt(sockfd, level, optname,
2380                                        &val, sizeof(val)));
2381             break;
2382 
2383         default:
2384             goto unimplemented;
2385         }
2386         break;
2387 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2388     case SOL_ALG:
2389         switch (optname) {
2390         case ALG_SET_KEY:
2391         {
2392             char *alg_key = g_malloc(optlen);
2393 
2394             if (!alg_key) {
2395                 return -TARGET_ENOMEM;
2396             }
2397             if (copy_from_user(alg_key, optval_addr, optlen)) {
2398                 g_free(alg_key);
2399                 return -TARGET_EFAULT;
2400             }
2401             ret = get_errno(setsockopt(sockfd, level, optname,
2402                                        alg_key, optlen));
2403             g_free(alg_key);
2404             break;
2405         }
2406         case ALG_SET_AEAD_AUTHSIZE:
2407         {
2408             ret = get_errno(setsockopt(sockfd, level, optname,
2409                                        NULL, optlen));
2410             break;
2411         }
2412         default:
2413             goto unimplemented;
2414         }
2415         break;
2416 #endif
2417     case TARGET_SOL_SOCKET:
2418         switch (optname) {
2419         case TARGET_SO_RCVTIMEO:
2420         {
2421                 struct timeval tv;
2422 
2423                 optname = SO_RCVTIMEO;
2424 
2425 set_timeout:
2426                 if (optlen != sizeof(struct target_timeval)) {
2427                     return -TARGET_EINVAL;
2428                 }
2429 
2430                 if (copy_from_user_timeval(&tv, optval_addr)) {
2431                     return -TARGET_EFAULT;
2432                 }
2433 
2434                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2435                                 &tv, sizeof(tv)));
2436                 return ret;
2437         }
2438         case TARGET_SO_SNDTIMEO:
2439                 optname = SO_SNDTIMEO;
2440                 goto set_timeout;
2441         case TARGET_SO_ATTACH_FILTER:
2442         {
2443                 struct target_sock_fprog *tfprog;
2444                 struct target_sock_filter *tfilter;
2445                 struct sock_fprog fprog;
2446                 struct sock_filter *filter;
2447                 int i;
2448 
2449                 if (optlen != sizeof(*tfprog)) {
2450                     return -TARGET_EINVAL;
2451                 }
2452                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2453                     return -TARGET_EFAULT;
2454                 }
2455                 if (!lock_user_struct(VERIFY_READ, tfilter,
2456                                       tswapal(tfprog->filter), 0)) {
2457                     unlock_user_struct(tfprog, optval_addr, 1);
2458                     return -TARGET_EFAULT;
2459                 }
2460 
2461                 fprog.len = tswap16(tfprog->len);
2462                 filter = g_try_new(struct sock_filter, fprog.len);
2463                 if (filter == NULL) {
2464                     unlock_user_struct(tfilter, tfprog->filter, 1);
2465                     unlock_user_struct(tfprog, optval_addr, 1);
2466                     return -TARGET_ENOMEM;
2467                 }
2468                 for (i = 0; i < fprog.len; i++) {
2469                     filter[i].code = tswap16(tfilter[i].code);
2470                     filter[i].jt = tfilter[i].jt;
2471                     filter[i].jf = tfilter[i].jf;
2472                     filter[i].k = tswap32(tfilter[i].k);
2473                 }
2474                 fprog.filter = filter;
2475 
2476                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2477                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2478                 g_free(filter);
2479 
2480                 unlock_user_struct(tfilter, tfprog->filter, 1);
2481                 unlock_user_struct(tfprog, optval_addr, 1);
2482                 return ret;
2483         }
2484 	case TARGET_SO_BINDTODEVICE:
2485 	{
2486 		char *dev_ifname, *addr_ifname;
2487 
2488 		if (optlen > IFNAMSIZ - 1) {
2489 		    optlen = IFNAMSIZ - 1;
2490 		}
2491 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2492 		if (!dev_ifname) {
2493 		    return -TARGET_EFAULT;
2494 		}
2495 		optname = SO_BINDTODEVICE;
2496 		addr_ifname = alloca(IFNAMSIZ);
2497 		memcpy(addr_ifname, dev_ifname, optlen);
2498 		addr_ifname[optlen] = 0;
2499 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2500                                            addr_ifname, optlen));
2501 		unlock_user (dev_ifname, optval_addr, 0);
2502 		return ret;
2503 	}
2504         case TARGET_SO_LINGER:
2505         {
2506                 struct linger lg;
2507                 struct target_linger *tlg;
2508 
2509                 if (optlen != sizeof(struct target_linger)) {
2510                     return -TARGET_EINVAL;
2511                 }
2512                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2513                     return -TARGET_EFAULT;
2514                 }
2515                 __get_user(lg.l_onoff, &tlg->l_onoff);
2516                 __get_user(lg.l_linger, &tlg->l_linger);
2517                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2518                                 &lg, sizeof(lg)));
2519                 unlock_user_struct(tlg, optval_addr, 0);
2520                 return ret;
2521         }
2522             /* Options with 'int' argument.  */
2523         case TARGET_SO_DEBUG:
2524 		optname = SO_DEBUG;
2525 		break;
2526         case TARGET_SO_REUSEADDR:
2527 		optname = SO_REUSEADDR;
2528 		break;
2529 #ifdef SO_REUSEPORT
2530         case TARGET_SO_REUSEPORT:
2531                 optname = SO_REUSEPORT;
2532                 break;
2533 #endif
2534         case TARGET_SO_TYPE:
2535 		optname = SO_TYPE;
2536 		break;
2537         case TARGET_SO_ERROR:
2538 		optname = SO_ERROR;
2539 		break;
2540         case TARGET_SO_DONTROUTE:
2541 		optname = SO_DONTROUTE;
2542 		break;
2543         case TARGET_SO_BROADCAST:
2544 		optname = SO_BROADCAST;
2545 		break;
2546         case TARGET_SO_SNDBUF:
2547 		optname = SO_SNDBUF;
2548 		break;
2549         case TARGET_SO_SNDBUFFORCE:
2550                 optname = SO_SNDBUFFORCE;
2551                 break;
2552         case TARGET_SO_RCVBUF:
2553 		optname = SO_RCVBUF;
2554 		break;
2555         case TARGET_SO_RCVBUFFORCE:
2556                 optname = SO_RCVBUFFORCE;
2557                 break;
2558         case TARGET_SO_KEEPALIVE:
2559 		optname = SO_KEEPALIVE;
2560 		break;
2561         case TARGET_SO_OOBINLINE:
2562 		optname = SO_OOBINLINE;
2563 		break;
2564         case TARGET_SO_NO_CHECK:
2565 		optname = SO_NO_CHECK;
2566 		break;
2567         case TARGET_SO_PRIORITY:
2568 		optname = SO_PRIORITY;
2569 		break;
2570 #ifdef SO_BSDCOMPAT
2571         case TARGET_SO_BSDCOMPAT:
2572 		optname = SO_BSDCOMPAT;
2573 		break;
2574 #endif
2575         case TARGET_SO_PASSCRED:
2576 		optname = SO_PASSCRED;
2577 		break;
2578         case TARGET_SO_PASSSEC:
2579                 optname = SO_PASSSEC;
2580                 break;
2581         case TARGET_SO_TIMESTAMP:
2582 		optname = SO_TIMESTAMP;
2583 		break;
2584         case TARGET_SO_RCVLOWAT:
2585 		optname = SO_RCVLOWAT;
2586 		break;
2587         default:
2588             goto unimplemented;
2589         }
2590 	if (optlen < sizeof(uint32_t))
2591             return -TARGET_EINVAL;
2592 
2593 	if (get_user_u32(val, optval_addr))
2594             return -TARGET_EFAULT;
2595 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2596         break;
2597 #ifdef SOL_NETLINK
2598     case SOL_NETLINK:
2599         switch (optname) {
2600         case NETLINK_PKTINFO:
2601         case NETLINK_ADD_MEMBERSHIP:
2602         case NETLINK_DROP_MEMBERSHIP:
2603         case NETLINK_BROADCAST_ERROR:
2604         case NETLINK_NO_ENOBUFS:
2605 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2606         case NETLINK_LISTEN_ALL_NSID:
2607         case NETLINK_CAP_ACK:
2608 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2609 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2610         case NETLINK_EXT_ACK:
2611 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2612 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2613         case NETLINK_GET_STRICT_CHK:
2614 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2615             break;
2616         default:
2617             goto unimplemented;
2618         }
2619         val = 0;
2620         if (optlen < sizeof(uint32_t)) {
2621             return -TARGET_EINVAL;
2622         }
2623         if (get_user_u32(val, optval_addr)) {
2624             return -TARGET_EFAULT;
2625         }
2626         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2627                                    sizeof(val)));
2628         break;
2629 #endif /* SOL_NETLINK */
2630     default:
2631     unimplemented:
2632         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2633                       level, optname);
2634         ret = -TARGET_ENOPROTOOPT;
2635     }
2636     return ret;
2637 }
2638 
2639 /* do_getsockopt() Must return target values and target errnos. */
2640 static abi_long do_getsockopt(int sockfd, int level, int optname,
2641                               abi_ulong optval_addr, abi_ulong optlen)
2642 {
2643     abi_long ret;
2644     int len, val;
2645     socklen_t lv;
2646 
2647     switch(level) {
2648     case TARGET_SOL_SOCKET:
2649         level = SOL_SOCKET;
2650         switch (optname) {
2651         /* These don't just return a single integer */
2652         case TARGET_SO_PEERNAME:
2653             goto unimplemented;
2654         case TARGET_SO_RCVTIMEO: {
2655             struct timeval tv;
2656             socklen_t tvlen;
2657 
2658             optname = SO_RCVTIMEO;
2659 
2660 get_timeout:
2661             if (get_user_u32(len, optlen)) {
2662                 return -TARGET_EFAULT;
2663             }
2664             if (len < 0) {
2665                 return -TARGET_EINVAL;
2666             }
2667 
2668             tvlen = sizeof(tv);
2669             ret = get_errno(getsockopt(sockfd, level, optname,
2670                                        &tv, &tvlen));
2671             if (ret < 0) {
2672                 return ret;
2673             }
2674             if (len > sizeof(struct target_timeval)) {
2675                 len = sizeof(struct target_timeval);
2676             }
2677             if (copy_to_user_timeval(optval_addr, &tv)) {
2678                 return -TARGET_EFAULT;
2679             }
2680             if (put_user_u32(len, optlen)) {
2681                 return -TARGET_EFAULT;
2682             }
2683             break;
2684         }
2685         case TARGET_SO_SNDTIMEO:
2686             optname = SO_SNDTIMEO;
2687             goto get_timeout;
2688         case TARGET_SO_PEERCRED: {
2689             struct ucred cr;
2690             socklen_t crlen;
2691             struct target_ucred *tcr;
2692 
2693             if (get_user_u32(len, optlen)) {
2694                 return -TARGET_EFAULT;
2695             }
2696             if (len < 0) {
2697                 return -TARGET_EINVAL;
2698             }
2699 
2700             crlen = sizeof(cr);
2701             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2702                                        &cr, &crlen));
2703             if (ret < 0) {
2704                 return ret;
2705             }
2706             if (len > crlen) {
2707                 len = crlen;
2708             }
2709             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2710                 return -TARGET_EFAULT;
2711             }
2712             __put_user(cr.pid, &tcr->pid);
2713             __put_user(cr.uid, &tcr->uid);
2714             __put_user(cr.gid, &tcr->gid);
2715             unlock_user_struct(tcr, optval_addr, 1);
2716             if (put_user_u32(len, optlen)) {
2717                 return -TARGET_EFAULT;
2718             }
2719             break;
2720         }
2721         case TARGET_SO_PEERSEC: {
2722             char *name;
2723 
2724             if (get_user_u32(len, optlen)) {
2725                 return -TARGET_EFAULT;
2726             }
2727             if (len < 0) {
2728                 return -TARGET_EINVAL;
2729             }
2730             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2731             if (!name) {
2732                 return -TARGET_EFAULT;
2733             }
2734             lv = len;
2735             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2736                                        name, &lv));
2737             if (put_user_u32(lv, optlen)) {
2738                 ret = -TARGET_EFAULT;
2739             }
2740             unlock_user(name, optval_addr, lv);
2741             break;
2742         }
2743         case TARGET_SO_LINGER:
2744         {
2745             struct linger lg;
2746             socklen_t lglen;
2747             struct target_linger *tlg;
2748 
2749             if (get_user_u32(len, optlen)) {
2750                 return -TARGET_EFAULT;
2751             }
2752             if (len < 0) {
2753                 return -TARGET_EINVAL;
2754             }
2755 
2756             lglen = sizeof(lg);
2757             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2758                                        &lg, &lglen));
2759             if (ret < 0) {
2760                 return ret;
2761             }
2762             if (len > lglen) {
2763                 len = lglen;
2764             }
2765             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2766                 return -TARGET_EFAULT;
2767             }
2768             __put_user(lg.l_onoff, &tlg->l_onoff);
2769             __put_user(lg.l_linger, &tlg->l_linger);
2770             unlock_user_struct(tlg, optval_addr, 1);
2771             if (put_user_u32(len, optlen)) {
2772                 return -TARGET_EFAULT;
2773             }
2774             break;
2775         }
2776         /* Options with 'int' argument.  */
2777         case TARGET_SO_DEBUG:
2778             optname = SO_DEBUG;
2779             goto int_case;
2780         case TARGET_SO_REUSEADDR:
2781             optname = SO_REUSEADDR;
2782             goto int_case;
2783 #ifdef SO_REUSEPORT
2784         case TARGET_SO_REUSEPORT:
2785             optname = SO_REUSEPORT;
2786             goto int_case;
2787 #endif
2788         case TARGET_SO_TYPE:
2789             optname = SO_TYPE;
2790             goto int_case;
2791         case TARGET_SO_ERROR:
2792             optname = SO_ERROR;
2793             goto int_case;
2794         case TARGET_SO_DONTROUTE:
2795             optname = SO_DONTROUTE;
2796             goto int_case;
2797         case TARGET_SO_BROADCAST:
2798             optname = SO_BROADCAST;
2799             goto int_case;
2800         case TARGET_SO_SNDBUF:
2801             optname = SO_SNDBUF;
2802             goto int_case;
2803         case TARGET_SO_RCVBUF:
2804             optname = SO_RCVBUF;
2805             goto int_case;
2806         case TARGET_SO_KEEPALIVE:
2807             optname = SO_KEEPALIVE;
2808             goto int_case;
2809         case TARGET_SO_OOBINLINE:
2810             optname = SO_OOBINLINE;
2811             goto int_case;
2812         case TARGET_SO_NO_CHECK:
2813             optname = SO_NO_CHECK;
2814             goto int_case;
2815         case TARGET_SO_PRIORITY:
2816             optname = SO_PRIORITY;
2817             goto int_case;
2818 #ifdef SO_BSDCOMPAT
2819         case TARGET_SO_BSDCOMPAT:
2820             optname = SO_BSDCOMPAT;
2821             goto int_case;
2822 #endif
2823         case TARGET_SO_PASSCRED:
2824             optname = SO_PASSCRED;
2825             goto int_case;
2826         case TARGET_SO_TIMESTAMP:
2827             optname = SO_TIMESTAMP;
2828             goto int_case;
2829         case TARGET_SO_RCVLOWAT:
2830             optname = SO_RCVLOWAT;
2831             goto int_case;
2832         case TARGET_SO_ACCEPTCONN:
2833             optname = SO_ACCEPTCONN;
2834             goto int_case;
2835         default:
2836             goto int_case;
2837         }
2838         break;
2839     case SOL_TCP:
2840     case SOL_UDP:
2841         /* TCP and UDP options all take an 'int' value.  */
2842     int_case:
2843         if (get_user_u32(len, optlen))
2844             return -TARGET_EFAULT;
2845         if (len < 0)
2846             return -TARGET_EINVAL;
2847         lv = sizeof(lv);
2848         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2849         if (ret < 0)
2850             return ret;
2851         if (optname == SO_TYPE) {
2852             val = host_to_target_sock_type(val);
2853         }
2854         if (len > lv)
2855             len = lv;
2856         if (len == 4) {
2857             if (put_user_u32(val, optval_addr))
2858                 return -TARGET_EFAULT;
2859         } else {
2860             if (put_user_u8(val, optval_addr))
2861                 return -TARGET_EFAULT;
2862         }
2863         if (put_user_u32(len, optlen))
2864             return -TARGET_EFAULT;
2865         break;
2866     case SOL_IP:
2867         switch(optname) {
2868         case IP_TOS:
2869         case IP_TTL:
2870         case IP_HDRINCL:
2871         case IP_ROUTER_ALERT:
2872         case IP_RECVOPTS:
2873         case IP_RETOPTS:
2874         case IP_PKTINFO:
2875         case IP_MTU_DISCOVER:
2876         case IP_RECVERR:
2877         case IP_RECVTOS:
2878 #ifdef IP_FREEBIND
2879         case IP_FREEBIND:
2880 #endif
2881         case IP_MULTICAST_TTL:
2882         case IP_MULTICAST_LOOP:
2883             if (get_user_u32(len, optlen))
2884                 return -TARGET_EFAULT;
2885             if (len < 0)
2886                 return -TARGET_EINVAL;
2887             lv = sizeof(lv);
2888             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2889             if (ret < 0)
2890                 return ret;
2891             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2892                 len = 1;
2893                 if (put_user_u32(len, optlen)
2894                     || put_user_u8(val, optval_addr))
2895                     return -TARGET_EFAULT;
2896             } else {
2897                 if (len > sizeof(int))
2898                     len = sizeof(int);
2899                 if (put_user_u32(len, optlen)
2900                     || put_user_u32(val, optval_addr))
2901                     return -TARGET_EFAULT;
2902             }
2903             break;
2904         default:
2905             ret = -TARGET_ENOPROTOOPT;
2906             break;
2907         }
2908         break;
2909     case SOL_IPV6:
2910         switch (optname) {
2911         case IPV6_MTU_DISCOVER:
2912         case IPV6_MTU:
2913         case IPV6_V6ONLY:
2914         case IPV6_RECVPKTINFO:
2915         case IPV6_UNICAST_HOPS:
2916         case IPV6_MULTICAST_HOPS:
2917         case IPV6_MULTICAST_LOOP:
2918         case IPV6_RECVERR:
2919         case IPV6_RECVHOPLIMIT:
2920         case IPV6_2292HOPLIMIT:
2921         case IPV6_CHECKSUM:
2922         case IPV6_ADDRFORM:
2923         case IPV6_2292PKTINFO:
2924         case IPV6_RECVTCLASS:
2925         case IPV6_RECVRTHDR:
2926         case IPV6_2292RTHDR:
2927         case IPV6_RECVHOPOPTS:
2928         case IPV6_2292HOPOPTS:
2929         case IPV6_RECVDSTOPTS:
2930         case IPV6_2292DSTOPTS:
2931         case IPV6_TCLASS:
2932         case IPV6_ADDR_PREFERENCES:
2933 #ifdef IPV6_RECVPATHMTU
2934         case IPV6_RECVPATHMTU:
2935 #endif
2936 #ifdef IPV6_TRANSPARENT
2937         case IPV6_TRANSPARENT:
2938 #endif
2939 #ifdef IPV6_FREEBIND
2940         case IPV6_FREEBIND:
2941 #endif
2942 #ifdef IPV6_RECVORIGDSTADDR
2943         case IPV6_RECVORIGDSTADDR:
2944 #endif
2945             if (get_user_u32(len, optlen))
2946                 return -TARGET_EFAULT;
2947             if (len < 0)
2948                 return -TARGET_EINVAL;
2949             lv = sizeof(lv);
2950             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2951             if (ret < 0)
2952                 return ret;
2953             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2954                 len = 1;
2955                 if (put_user_u32(len, optlen)
2956                     || put_user_u8(val, optval_addr))
2957                     return -TARGET_EFAULT;
2958             } else {
2959                 if (len > sizeof(int))
2960                     len = sizeof(int);
2961                 if (put_user_u32(len, optlen)
2962                     || put_user_u32(val, optval_addr))
2963                     return -TARGET_EFAULT;
2964             }
2965             break;
2966         default:
2967             ret = -TARGET_ENOPROTOOPT;
2968             break;
2969         }
2970         break;
2971 #ifdef SOL_NETLINK
2972     case SOL_NETLINK:
2973         switch (optname) {
2974         case NETLINK_PKTINFO:
2975         case NETLINK_BROADCAST_ERROR:
2976         case NETLINK_NO_ENOBUFS:
2977 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2978         case NETLINK_LISTEN_ALL_NSID:
2979         case NETLINK_CAP_ACK:
2980 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2981 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2982         case NETLINK_EXT_ACK:
2983 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2984 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2985         case NETLINK_GET_STRICT_CHK:
2986 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2987             if (get_user_u32(len, optlen)) {
2988                 return -TARGET_EFAULT;
2989             }
2990             if (len != sizeof(val)) {
2991                 return -TARGET_EINVAL;
2992             }
2993             lv = len;
2994             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2995             if (ret < 0) {
2996                 return ret;
2997             }
2998             if (put_user_u32(lv, optlen)
2999                 || put_user_u32(val, optval_addr)) {
3000                 return -TARGET_EFAULT;
3001             }
3002             break;
3003 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
3004         case NETLINK_LIST_MEMBERSHIPS:
3005         {
3006             uint32_t *results;
3007             int i;
3008             if (get_user_u32(len, optlen)) {
3009                 return -TARGET_EFAULT;
3010             }
3011             if (len < 0) {
3012                 return -TARGET_EINVAL;
3013             }
3014             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
3015             if (!results) {
3016                 return -TARGET_EFAULT;
3017             }
3018             lv = len;
3019             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
3020             if (ret < 0) {
3021                 unlock_user(results, optval_addr, 0);
3022                 return ret;
3023             }
3024             /* swap host endianess to target endianess. */
3025             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
3026                 results[i] = tswap32(results[i]);
3027             }
3028             if (put_user_u32(lv, optlen)) {
3029                 return -TARGET_EFAULT;
3030             }
3031             unlock_user(results, optval_addr, 0);
3032             break;
3033         }
3034 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3035         default:
3036             goto unimplemented;
3037         }
3038         break;
3039 #endif /* SOL_NETLINK */
3040     default:
3041     unimplemented:
3042         qemu_log_mask(LOG_UNIMP,
3043                       "getsockopt level=%d optname=%d not yet supported\n",
3044                       level, optname);
3045         ret = -TARGET_EOPNOTSUPP;
3046         break;
3047     }
3048     return ret;
3049 }
3050 
3051 /* Convert target low/high pair representing file offset into the host
3052  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3053  * as the kernel doesn't handle them either.
3054  */
3055 static void target_to_host_low_high(abi_ulong tlow,
3056                                     abi_ulong thigh,
3057                                     unsigned long *hlow,
3058                                     unsigned long *hhigh)
3059 {
3060     uint64_t off = tlow |
3061         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3062         TARGET_LONG_BITS / 2;
3063 
3064     *hlow = off;
3065     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3066 }
3067 
3068 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3069                                 abi_ulong count, int copy)
3070 {
3071     struct target_iovec *target_vec;
3072     struct iovec *vec;
3073     abi_ulong total_len, max_len;
3074     int i;
3075     int err = 0;
3076     bool bad_address = false;
3077 
3078     if (count == 0) {
3079         errno = 0;
3080         return NULL;
3081     }
3082     if (count > IOV_MAX) {
3083         errno = EINVAL;
3084         return NULL;
3085     }
3086 
3087     vec = g_try_new0(struct iovec, count);
3088     if (vec == NULL) {
3089         errno = ENOMEM;
3090         return NULL;
3091     }
3092 
3093     target_vec = lock_user(VERIFY_READ, target_addr,
3094                            count * sizeof(struct target_iovec), 1);
3095     if (target_vec == NULL) {
3096         err = EFAULT;
3097         goto fail2;
3098     }
3099 
3100     /* ??? If host page size > target page size, this will result in a
3101        value larger than what we can actually support.  */
3102     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3103     total_len = 0;
3104 
3105     for (i = 0; i < count; i++) {
3106         abi_ulong base = tswapal(target_vec[i].iov_base);
3107         abi_long len = tswapal(target_vec[i].iov_len);
3108 
3109         if (len < 0) {
3110             err = EINVAL;
3111             goto fail;
3112         } else if (len == 0) {
3113             /* Zero length pointer is ignored.  */
3114             vec[i].iov_base = 0;
3115         } else {
3116             vec[i].iov_base = lock_user(type, base, len, copy);
3117             /* If the first buffer pointer is bad, this is a fault.  But
3118              * subsequent bad buffers will result in a partial write; this
3119              * is realized by filling the vector with null pointers and
3120              * zero lengths. */
3121             if (!vec[i].iov_base) {
3122                 if (i == 0) {
3123                     err = EFAULT;
3124                     goto fail;
3125                 } else {
3126                     bad_address = true;
3127                 }
3128             }
3129             if (bad_address) {
3130                 len = 0;
3131             }
3132             if (len > max_len - total_len) {
3133                 len = max_len - total_len;
3134             }
3135         }
3136         vec[i].iov_len = len;
3137         total_len += len;
3138     }
3139 
3140     unlock_user(target_vec, target_addr, 0);
3141     return vec;
3142 
3143  fail:
3144     while (--i >= 0) {
3145         if (tswapal(target_vec[i].iov_len) > 0) {
3146             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3147         }
3148     }
3149     unlock_user(target_vec, target_addr, 0);
3150  fail2:
3151     g_free(vec);
3152     errno = err;
3153     return NULL;
3154 }
3155 
3156 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3157                          abi_ulong count, int copy)
3158 {
3159     struct target_iovec *target_vec;
3160     int i;
3161 
3162     target_vec = lock_user(VERIFY_READ, target_addr,
3163                            count * sizeof(struct target_iovec), 1);
3164     if (target_vec) {
3165         for (i = 0; i < count; i++) {
3166             abi_ulong base = tswapal(target_vec[i].iov_base);
3167             abi_long len = tswapal(target_vec[i].iov_len);
3168             if (len < 0) {
3169                 break;
3170             }
3171             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3172         }
3173         unlock_user(target_vec, target_addr, 0);
3174     }
3175 
3176     g_free(vec);
3177 }
3178 
3179 static inline int target_to_host_sock_type(int *type)
3180 {
3181     int host_type = 0;
3182     int target_type = *type;
3183 
3184     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3185     case TARGET_SOCK_DGRAM:
3186         host_type = SOCK_DGRAM;
3187         break;
3188     case TARGET_SOCK_STREAM:
3189         host_type = SOCK_STREAM;
3190         break;
3191     default:
3192         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3193         break;
3194     }
3195     if (target_type & TARGET_SOCK_CLOEXEC) {
3196 #if defined(SOCK_CLOEXEC)
3197         host_type |= SOCK_CLOEXEC;
3198 #else
3199         return -TARGET_EINVAL;
3200 #endif
3201     }
3202     if (target_type & TARGET_SOCK_NONBLOCK) {
3203 #if defined(SOCK_NONBLOCK)
3204         host_type |= SOCK_NONBLOCK;
3205 #elif !defined(O_NONBLOCK)
3206         return -TARGET_EINVAL;
3207 #endif
3208     }
3209     *type = host_type;
3210     return 0;
3211 }
3212 
3213 /* Try to emulate socket type flags after socket creation.  */
3214 static int sock_flags_fixup(int fd, int target_type)
3215 {
3216 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3217     if (target_type & TARGET_SOCK_NONBLOCK) {
3218         int flags = fcntl(fd, F_GETFL);
3219         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3220             close(fd);
3221             return -TARGET_EINVAL;
3222         }
3223     }
3224 #endif
3225     return fd;
3226 }
3227 
3228 /* do_socket() Must return target values and target errnos. */
3229 static abi_long do_socket(int domain, int type, int protocol)
3230 {
3231     int target_type = type;
3232     int ret;
3233 
3234     ret = target_to_host_sock_type(&type);
3235     if (ret) {
3236         return ret;
3237     }
3238 
3239     if (domain == PF_NETLINK && !(
3240 #ifdef CONFIG_RTNETLINK
3241          protocol == NETLINK_ROUTE ||
3242 #endif
3243          protocol == NETLINK_KOBJECT_UEVENT ||
3244          protocol == NETLINK_AUDIT)) {
3245         return -TARGET_EPROTONOSUPPORT;
3246     }
3247 
3248     if (domain == AF_PACKET ||
3249         (domain == AF_INET && type == SOCK_PACKET)) {
3250         protocol = tswap16(protocol);
3251     }
3252 
3253     ret = get_errno(socket(domain, type, protocol));
3254     if (ret >= 0) {
3255         ret = sock_flags_fixup(ret, target_type);
3256         if (type == SOCK_PACKET) {
3257             /* Manage an obsolete case :
3258              * if socket type is SOCK_PACKET, bind by name
3259              */
3260             fd_trans_register(ret, &target_packet_trans);
3261         } else if (domain == PF_NETLINK) {
3262             switch (protocol) {
3263 #ifdef CONFIG_RTNETLINK
3264             case NETLINK_ROUTE:
3265                 fd_trans_register(ret, &target_netlink_route_trans);
3266                 break;
3267 #endif
3268             case NETLINK_KOBJECT_UEVENT:
3269                 /* nothing to do: messages are strings */
3270                 break;
3271             case NETLINK_AUDIT:
3272                 fd_trans_register(ret, &target_netlink_audit_trans);
3273                 break;
3274             default:
3275                 g_assert_not_reached();
3276             }
3277         }
3278     }
3279     return ret;
3280 }
3281 
3282 /* do_bind() Must return target values and target errnos. */
3283 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3284                         socklen_t addrlen)
3285 {
3286     void *addr;
3287     abi_long ret;
3288 
3289     if ((int)addrlen < 0) {
3290         return -TARGET_EINVAL;
3291     }
3292 
3293     addr = alloca(addrlen+1);
3294 
3295     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3296     if (ret)
3297         return ret;
3298 
3299     return get_errno(bind(sockfd, addr, addrlen));
3300 }
3301 
3302 /* do_connect() Must return target values and target errnos. */
3303 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3304                            socklen_t addrlen)
3305 {
3306     void *addr;
3307     abi_long ret;
3308 
3309     if ((int)addrlen < 0) {
3310         return -TARGET_EINVAL;
3311     }
3312 
3313     addr = alloca(addrlen+1);
3314 
3315     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3316     if (ret)
3317         return ret;
3318 
3319     return get_errno(safe_connect(sockfd, addr, addrlen));
3320 }
3321 
3322 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3323 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3324                                       int flags, int send)
3325 {
3326     abi_long ret, len;
3327     struct msghdr msg;
3328     abi_ulong count;
3329     struct iovec *vec;
3330     abi_ulong target_vec;
3331 
3332     if (msgp->msg_name) {
3333         msg.msg_namelen = tswap32(msgp->msg_namelen);
3334         msg.msg_name = alloca(msg.msg_namelen+1);
3335         ret = target_to_host_sockaddr(fd, msg.msg_name,
3336                                       tswapal(msgp->msg_name),
3337                                       msg.msg_namelen);
3338         if (ret == -TARGET_EFAULT) {
3339             /* For connected sockets msg_name and msg_namelen must
3340              * be ignored, so returning EFAULT immediately is wrong.
3341              * Instead, pass a bad msg_name to the host kernel, and
3342              * let it decide whether to return EFAULT or not.
3343              */
3344             msg.msg_name = (void *)-1;
3345         } else if (ret) {
3346             goto out2;
3347         }
3348     } else {
3349         msg.msg_name = NULL;
3350         msg.msg_namelen = 0;
3351     }
3352     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3353     msg.msg_control = alloca(msg.msg_controllen);
3354     memset(msg.msg_control, 0, msg.msg_controllen);
3355 
3356     msg.msg_flags = tswap32(msgp->msg_flags);
3357 
3358     count = tswapal(msgp->msg_iovlen);
3359     target_vec = tswapal(msgp->msg_iov);
3360 
3361     if (count > IOV_MAX) {
3362         /* sendrcvmsg returns a different errno for this condition than
3363          * readv/writev, so we must catch it here before lock_iovec() does.
3364          */
3365         ret = -TARGET_EMSGSIZE;
3366         goto out2;
3367     }
3368 
3369     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3370                      target_vec, count, send);
3371     if (vec == NULL) {
3372         ret = -host_to_target_errno(errno);
3373         goto out2;
3374     }
3375     msg.msg_iovlen = count;
3376     msg.msg_iov = vec;
3377 
3378     if (send) {
3379         if (fd_trans_target_to_host_data(fd)) {
3380             void *host_msg;
3381 
3382             host_msg = g_malloc(msg.msg_iov->iov_len);
3383             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3384             ret = fd_trans_target_to_host_data(fd)(host_msg,
3385                                                    msg.msg_iov->iov_len);
3386             if (ret >= 0) {
3387                 msg.msg_iov->iov_base = host_msg;
3388                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3389             }
3390             g_free(host_msg);
3391         } else {
3392             ret = target_to_host_cmsg(&msg, msgp);
3393             if (ret == 0) {
3394                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3395             }
3396         }
3397     } else {
3398         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3399         if (!is_error(ret)) {
3400             len = ret;
3401             if (fd_trans_host_to_target_data(fd)) {
3402                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3403                                                MIN(msg.msg_iov->iov_len, len));
3404             } else {
3405                 ret = host_to_target_cmsg(msgp, &msg);
3406             }
3407             if (!is_error(ret)) {
3408                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3409                 msgp->msg_flags = tswap32(msg.msg_flags);
3410                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3411                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3412                                     msg.msg_name, msg.msg_namelen);
3413                     if (ret) {
3414                         goto out;
3415                     }
3416                 }
3417 
3418                 ret = len;
3419             }
3420         }
3421     }
3422 
3423 out:
3424     unlock_iovec(vec, target_vec, count, !send);
3425 out2:
3426     return ret;
3427 }
3428 
3429 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3430                                int flags, int send)
3431 {
3432     abi_long ret;
3433     struct target_msghdr *msgp;
3434 
3435     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3436                           msgp,
3437                           target_msg,
3438                           send ? 1 : 0)) {
3439         return -TARGET_EFAULT;
3440     }
3441     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3442     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3443     return ret;
3444 }
3445 
3446 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3447  * so it might not have this *mmsg-specific flag either.
3448  */
3449 #ifndef MSG_WAITFORONE
3450 #define MSG_WAITFORONE 0x10000
3451 #endif
3452 
3453 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3454                                 unsigned int vlen, unsigned int flags,
3455                                 int send)
3456 {
3457     struct target_mmsghdr *mmsgp;
3458     abi_long ret = 0;
3459     int i;
3460 
3461     if (vlen > UIO_MAXIOV) {
3462         vlen = UIO_MAXIOV;
3463     }
3464 
3465     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3466     if (!mmsgp) {
3467         return -TARGET_EFAULT;
3468     }
3469 
3470     for (i = 0; i < vlen; i++) {
3471         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3472         if (is_error(ret)) {
3473             break;
3474         }
3475         mmsgp[i].msg_len = tswap32(ret);
3476         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3477         if (flags & MSG_WAITFORONE) {
3478             flags |= MSG_DONTWAIT;
3479         }
3480     }
3481 
3482     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3483 
3484     /* Return number of datagrams sent if we sent any at all;
3485      * otherwise return the error.
3486      */
3487     if (i) {
3488         return i;
3489     }
3490     return ret;
3491 }
3492 
3493 /* do_accept4() Must return target values and target errnos. */
3494 static abi_long do_accept4(int fd, abi_ulong target_addr,
3495                            abi_ulong target_addrlen_addr, int flags)
3496 {
3497     socklen_t addrlen, ret_addrlen;
3498     void *addr;
3499     abi_long ret;
3500     int host_flags;
3501 
3502     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3503 
3504     if (target_addr == 0) {
3505         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3506     }
3507 
3508     /* linux returns EFAULT if addrlen pointer is invalid */
3509     if (get_user_u32(addrlen, target_addrlen_addr))
3510         return -TARGET_EFAULT;
3511 
3512     if ((int)addrlen < 0) {
3513         return -TARGET_EINVAL;
3514     }
3515 
3516     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3517         return -TARGET_EFAULT;
3518 
3519     addr = alloca(addrlen);
3520 
3521     ret_addrlen = addrlen;
3522     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3523     if (!is_error(ret)) {
3524         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3525         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3526             ret = -TARGET_EFAULT;
3527         }
3528     }
3529     return ret;
3530 }
3531 
3532 /* do_getpeername() Must return target values and target errnos. */
3533 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3534                                abi_ulong target_addrlen_addr)
3535 {
3536     socklen_t addrlen, ret_addrlen;
3537     void *addr;
3538     abi_long ret;
3539 
3540     if (get_user_u32(addrlen, target_addrlen_addr))
3541         return -TARGET_EFAULT;
3542 
3543     if ((int)addrlen < 0) {
3544         return -TARGET_EINVAL;
3545     }
3546 
3547     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3548         return -TARGET_EFAULT;
3549 
3550     addr = alloca(addrlen);
3551 
3552     ret_addrlen = addrlen;
3553     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3554     if (!is_error(ret)) {
3555         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3556         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3557             ret = -TARGET_EFAULT;
3558         }
3559     }
3560     return ret;
3561 }
3562 
3563 /* do_getsockname() Must return target values and target errnos. */
3564 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3565                                abi_ulong target_addrlen_addr)
3566 {
3567     socklen_t addrlen, ret_addrlen;
3568     void *addr;
3569     abi_long ret;
3570 
3571     if (get_user_u32(addrlen, target_addrlen_addr))
3572         return -TARGET_EFAULT;
3573 
3574     if ((int)addrlen < 0) {
3575         return -TARGET_EINVAL;
3576     }
3577 
3578     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3579         return -TARGET_EFAULT;
3580 
3581     addr = alloca(addrlen);
3582 
3583     ret_addrlen = addrlen;
3584     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3585     if (!is_error(ret)) {
3586         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3587         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3588             ret = -TARGET_EFAULT;
3589         }
3590     }
3591     return ret;
3592 }
3593 
3594 /* do_socketpair() Must return target values and target errnos. */
3595 static abi_long do_socketpair(int domain, int type, int protocol,
3596                               abi_ulong target_tab_addr)
3597 {
3598     int tab[2];
3599     abi_long ret;
3600 
3601     target_to_host_sock_type(&type);
3602 
3603     ret = get_errno(socketpair(domain, type, protocol, tab));
3604     if (!is_error(ret)) {
3605         if (put_user_s32(tab[0], target_tab_addr)
3606             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3607             ret = -TARGET_EFAULT;
3608     }
3609     return ret;
3610 }
3611 
3612 /* do_sendto() Must return target values and target errnos. */
3613 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3614                           abi_ulong target_addr, socklen_t addrlen)
3615 {
3616     void *addr;
3617     void *host_msg;
3618     void *copy_msg = NULL;
3619     abi_long ret;
3620 
3621     if ((int)addrlen < 0) {
3622         return -TARGET_EINVAL;
3623     }
3624 
3625     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3626     if (!host_msg)
3627         return -TARGET_EFAULT;
3628     if (fd_trans_target_to_host_data(fd)) {
3629         copy_msg = host_msg;
3630         host_msg = g_malloc(len);
3631         memcpy(host_msg, copy_msg, len);
3632         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3633         if (ret < 0) {
3634             goto fail;
3635         }
3636     }
3637     if (target_addr) {
3638         addr = alloca(addrlen+1);
3639         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3640         if (ret) {
3641             goto fail;
3642         }
3643         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3644     } else {
3645         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3646     }
3647 fail:
3648     if (copy_msg) {
3649         g_free(host_msg);
3650         host_msg = copy_msg;
3651     }
3652     unlock_user(host_msg, msg, 0);
3653     return ret;
3654 }
3655 
3656 /* do_recvfrom() Must return target values and target errnos. */
3657 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3658                             abi_ulong target_addr,
3659                             abi_ulong target_addrlen)
3660 {
3661     socklen_t addrlen, ret_addrlen;
3662     void *addr;
3663     void *host_msg;
3664     abi_long ret;
3665 
3666     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3667     if (!host_msg)
3668         return -TARGET_EFAULT;
3669     if (target_addr) {
3670         if (get_user_u32(addrlen, target_addrlen)) {
3671             ret = -TARGET_EFAULT;
3672             goto fail;
3673         }
3674         if ((int)addrlen < 0) {
3675             ret = -TARGET_EINVAL;
3676             goto fail;
3677         }
3678         addr = alloca(addrlen);
3679         ret_addrlen = addrlen;
3680         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3681                                       addr, &ret_addrlen));
3682     } else {
3683         addr = NULL; /* To keep compiler quiet.  */
3684         addrlen = 0; /* To keep compiler quiet.  */
3685         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3686     }
3687     if (!is_error(ret)) {
3688         if (fd_trans_host_to_target_data(fd)) {
3689             abi_long trans;
3690             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3691             if (is_error(trans)) {
3692                 ret = trans;
3693                 goto fail;
3694             }
3695         }
3696         if (target_addr) {
3697             host_to_target_sockaddr(target_addr, addr,
3698                                     MIN(addrlen, ret_addrlen));
3699             if (put_user_u32(ret_addrlen, target_addrlen)) {
3700                 ret = -TARGET_EFAULT;
3701                 goto fail;
3702             }
3703         }
3704         unlock_user(host_msg, msg, len);
3705     } else {
3706 fail:
3707         unlock_user(host_msg, msg, 0);
3708     }
3709     return ret;
3710 }
3711 
3712 #ifdef TARGET_NR_socketcall
3713 /* do_socketcall() must return target values and target errnos. */
3714 static abi_long do_socketcall(int num, abi_ulong vptr)
3715 {
3716     static const unsigned nargs[] = { /* number of arguments per operation */
3717         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3718         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3719         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3720         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3721         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3722         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3723         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3724         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3725         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3726         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3727         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3728         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3729         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3730         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3731         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3732         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3733         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3734         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3735         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3736         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3737     };
3738     abi_long a[6]; /* max 6 args */
3739     unsigned i;
3740 
3741     /* check the range of the first argument num */
3742     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3743     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3744         return -TARGET_EINVAL;
3745     }
3746     /* ensure we have space for args */
3747     if (nargs[num] > ARRAY_SIZE(a)) {
3748         return -TARGET_EINVAL;
3749     }
3750     /* collect the arguments in a[] according to nargs[] */
3751     for (i = 0; i < nargs[num]; ++i) {
3752         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3753             return -TARGET_EFAULT;
3754         }
3755     }
3756     /* now when we have the args, invoke the appropriate underlying function */
3757     switch (num) {
3758     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3759         return do_socket(a[0], a[1], a[2]);
3760     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3761         return do_bind(a[0], a[1], a[2]);
3762     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3763         return do_connect(a[0], a[1], a[2]);
3764     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3765         return get_errno(listen(a[0], a[1]));
3766     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3767         return do_accept4(a[0], a[1], a[2], 0);
3768     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3769         return do_getsockname(a[0], a[1], a[2]);
3770     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3771         return do_getpeername(a[0], a[1], a[2]);
3772     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3773         return do_socketpair(a[0], a[1], a[2], a[3]);
3774     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3775         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3776     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3777         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3778     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3779         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3780     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3781         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3782     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3783         return get_errno(shutdown(a[0], a[1]));
3784     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3785         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3786     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3787         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3788     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3789         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3790     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3791         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3792     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3793         return do_accept4(a[0], a[1], a[2], a[3]);
3794     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3795         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3796     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3797         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3798     default:
3799         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3800         return -TARGET_EINVAL;
3801     }
3802 }
3803 #endif
3804 
3805 #define N_SHM_REGIONS	32
3806 
3807 static struct shm_region {
3808     abi_ulong start;
3809     abi_ulong size;
3810     bool in_use;
3811 } shm_regions[N_SHM_REGIONS];
3812 
3813 #ifndef TARGET_SEMID64_DS
3814 /* asm-generic version of this struct */
3815 struct target_semid64_ds
3816 {
3817   struct target_ipc_perm sem_perm;
3818   abi_ulong sem_otime;
3819 #if TARGET_ABI_BITS == 32
3820   abi_ulong __unused1;
3821 #endif
3822   abi_ulong sem_ctime;
3823 #if TARGET_ABI_BITS == 32
3824   abi_ulong __unused2;
3825 #endif
3826   abi_ulong sem_nsems;
3827   abi_ulong __unused3;
3828   abi_ulong __unused4;
3829 };
3830 #endif
3831 
3832 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3833                                                abi_ulong target_addr)
3834 {
3835     struct target_ipc_perm *target_ip;
3836     struct target_semid64_ds *target_sd;
3837 
3838     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3839         return -TARGET_EFAULT;
3840     target_ip = &(target_sd->sem_perm);
3841     host_ip->__key = tswap32(target_ip->__key);
3842     host_ip->uid = tswap32(target_ip->uid);
3843     host_ip->gid = tswap32(target_ip->gid);
3844     host_ip->cuid = tswap32(target_ip->cuid);
3845     host_ip->cgid = tswap32(target_ip->cgid);
3846 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3847     host_ip->mode = tswap32(target_ip->mode);
3848 #else
3849     host_ip->mode = tswap16(target_ip->mode);
3850 #endif
3851 #if defined(TARGET_PPC)
3852     host_ip->__seq = tswap32(target_ip->__seq);
3853 #else
3854     host_ip->__seq = tswap16(target_ip->__seq);
3855 #endif
3856     unlock_user_struct(target_sd, target_addr, 0);
3857     return 0;
3858 }
3859 
3860 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3861                                                struct ipc_perm *host_ip)
3862 {
3863     struct target_ipc_perm *target_ip;
3864     struct target_semid64_ds *target_sd;
3865 
3866     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3867         return -TARGET_EFAULT;
3868     target_ip = &(target_sd->sem_perm);
3869     target_ip->__key = tswap32(host_ip->__key);
3870     target_ip->uid = tswap32(host_ip->uid);
3871     target_ip->gid = tswap32(host_ip->gid);
3872     target_ip->cuid = tswap32(host_ip->cuid);
3873     target_ip->cgid = tswap32(host_ip->cgid);
3874 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3875     target_ip->mode = tswap32(host_ip->mode);
3876 #else
3877     target_ip->mode = tswap16(host_ip->mode);
3878 #endif
3879 #if defined(TARGET_PPC)
3880     target_ip->__seq = tswap32(host_ip->__seq);
3881 #else
3882     target_ip->__seq = tswap16(host_ip->__seq);
3883 #endif
3884     unlock_user_struct(target_sd, target_addr, 1);
3885     return 0;
3886 }
3887 
3888 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3889                                                abi_ulong target_addr)
3890 {
3891     struct target_semid64_ds *target_sd;
3892 
3893     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3894         return -TARGET_EFAULT;
3895     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3896         return -TARGET_EFAULT;
3897     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3898     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3899     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3900     unlock_user_struct(target_sd, target_addr, 0);
3901     return 0;
3902 }
3903 
3904 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3905                                                struct semid_ds *host_sd)
3906 {
3907     struct target_semid64_ds *target_sd;
3908 
3909     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3910         return -TARGET_EFAULT;
3911     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3912         return -TARGET_EFAULT;
3913     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3914     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3915     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3916     unlock_user_struct(target_sd, target_addr, 1);
3917     return 0;
3918 }
3919 
3920 struct target_seminfo {
3921     int semmap;
3922     int semmni;
3923     int semmns;
3924     int semmnu;
3925     int semmsl;
3926     int semopm;
3927     int semume;
3928     int semusz;
3929     int semvmx;
3930     int semaem;
3931 };
3932 
3933 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3934                                               struct seminfo *host_seminfo)
3935 {
3936     struct target_seminfo *target_seminfo;
3937     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3938         return -TARGET_EFAULT;
3939     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3940     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3941     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3942     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3943     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3944     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3945     __put_user(host_seminfo->semume, &target_seminfo->semume);
3946     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3947     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3948     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3949     unlock_user_struct(target_seminfo, target_addr, 1);
3950     return 0;
3951 }
3952 
3953 union semun {
3954 	int val;
3955 	struct semid_ds *buf;
3956 	unsigned short *array;
3957 	struct seminfo *__buf;
3958 };
3959 
3960 union target_semun {
3961 	int val;
3962 	abi_ulong buf;
3963 	abi_ulong array;
3964 	abi_ulong __buf;
3965 };
3966 
3967 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3968                                                abi_ulong target_addr)
3969 {
3970     int nsems;
3971     unsigned short *array;
3972     union semun semun;
3973     struct semid_ds semid_ds;
3974     int i, ret;
3975 
3976     semun.buf = &semid_ds;
3977 
3978     ret = semctl(semid, 0, IPC_STAT, semun);
3979     if (ret == -1)
3980         return get_errno(ret);
3981 
3982     nsems = semid_ds.sem_nsems;
3983 
3984     *host_array = g_try_new(unsigned short, nsems);
3985     if (!*host_array) {
3986         return -TARGET_ENOMEM;
3987     }
3988     array = lock_user(VERIFY_READ, target_addr,
3989                       nsems*sizeof(unsigned short), 1);
3990     if (!array) {
3991         g_free(*host_array);
3992         return -TARGET_EFAULT;
3993     }
3994 
3995     for(i=0; i<nsems; i++) {
3996         __get_user((*host_array)[i], &array[i]);
3997     }
3998     unlock_user(array, target_addr, 0);
3999 
4000     return 0;
4001 }
4002 
4003 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4004                                                unsigned short **host_array)
4005 {
4006     int nsems;
4007     unsigned short *array;
4008     union semun semun;
4009     struct semid_ds semid_ds;
4010     int i, ret;
4011 
4012     semun.buf = &semid_ds;
4013 
4014     ret = semctl(semid, 0, IPC_STAT, semun);
4015     if (ret == -1)
4016         return get_errno(ret);
4017 
4018     nsems = semid_ds.sem_nsems;
4019 
4020     array = lock_user(VERIFY_WRITE, target_addr,
4021                       nsems*sizeof(unsigned short), 0);
4022     if (!array)
4023         return -TARGET_EFAULT;
4024 
4025     for(i=0; i<nsems; i++) {
4026         __put_user((*host_array)[i], &array[i]);
4027     }
4028     g_free(*host_array);
4029     unlock_user(array, target_addr, 1);
4030 
4031     return 0;
4032 }
4033 
4034 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4035                                  abi_ulong target_arg)
4036 {
4037     union target_semun target_su = { .buf = target_arg };
4038     union semun arg;
4039     struct semid_ds dsarg;
4040     unsigned short *array = NULL;
4041     struct seminfo seminfo;
4042     abi_long ret = -TARGET_EINVAL;
4043     abi_long err;
4044     cmd &= 0xff;
4045 
4046     switch( cmd ) {
4047 	case GETVAL:
4048 	case SETVAL:
4049             /* In 64 bit cross-endian situations, we will erroneously pick up
4050              * the wrong half of the union for the "val" element.  To rectify
4051              * this, the entire 8-byte structure is byteswapped, followed by
4052 	     * a swap of the 4 byte val field. In other cases, the data is
4053 	     * already in proper host byte order. */
4054 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4055 		target_su.buf = tswapal(target_su.buf);
4056 		arg.val = tswap32(target_su.val);
4057 	    } else {
4058 		arg.val = target_su.val;
4059 	    }
4060             ret = get_errno(semctl(semid, semnum, cmd, arg));
4061             break;
4062 	case GETALL:
4063 	case SETALL:
4064             err = target_to_host_semarray(semid, &array, target_su.array);
4065             if (err)
4066                 return err;
4067             arg.array = array;
4068             ret = get_errno(semctl(semid, semnum, cmd, arg));
4069             err = host_to_target_semarray(semid, target_su.array, &array);
4070             if (err)
4071                 return err;
4072             break;
4073 	case IPC_STAT:
4074 	case IPC_SET:
4075 	case SEM_STAT:
4076             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4077             if (err)
4078                 return err;
4079             arg.buf = &dsarg;
4080             ret = get_errno(semctl(semid, semnum, cmd, arg));
4081             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4082             if (err)
4083                 return err;
4084             break;
4085 	case IPC_INFO:
4086 	case SEM_INFO:
4087             arg.__buf = &seminfo;
4088             ret = get_errno(semctl(semid, semnum, cmd, arg));
4089             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4090             if (err)
4091                 return err;
4092             break;
4093 	case IPC_RMID:
4094 	case GETPID:
4095 	case GETNCNT:
4096 	case GETZCNT:
4097             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4098             break;
4099     }
4100 
4101     return ret;
4102 }
4103 
4104 struct target_sembuf {
4105     unsigned short sem_num;
4106     short sem_op;
4107     short sem_flg;
4108 };
4109 
4110 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4111                                              abi_ulong target_addr,
4112                                              unsigned nsops)
4113 {
4114     struct target_sembuf *target_sembuf;
4115     int i;
4116 
4117     target_sembuf = lock_user(VERIFY_READ, target_addr,
4118                               nsops*sizeof(struct target_sembuf), 1);
4119     if (!target_sembuf)
4120         return -TARGET_EFAULT;
4121 
4122     for(i=0; i<nsops; i++) {
4123         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4124         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4125         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4126     }
4127 
4128     unlock_user(target_sembuf, target_addr, 0);
4129 
4130     return 0;
4131 }
4132 
4133 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4134     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4135 
4136 /*
4137  * This macro is required to handle the s390 variants, which passes the
4138  * arguments in a different order than default.
4139  */
4140 #ifdef __s390x__
4141 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4142   (__nsops), (__timeout), (__sops)
4143 #else
4144 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4145   (__nsops), 0, (__sops), (__timeout)
4146 #endif
4147 
4148 static inline abi_long do_semtimedop(int semid,
4149                                      abi_long ptr,
4150                                      unsigned nsops,
4151                                      abi_long timeout, bool time64)
4152 {
4153     struct sembuf *sops;
4154     struct timespec ts, *pts = NULL;
4155     abi_long ret;
4156 
4157     if (timeout) {
4158         pts = &ts;
4159         if (time64) {
4160             if (target_to_host_timespec64(pts, timeout)) {
4161                 return -TARGET_EFAULT;
4162             }
4163         } else {
4164             if (target_to_host_timespec(pts, timeout)) {
4165                 return -TARGET_EFAULT;
4166             }
4167         }
4168     }
4169 
4170     if (nsops > TARGET_SEMOPM) {
4171         return -TARGET_E2BIG;
4172     }
4173 
4174     sops = g_new(struct sembuf, nsops);
4175 
4176     if (target_to_host_sembuf(sops, ptr, nsops)) {
4177         g_free(sops);
4178         return -TARGET_EFAULT;
4179     }
4180 
4181     ret = -TARGET_ENOSYS;
4182 #ifdef __NR_semtimedop
4183     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4184 #endif
4185 #ifdef __NR_ipc
4186     if (ret == -TARGET_ENOSYS) {
4187         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4188                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4189     }
4190 #endif
4191     g_free(sops);
4192     return ret;
4193 }
4194 #endif
4195 
4196 struct target_msqid_ds
4197 {
4198     struct target_ipc_perm msg_perm;
4199     abi_ulong msg_stime;
4200 #if TARGET_ABI_BITS == 32
4201     abi_ulong __unused1;
4202 #endif
4203     abi_ulong msg_rtime;
4204 #if TARGET_ABI_BITS == 32
4205     abi_ulong __unused2;
4206 #endif
4207     abi_ulong msg_ctime;
4208 #if TARGET_ABI_BITS == 32
4209     abi_ulong __unused3;
4210 #endif
4211     abi_ulong __msg_cbytes;
4212     abi_ulong msg_qnum;
4213     abi_ulong msg_qbytes;
4214     abi_ulong msg_lspid;
4215     abi_ulong msg_lrpid;
4216     abi_ulong __unused4;
4217     abi_ulong __unused5;
4218 };
4219 
4220 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4221                                                abi_ulong target_addr)
4222 {
4223     struct target_msqid_ds *target_md;
4224 
4225     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4226         return -TARGET_EFAULT;
4227     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4228         return -TARGET_EFAULT;
4229     host_md->msg_stime = tswapal(target_md->msg_stime);
4230     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4231     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4232     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4233     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4234     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4235     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4236     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4237     unlock_user_struct(target_md, target_addr, 0);
4238     return 0;
4239 }
4240 
4241 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4242                                                struct msqid_ds *host_md)
4243 {
4244     struct target_msqid_ds *target_md;
4245 
4246     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4247         return -TARGET_EFAULT;
4248     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4249         return -TARGET_EFAULT;
4250     target_md->msg_stime = tswapal(host_md->msg_stime);
4251     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4252     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4253     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4254     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4255     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4256     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4257     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4258     unlock_user_struct(target_md, target_addr, 1);
4259     return 0;
4260 }
4261 
4262 struct target_msginfo {
4263     int msgpool;
4264     int msgmap;
4265     int msgmax;
4266     int msgmnb;
4267     int msgmni;
4268     int msgssz;
4269     int msgtql;
4270     unsigned short int msgseg;
4271 };
4272 
4273 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4274                                               struct msginfo *host_msginfo)
4275 {
4276     struct target_msginfo *target_msginfo;
4277     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4278         return -TARGET_EFAULT;
4279     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4280     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4281     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4282     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4283     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4284     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4285     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4286     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4287     unlock_user_struct(target_msginfo, target_addr, 1);
4288     return 0;
4289 }
4290 
4291 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4292 {
4293     struct msqid_ds dsarg;
4294     struct msginfo msginfo;
4295     abi_long ret = -TARGET_EINVAL;
4296 
4297     cmd &= 0xff;
4298 
4299     switch (cmd) {
4300     case IPC_STAT:
4301     case IPC_SET:
4302     case MSG_STAT:
4303         if (target_to_host_msqid_ds(&dsarg,ptr))
4304             return -TARGET_EFAULT;
4305         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4306         if (host_to_target_msqid_ds(ptr,&dsarg))
4307             return -TARGET_EFAULT;
4308         break;
4309     case IPC_RMID:
4310         ret = get_errno(msgctl(msgid, cmd, NULL));
4311         break;
4312     case IPC_INFO:
4313     case MSG_INFO:
4314         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4315         if (host_to_target_msginfo(ptr, &msginfo))
4316             return -TARGET_EFAULT;
4317         break;
4318     }
4319 
4320     return ret;
4321 }
4322 
4323 struct target_msgbuf {
4324     abi_long mtype;
4325     char	mtext[1];
4326 };
4327 
4328 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4329                                  ssize_t msgsz, int msgflg)
4330 {
4331     struct target_msgbuf *target_mb;
4332     struct msgbuf *host_mb;
4333     abi_long ret = 0;
4334 
4335     if (msgsz < 0) {
4336         return -TARGET_EINVAL;
4337     }
4338 
4339     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4340         return -TARGET_EFAULT;
4341     host_mb = g_try_malloc(msgsz + sizeof(long));
4342     if (!host_mb) {
4343         unlock_user_struct(target_mb, msgp, 0);
4344         return -TARGET_ENOMEM;
4345     }
4346     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4347     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4348     ret = -TARGET_ENOSYS;
4349 #ifdef __NR_msgsnd
4350     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4351 #endif
4352 #ifdef __NR_ipc
4353     if (ret == -TARGET_ENOSYS) {
4354 #ifdef __s390x__
4355         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4356                                  host_mb));
4357 #else
4358         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4359                                  host_mb, 0));
4360 #endif
4361     }
4362 #endif
4363     g_free(host_mb);
4364     unlock_user_struct(target_mb, msgp, 0);
4365 
4366     return ret;
4367 }
4368 
4369 #ifdef __NR_ipc
4370 #if defined(__sparc__)
4371 /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4372 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4373 #elif defined(__s390x__)
4374 /* The s390 sys_ipc variant has only five parameters.  */
4375 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4376     ((long int[]){(long int)__msgp, __msgtyp})
4377 #else
4378 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4379     ((long int[]){(long int)__msgp, __msgtyp}), 0
4380 #endif
4381 #endif
4382 
4383 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4384                                  ssize_t msgsz, abi_long msgtyp,
4385                                  int msgflg)
4386 {
4387     struct target_msgbuf *target_mb;
4388     char *target_mtext;
4389     struct msgbuf *host_mb;
4390     abi_long ret = 0;
4391 
4392     if (msgsz < 0) {
4393         return -TARGET_EINVAL;
4394     }
4395 
4396     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4397         return -TARGET_EFAULT;
4398 
4399     host_mb = g_try_malloc(msgsz + sizeof(long));
4400     if (!host_mb) {
4401         ret = -TARGET_ENOMEM;
4402         goto end;
4403     }
4404     ret = -TARGET_ENOSYS;
4405 #ifdef __NR_msgrcv
4406     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4407 #endif
4408 #ifdef __NR_ipc
4409     if (ret == -TARGET_ENOSYS) {
4410         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4411                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4412     }
4413 #endif
4414 
4415     if (ret > 0) {
4416         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4417         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4418         if (!target_mtext) {
4419             ret = -TARGET_EFAULT;
4420             goto end;
4421         }
4422         memcpy(target_mb->mtext, host_mb->mtext, ret);
4423         unlock_user(target_mtext, target_mtext_addr, ret);
4424     }
4425 
4426     target_mb->mtype = tswapal(host_mb->mtype);
4427 
4428 end:
4429     if (target_mb)
4430         unlock_user_struct(target_mb, msgp, 1);
4431     g_free(host_mb);
4432     return ret;
4433 }
4434 
4435 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4436                                                abi_ulong target_addr)
4437 {
4438     struct target_shmid_ds *target_sd;
4439 
4440     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4441         return -TARGET_EFAULT;
4442     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4443         return -TARGET_EFAULT;
4444     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4445     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4446     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4447     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4448     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4449     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4450     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4451     unlock_user_struct(target_sd, target_addr, 0);
4452     return 0;
4453 }
4454 
4455 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4456                                                struct shmid_ds *host_sd)
4457 {
4458     struct target_shmid_ds *target_sd;
4459 
4460     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4461         return -TARGET_EFAULT;
4462     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4463         return -TARGET_EFAULT;
4464     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4465     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4466     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4467     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4468     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4469     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4470     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4471     unlock_user_struct(target_sd, target_addr, 1);
4472     return 0;
4473 }
4474 
4475 struct  target_shminfo {
4476     abi_ulong shmmax;
4477     abi_ulong shmmin;
4478     abi_ulong shmmni;
4479     abi_ulong shmseg;
4480     abi_ulong shmall;
4481 };
4482 
4483 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4484                                               struct shminfo *host_shminfo)
4485 {
4486     struct target_shminfo *target_shminfo;
4487     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4488         return -TARGET_EFAULT;
4489     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4490     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4491     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4492     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4493     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4494     unlock_user_struct(target_shminfo, target_addr, 1);
4495     return 0;
4496 }
4497 
4498 struct target_shm_info {
4499     int used_ids;
4500     abi_ulong shm_tot;
4501     abi_ulong shm_rss;
4502     abi_ulong shm_swp;
4503     abi_ulong swap_attempts;
4504     abi_ulong swap_successes;
4505 };
4506 
4507 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4508                                                struct shm_info *host_shm_info)
4509 {
4510     struct target_shm_info *target_shm_info;
4511     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4512         return -TARGET_EFAULT;
4513     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4514     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4515     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4516     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4517     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4518     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4519     unlock_user_struct(target_shm_info, target_addr, 1);
4520     return 0;
4521 }
4522 
4523 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4524 {
4525     struct shmid_ds dsarg;
4526     struct shminfo shminfo;
4527     struct shm_info shm_info;
4528     abi_long ret = -TARGET_EINVAL;
4529 
4530     cmd &= 0xff;
4531 
4532     switch(cmd) {
4533     case IPC_STAT:
4534     case IPC_SET:
4535     case SHM_STAT:
4536         if (target_to_host_shmid_ds(&dsarg, buf))
4537             return -TARGET_EFAULT;
4538         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4539         if (host_to_target_shmid_ds(buf, &dsarg))
4540             return -TARGET_EFAULT;
4541         break;
4542     case IPC_INFO:
4543         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4544         if (host_to_target_shminfo(buf, &shminfo))
4545             return -TARGET_EFAULT;
4546         break;
4547     case SHM_INFO:
4548         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4549         if (host_to_target_shm_info(buf, &shm_info))
4550             return -TARGET_EFAULT;
4551         break;
4552     case IPC_RMID:
4553     case SHM_LOCK:
4554     case SHM_UNLOCK:
4555         ret = get_errno(shmctl(shmid, cmd, NULL));
4556         break;
4557     }
4558 
4559     return ret;
4560 }
4561 
4562 #ifndef TARGET_FORCE_SHMLBA
4563 /* For most architectures, SHMLBA is the same as the page size;
4564  * some architectures have larger values, in which case they should
4565  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4566  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4567  * and defining its own value for SHMLBA.
4568  *
4569  * The kernel also permits SHMLBA to be set by the architecture to a
4570  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4571  * this means that addresses are rounded to the large size if
4572  * SHM_RND is set but addresses not aligned to that size are not rejected
4573  * as long as they are at least page-aligned. Since the only architecture
4574  * which uses this is ia64 this code doesn't provide for that oddity.
4575  */
4576 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4577 {
4578     return TARGET_PAGE_SIZE;
4579 }
4580 #endif
4581 
4582 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4583                                  int shmid, abi_ulong shmaddr, int shmflg)
4584 {
4585     abi_long raddr;
4586     void *host_raddr;
4587     struct shmid_ds shm_info;
4588     int i,ret;
4589     abi_ulong shmlba;
4590 
4591     /* find out the length of the shared memory segment */
4592     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4593     if (is_error(ret)) {
4594         /* can't get length, bail out */
4595         return ret;
4596     }
4597 
4598     shmlba = target_shmlba(cpu_env);
4599 
4600     if (shmaddr & (shmlba - 1)) {
4601         if (shmflg & SHM_RND) {
4602             shmaddr &= ~(shmlba - 1);
4603         } else {
4604             return -TARGET_EINVAL;
4605         }
4606     }
4607     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4608         return -TARGET_EINVAL;
4609     }
4610 
4611     mmap_lock();
4612 
4613     if (shmaddr)
4614         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4615     else {
4616         abi_ulong mmap_start;
4617 
4618         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4619         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4620 
4621         if (mmap_start == -1) {
4622             errno = ENOMEM;
4623             host_raddr = (void *)-1;
4624         } else
4625             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4626     }
4627 
4628     if (host_raddr == (void *)-1) {
4629         mmap_unlock();
4630         return get_errno((long)host_raddr);
4631     }
4632     raddr=h2g((unsigned long)host_raddr);
4633 
4634     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4635                    PAGE_VALID | PAGE_READ |
4636                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4637 
4638     for (i = 0; i < N_SHM_REGIONS; i++) {
4639         if (!shm_regions[i].in_use) {
4640             shm_regions[i].in_use = true;
4641             shm_regions[i].start = raddr;
4642             shm_regions[i].size = shm_info.shm_segsz;
4643             break;
4644         }
4645     }
4646 
4647     mmap_unlock();
4648     return raddr;
4649 
4650 }
4651 
4652 static inline abi_long do_shmdt(abi_ulong shmaddr)
4653 {
4654     int i;
4655     abi_long rv;
4656 
4657     mmap_lock();
4658 
4659     for (i = 0; i < N_SHM_REGIONS; ++i) {
4660         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4661             shm_regions[i].in_use = false;
4662             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4663             break;
4664         }
4665     }
4666     rv = get_errno(shmdt(g2h(shmaddr)));
4667 
4668     mmap_unlock();
4669 
4670     return rv;
4671 }
4672 
4673 #ifdef TARGET_NR_ipc
4674 /* ??? This only works with linear mappings.  */
4675 /* do_ipc() must return target values and target errnos. */
4676 static abi_long do_ipc(CPUArchState *cpu_env,
4677                        unsigned int call, abi_long first,
4678                        abi_long second, abi_long third,
4679                        abi_long ptr, abi_long fifth)
4680 {
4681     int version;
4682     abi_long ret = 0;
4683 
4684     version = call >> 16;
4685     call &= 0xffff;
4686 
4687     switch (call) {
4688     case IPCOP_semop:
4689         ret = do_semtimedop(first, ptr, second, 0, false);
4690         break;
4691     case IPCOP_semtimedop:
4692     /*
4693      * The s390 sys_ipc variant has only five parameters instead of six
4694      * (as for default variant) and the only difference is the handling of
4695      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4696      * to a struct timespec where the generic variant uses fifth parameter.
4697      */
4698 #if defined(TARGET_S390X)
4699         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4700 #else
4701         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4702 #endif
4703         break;
4704 
4705     case IPCOP_semget:
4706         ret = get_errno(semget(first, second, third));
4707         break;
4708 
4709     case IPCOP_semctl: {
4710         /* The semun argument to semctl is passed by value, so dereference the
4711          * ptr argument. */
4712         abi_ulong atptr;
4713         get_user_ual(atptr, ptr);
4714         ret = do_semctl(first, second, third, atptr);
4715         break;
4716     }
4717 
4718     case IPCOP_msgget:
4719         ret = get_errno(msgget(first, second));
4720         break;
4721 
4722     case IPCOP_msgsnd:
4723         ret = do_msgsnd(first, ptr, second, third);
4724         break;
4725 
4726     case IPCOP_msgctl:
4727         ret = do_msgctl(first, second, ptr);
4728         break;
4729 
4730     case IPCOP_msgrcv:
4731         switch (version) {
4732         case 0:
4733             {
4734                 struct target_ipc_kludge {
4735                     abi_long msgp;
4736                     abi_long msgtyp;
4737                 } *tmp;
4738 
4739                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4740                     ret = -TARGET_EFAULT;
4741                     break;
4742                 }
4743 
4744                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4745 
4746                 unlock_user_struct(tmp, ptr, 0);
4747                 break;
4748             }
4749         default:
4750             ret = do_msgrcv(first, ptr, second, fifth, third);
4751         }
4752         break;
4753 
4754     case IPCOP_shmat:
4755         switch (version) {
4756         default:
4757         {
4758             abi_ulong raddr;
4759             raddr = do_shmat(cpu_env, first, ptr, second);
4760             if (is_error(raddr))
4761                 return get_errno(raddr);
4762             if (put_user_ual(raddr, third))
4763                 return -TARGET_EFAULT;
4764             break;
4765         }
4766         case 1:
4767             ret = -TARGET_EINVAL;
4768             break;
4769         }
4770 	break;
4771     case IPCOP_shmdt:
4772         ret = do_shmdt(ptr);
4773 	break;
4774 
4775     case IPCOP_shmget:
4776 	/* IPC_* flag values are the same on all linux platforms */
4777 	ret = get_errno(shmget(first, second, third));
4778 	break;
4779 
4780 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4781     case IPCOP_shmctl:
4782         ret = do_shmctl(first, second, ptr);
4783         break;
4784     default:
4785         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4786                       call, version);
4787 	ret = -TARGET_ENOSYS;
4788 	break;
4789     }
4790     return ret;
4791 }
4792 #endif
4793 
4794 /* kernel structure types definitions */
4795 
4796 #define STRUCT(name, ...) STRUCT_ ## name,
4797 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4798 enum {
4799 #include "syscall_types.h"
4800 STRUCT_MAX
4801 };
4802 #undef STRUCT
4803 #undef STRUCT_SPECIAL
4804 
4805 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4806 #define STRUCT_SPECIAL(name)
4807 #include "syscall_types.h"
4808 #undef STRUCT
4809 #undef STRUCT_SPECIAL
4810 
4811 #define MAX_STRUCT_SIZE 4096
4812 
4813 #ifdef CONFIG_FIEMAP
4814 /* So fiemap access checks don't overflow on 32 bit systems.
4815  * This is very slightly smaller than the limit imposed by
4816  * the underlying kernel.
4817  */
4818 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4819                             / sizeof(struct fiemap_extent))
4820 
4821 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4822                                        int fd, int cmd, abi_long arg)
4823 {
4824     /* The parameter for this ioctl is a struct fiemap followed
4825      * by an array of struct fiemap_extent whose size is set
4826      * in fiemap->fm_extent_count. The array is filled in by the
4827      * ioctl.
4828      */
4829     int target_size_in, target_size_out;
4830     struct fiemap *fm;
4831     const argtype *arg_type = ie->arg_type;
4832     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4833     void *argptr, *p;
4834     abi_long ret;
4835     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4836     uint32_t outbufsz;
4837     int free_fm = 0;
4838 
4839     assert(arg_type[0] == TYPE_PTR);
4840     assert(ie->access == IOC_RW);
4841     arg_type++;
4842     target_size_in = thunk_type_size(arg_type, 0);
4843     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4844     if (!argptr) {
4845         return -TARGET_EFAULT;
4846     }
4847     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4848     unlock_user(argptr, arg, 0);
4849     fm = (struct fiemap *)buf_temp;
4850     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4851         return -TARGET_EINVAL;
4852     }
4853 
4854     outbufsz = sizeof (*fm) +
4855         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4856 
4857     if (outbufsz > MAX_STRUCT_SIZE) {
4858         /* We can't fit all the extents into the fixed size buffer.
4859          * Allocate one that is large enough and use it instead.
4860          */
4861         fm = g_try_malloc(outbufsz);
4862         if (!fm) {
4863             return -TARGET_ENOMEM;
4864         }
4865         memcpy(fm, buf_temp, sizeof(struct fiemap));
4866         free_fm = 1;
4867     }
4868     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4869     if (!is_error(ret)) {
4870         target_size_out = target_size_in;
4871         /* An extent_count of 0 means we were only counting the extents
4872          * so there are no structs to copy
4873          */
4874         if (fm->fm_extent_count != 0) {
4875             target_size_out += fm->fm_mapped_extents * extent_size;
4876         }
4877         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4878         if (!argptr) {
4879             ret = -TARGET_EFAULT;
4880         } else {
4881             /* Convert the struct fiemap */
4882             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4883             if (fm->fm_extent_count != 0) {
4884                 p = argptr + target_size_in;
4885                 /* ...and then all the struct fiemap_extents */
4886                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4887                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4888                                   THUNK_TARGET);
4889                     p += extent_size;
4890                 }
4891             }
4892             unlock_user(argptr, arg, target_size_out);
4893         }
4894     }
4895     if (free_fm) {
4896         g_free(fm);
4897     }
4898     return ret;
4899 }
4900 #endif
4901 
4902 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4903                                 int fd, int cmd, abi_long arg)
4904 {
4905     const argtype *arg_type = ie->arg_type;
4906     int target_size;
4907     void *argptr;
4908     int ret;
4909     struct ifconf *host_ifconf;
4910     uint32_t outbufsz;
4911     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4912     int target_ifreq_size;
4913     int nb_ifreq;
4914     int free_buf = 0;
4915     int i;
4916     int target_ifc_len;
4917     abi_long target_ifc_buf;
4918     int host_ifc_len;
4919     char *host_ifc_buf;
4920 
4921     assert(arg_type[0] == TYPE_PTR);
4922     assert(ie->access == IOC_RW);
4923 
4924     arg_type++;
4925     target_size = thunk_type_size(arg_type, 0);
4926 
4927     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4928     if (!argptr)
4929         return -TARGET_EFAULT;
4930     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4931     unlock_user(argptr, arg, 0);
4932 
4933     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4934     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4935     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4936 
4937     if (target_ifc_buf != 0) {
4938         target_ifc_len = host_ifconf->ifc_len;
4939         nb_ifreq = target_ifc_len / target_ifreq_size;
4940         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4941 
4942         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4943         if (outbufsz > MAX_STRUCT_SIZE) {
4944             /*
4945              * We can't fit all the extents into the fixed size buffer.
4946              * Allocate one that is large enough and use it instead.
4947              */
4948             host_ifconf = malloc(outbufsz);
4949             if (!host_ifconf) {
4950                 return -TARGET_ENOMEM;
4951             }
4952             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4953             free_buf = 1;
4954         }
4955         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4956 
4957         host_ifconf->ifc_len = host_ifc_len;
4958     } else {
4959       host_ifc_buf = NULL;
4960     }
4961     host_ifconf->ifc_buf = host_ifc_buf;
4962 
4963     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4964     if (!is_error(ret)) {
4965 	/* convert host ifc_len to target ifc_len */
4966 
4967         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4968         target_ifc_len = nb_ifreq * target_ifreq_size;
4969         host_ifconf->ifc_len = target_ifc_len;
4970 
4971 	/* restore target ifc_buf */
4972 
4973         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4974 
4975 	/* copy struct ifconf to target user */
4976 
4977         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4978         if (!argptr)
4979             return -TARGET_EFAULT;
4980         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4981         unlock_user(argptr, arg, target_size);
4982 
4983         if (target_ifc_buf != 0) {
4984             /* copy ifreq[] to target user */
4985             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4986             for (i = 0; i < nb_ifreq ; i++) {
4987                 thunk_convert(argptr + i * target_ifreq_size,
4988                               host_ifc_buf + i * sizeof(struct ifreq),
4989                               ifreq_arg_type, THUNK_TARGET);
4990             }
4991             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4992         }
4993     }
4994 
4995     if (free_buf) {
4996         free(host_ifconf);
4997     }
4998 
4999     return ret;
5000 }
5001 
5002 #if defined(CONFIG_USBFS)
5003 #if HOST_LONG_BITS > 64
5004 #error USBDEVFS thunks do not support >64 bit hosts yet.
5005 #endif
5006 struct live_urb {
5007     uint64_t target_urb_adr;
5008     uint64_t target_buf_adr;
5009     char *target_buf_ptr;
5010     struct usbdevfs_urb host_urb;
5011 };
5012 
5013 static GHashTable *usbdevfs_urb_hashtable(void)
5014 {
5015     static GHashTable *urb_hashtable;
5016 
5017     if (!urb_hashtable) {
5018         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5019     }
5020     return urb_hashtable;
5021 }
5022 
5023 static void urb_hashtable_insert(struct live_urb *urb)
5024 {
5025     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5026     g_hash_table_insert(urb_hashtable, urb, urb);
5027 }
5028 
5029 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5030 {
5031     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5032     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5033 }
5034 
5035 static void urb_hashtable_remove(struct live_urb *urb)
5036 {
5037     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5038     g_hash_table_remove(urb_hashtable, urb);
5039 }
5040 
5041 static abi_long
5042 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5043                           int fd, int cmd, abi_long arg)
5044 {
5045     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5046     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5047     struct live_urb *lurb;
5048     void *argptr;
5049     uint64_t hurb;
5050     int target_size;
5051     uintptr_t target_urb_adr;
5052     abi_long ret;
5053 
5054     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5055 
5056     memset(buf_temp, 0, sizeof(uint64_t));
5057     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5058     if (is_error(ret)) {
5059         return ret;
5060     }
5061 
5062     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5063     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5064     if (!lurb->target_urb_adr) {
5065         return -TARGET_EFAULT;
5066     }
5067     urb_hashtable_remove(lurb);
5068     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5069         lurb->host_urb.buffer_length);
5070     lurb->target_buf_ptr = NULL;
5071 
5072     /* restore the guest buffer pointer */
5073     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5074 
5075     /* update the guest urb struct */
5076     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5077     if (!argptr) {
5078         g_free(lurb);
5079         return -TARGET_EFAULT;
5080     }
5081     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5082     unlock_user(argptr, lurb->target_urb_adr, target_size);
5083 
5084     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5085     /* write back the urb handle */
5086     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5087     if (!argptr) {
5088         g_free(lurb);
5089         return -TARGET_EFAULT;
5090     }
5091 
5092     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5093     target_urb_adr = lurb->target_urb_adr;
5094     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5095     unlock_user(argptr, arg, target_size);
5096 
5097     g_free(lurb);
5098     return ret;
5099 }
5100 
5101 static abi_long
5102 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5103                              uint8_t *buf_temp __attribute__((unused)),
5104                              int fd, int cmd, abi_long arg)
5105 {
5106     struct live_urb *lurb;
5107 
5108     /* map target address back to host URB with metadata. */
5109     lurb = urb_hashtable_lookup(arg);
5110     if (!lurb) {
5111         return -TARGET_EFAULT;
5112     }
5113     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5114 }
5115 
5116 static abi_long
5117 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5118                             int fd, int cmd, abi_long arg)
5119 {
5120     const argtype *arg_type = ie->arg_type;
5121     int target_size;
5122     abi_long ret;
5123     void *argptr;
5124     int rw_dir;
5125     struct live_urb *lurb;
5126 
5127     /*
5128      * each submitted URB needs to map to a unique ID for the
5129      * kernel, and that unique ID needs to be a pointer to
5130      * host memory.  hence, we need to malloc for each URB.
5131      * isochronous transfers have a variable length struct.
5132      */
5133     arg_type++;
5134     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5135 
5136     /* construct host copy of urb and metadata */
5137     lurb = g_try_malloc0(sizeof(struct live_urb));
5138     if (!lurb) {
5139         return -TARGET_ENOMEM;
5140     }
5141 
5142     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5143     if (!argptr) {
5144         g_free(lurb);
5145         return -TARGET_EFAULT;
5146     }
5147     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5148     unlock_user(argptr, arg, 0);
5149 
5150     lurb->target_urb_adr = arg;
5151     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5152 
5153     /* buffer space used depends on endpoint type so lock the entire buffer */
5154     /* control type urbs should check the buffer contents for true direction */
5155     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5156     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5157         lurb->host_urb.buffer_length, 1);
5158     if (lurb->target_buf_ptr == NULL) {
5159         g_free(lurb);
5160         return -TARGET_EFAULT;
5161     }
5162 
5163     /* update buffer pointer in host copy */
5164     lurb->host_urb.buffer = lurb->target_buf_ptr;
5165 
5166     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5167     if (is_error(ret)) {
5168         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5169         g_free(lurb);
5170     } else {
5171         urb_hashtable_insert(lurb);
5172     }
5173 
5174     return ret;
5175 }
5176 #endif /* CONFIG_USBFS */
5177 
5178 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5179                             int cmd, abi_long arg)
5180 {
5181     void *argptr;
5182     struct dm_ioctl *host_dm;
5183     abi_long guest_data;
5184     uint32_t guest_data_size;
5185     int target_size;
5186     const argtype *arg_type = ie->arg_type;
5187     abi_long ret;
5188     void *big_buf = NULL;
5189     char *host_data;
5190 
5191     arg_type++;
5192     target_size = thunk_type_size(arg_type, 0);
5193     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5194     if (!argptr) {
5195         ret = -TARGET_EFAULT;
5196         goto out;
5197     }
5198     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5199     unlock_user(argptr, arg, 0);
5200 
5201     /* buf_temp is too small, so fetch things into a bigger buffer */
5202     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5203     memcpy(big_buf, buf_temp, target_size);
5204     buf_temp = big_buf;
5205     host_dm = big_buf;
5206 
5207     guest_data = arg + host_dm->data_start;
5208     if ((guest_data - arg) < 0) {
5209         ret = -TARGET_EINVAL;
5210         goto out;
5211     }
5212     guest_data_size = host_dm->data_size - host_dm->data_start;
5213     host_data = (char*)host_dm + host_dm->data_start;
5214 
5215     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5216     if (!argptr) {
5217         ret = -TARGET_EFAULT;
5218         goto out;
5219     }
5220 
5221     switch (ie->host_cmd) {
5222     case DM_REMOVE_ALL:
5223     case DM_LIST_DEVICES:
5224     case DM_DEV_CREATE:
5225     case DM_DEV_REMOVE:
5226     case DM_DEV_SUSPEND:
5227     case DM_DEV_STATUS:
5228     case DM_DEV_WAIT:
5229     case DM_TABLE_STATUS:
5230     case DM_TABLE_CLEAR:
5231     case DM_TABLE_DEPS:
5232     case DM_LIST_VERSIONS:
5233         /* no input data */
5234         break;
5235     case DM_DEV_RENAME:
5236     case DM_DEV_SET_GEOMETRY:
5237         /* data contains only strings */
5238         memcpy(host_data, argptr, guest_data_size);
5239         break;
5240     case DM_TARGET_MSG:
5241         memcpy(host_data, argptr, guest_data_size);
5242         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5243         break;
5244     case DM_TABLE_LOAD:
5245     {
5246         void *gspec = argptr;
5247         void *cur_data = host_data;
5248         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5249         int spec_size = thunk_type_size(arg_type, 0);
5250         int i;
5251 
5252         for (i = 0; i < host_dm->target_count; i++) {
5253             struct dm_target_spec *spec = cur_data;
5254             uint32_t next;
5255             int slen;
5256 
5257             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5258             slen = strlen((char*)gspec + spec_size) + 1;
5259             next = spec->next;
5260             spec->next = sizeof(*spec) + slen;
5261             strcpy((char*)&spec[1], gspec + spec_size);
5262             gspec += next;
5263             cur_data += spec->next;
5264         }
5265         break;
5266     }
5267     default:
5268         ret = -TARGET_EINVAL;
5269         unlock_user(argptr, guest_data, 0);
5270         goto out;
5271     }
5272     unlock_user(argptr, guest_data, 0);
5273 
5274     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5275     if (!is_error(ret)) {
5276         guest_data = arg + host_dm->data_start;
5277         guest_data_size = host_dm->data_size - host_dm->data_start;
5278         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5279         switch (ie->host_cmd) {
5280         case DM_REMOVE_ALL:
5281         case DM_DEV_CREATE:
5282         case DM_DEV_REMOVE:
5283         case DM_DEV_RENAME:
5284         case DM_DEV_SUSPEND:
5285         case DM_DEV_STATUS:
5286         case DM_TABLE_LOAD:
5287         case DM_TABLE_CLEAR:
5288         case DM_TARGET_MSG:
5289         case DM_DEV_SET_GEOMETRY:
5290             /* no return data */
5291             break;
5292         case DM_LIST_DEVICES:
5293         {
5294             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5295             uint32_t remaining_data = guest_data_size;
5296             void *cur_data = argptr;
5297             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5298             int nl_size = 12; /* can't use thunk_size due to alignment */
5299 
5300             while (1) {
5301                 uint32_t next = nl->next;
5302                 if (next) {
5303                     nl->next = nl_size + (strlen(nl->name) + 1);
5304                 }
5305                 if (remaining_data < nl->next) {
5306                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5307                     break;
5308                 }
5309                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5310                 strcpy(cur_data + nl_size, nl->name);
5311                 cur_data += nl->next;
5312                 remaining_data -= nl->next;
5313                 if (!next) {
5314                     break;
5315                 }
5316                 nl = (void*)nl + next;
5317             }
5318             break;
5319         }
5320         case DM_DEV_WAIT:
5321         case DM_TABLE_STATUS:
5322         {
5323             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5324             void *cur_data = argptr;
5325             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5326             int spec_size = thunk_type_size(arg_type, 0);
5327             int i;
5328 
5329             for (i = 0; i < host_dm->target_count; i++) {
5330                 uint32_t next = spec->next;
5331                 int slen = strlen((char*)&spec[1]) + 1;
5332                 spec->next = (cur_data - argptr) + spec_size + slen;
5333                 if (guest_data_size < spec->next) {
5334                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5335                     break;
5336                 }
5337                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5338                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5339                 cur_data = argptr + spec->next;
5340                 spec = (void*)host_dm + host_dm->data_start + next;
5341             }
5342             break;
5343         }
5344         case DM_TABLE_DEPS:
5345         {
5346             void *hdata = (void*)host_dm + host_dm->data_start;
5347             int count = *(uint32_t*)hdata;
5348             uint64_t *hdev = hdata + 8;
5349             uint64_t *gdev = argptr + 8;
5350             int i;
5351 
5352             *(uint32_t*)argptr = tswap32(count);
5353             for (i = 0; i < count; i++) {
5354                 *gdev = tswap64(*hdev);
5355                 gdev++;
5356                 hdev++;
5357             }
5358             break;
5359         }
5360         case DM_LIST_VERSIONS:
5361         {
5362             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5363             uint32_t remaining_data = guest_data_size;
5364             void *cur_data = argptr;
5365             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5366             int vers_size = thunk_type_size(arg_type, 0);
5367 
5368             while (1) {
5369                 uint32_t next = vers->next;
5370                 if (next) {
5371                     vers->next = vers_size + (strlen(vers->name) + 1);
5372                 }
5373                 if (remaining_data < vers->next) {
5374                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5375                     break;
5376                 }
5377                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5378                 strcpy(cur_data + vers_size, vers->name);
5379                 cur_data += vers->next;
5380                 remaining_data -= vers->next;
5381                 if (!next) {
5382                     break;
5383                 }
5384                 vers = (void*)vers + next;
5385             }
5386             break;
5387         }
5388         default:
5389             unlock_user(argptr, guest_data, 0);
5390             ret = -TARGET_EINVAL;
5391             goto out;
5392         }
5393         unlock_user(argptr, guest_data, guest_data_size);
5394 
5395         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5396         if (!argptr) {
5397             ret = -TARGET_EFAULT;
5398             goto out;
5399         }
5400         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5401         unlock_user(argptr, arg, target_size);
5402     }
5403 out:
5404     g_free(big_buf);
5405     return ret;
5406 }
5407 
5408 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5409                                int cmd, abi_long arg)
5410 {
5411     void *argptr;
5412     int target_size;
5413     const argtype *arg_type = ie->arg_type;
5414     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5415     abi_long ret;
5416 
5417     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5418     struct blkpg_partition host_part;
5419 
5420     /* Read and convert blkpg */
5421     arg_type++;
5422     target_size = thunk_type_size(arg_type, 0);
5423     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5424     if (!argptr) {
5425         ret = -TARGET_EFAULT;
5426         goto out;
5427     }
5428     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5429     unlock_user(argptr, arg, 0);
5430 
5431     switch (host_blkpg->op) {
5432     case BLKPG_ADD_PARTITION:
5433     case BLKPG_DEL_PARTITION:
5434         /* payload is struct blkpg_partition */
5435         break;
5436     default:
5437         /* Unknown opcode */
5438         ret = -TARGET_EINVAL;
5439         goto out;
5440     }
5441 
5442     /* Read and convert blkpg->data */
5443     arg = (abi_long)(uintptr_t)host_blkpg->data;
5444     target_size = thunk_type_size(part_arg_type, 0);
5445     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5446     if (!argptr) {
5447         ret = -TARGET_EFAULT;
5448         goto out;
5449     }
5450     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5451     unlock_user(argptr, arg, 0);
5452 
5453     /* Swizzle the data pointer to our local copy and call! */
5454     host_blkpg->data = &host_part;
5455     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5456 
5457 out:
5458     return ret;
5459 }
5460 
5461 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5462                                 int fd, int cmd, abi_long arg)
5463 {
5464     const argtype *arg_type = ie->arg_type;
5465     const StructEntry *se;
5466     const argtype *field_types;
5467     const int *dst_offsets, *src_offsets;
5468     int target_size;
5469     void *argptr;
5470     abi_ulong *target_rt_dev_ptr = NULL;
5471     unsigned long *host_rt_dev_ptr = NULL;
5472     abi_long ret;
5473     int i;
5474 
5475     assert(ie->access == IOC_W);
5476     assert(*arg_type == TYPE_PTR);
5477     arg_type++;
5478     assert(*arg_type == TYPE_STRUCT);
5479     target_size = thunk_type_size(arg_type, 0);
5480     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5481     if (!argptr) {
5482         return -TARGET_EFAULT;
5483     }
5484     arg_type++;
5485     assert(*arg_type == (int)STRUCT_rtentry);
5486     se = struct_entries + *arg_type++;
5487     assert(se->convert[0] == NULL);
5488     /* convert struct here to be able to catch rt_dev string */
5489     field_types = se->field_types;
5490     dst_offsets = se->field_offsets[THUNK_HOST];
5491     src_offsets = se->field_offsets[THUNK_TARGET];
5492     for (i = 0; i < se->nb_fields; i++) {
5493         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5494             assert(*field_types == TYPE_PTRVOID);
5495             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5496             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5497             if (*target_rt_dev_ptr != 0) {
5498                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5499                                                   tswapal(*target_rt_dev_ptr));
5500                 if (!*host_rt_dev_ptr) {
5501                     unlock_user(argptr, arg, 0);
5502                     return -TARGET_EFAULT;
5503                 }
5504             } else {
5505                 *host_rt_dev_ptr = 0;
5506             }
5507             field_types++;
5508             continue;
5509         }
5510         field_types = thunk_convert(buf_temp + dst_offsets[i],
5511                                     argptr + src_offsets[i],
5512                                     field_types, THUNK_HOST);
5513     }
5514     unlock_user(argptr, arg, 0);
5515 
5516     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5517 
5518     assert(host_rt_dev_ptr != NULL);
5519     assert(target_rt_dev_ptr != NULL);
5520     if (*host_rt_dev_ptr != 0) {
5521         unlock_user((void *)*host_rt_dev_ptr,
5522                     *target_rt_dev_ptr, 0);
5523     }
5524     return ret;
5525 }
5526 
5527 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5528                                      int fd, int cmd, abi_long arg)
5529 {
5530     int sig = target_to_host_signal(arg);
5531     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5532 }
5533 
5534 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5535                                     int fd, int cmd, abi_long arg)
5536 {
5537     struct timeval tv;
5538     abi_long ret;
5539 
5540     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5541     if (is_error(ret)) {
5542         return ret;
5543     }
5544 
5545     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5546         if (copy_to_user_timeval(arg, &tv)) {
5547             return -TARGET_EFAULT;
5548         }
5549     } else {
5550         if (copy_to_user_timeval64(arg, &tv)) {
5551             return -TARGET_EFAULT;
5552         }
5553     }
5554 
5555     return ret;
5556 }
5557 
5558 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5559                                       int fd, int cmd, abi_long arg)
5560 {
5561     struct timespec ts;
5562     abi_long ret;
5563 
5564     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5565     if (is_error(ret)) {
5566         return ret;
5567     }
5568 
5569     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5570         if (host_to_target_timespec(arg, &ts)) {
5571             return -TARGET_EFAULT;
5572         }
5573     } else{
5574         if (host_to_target_timespec64(arg, &ts)) {
5575             return -TARGET_EFAULT;
5576         }
5577     }
5578 
5579     return ret;
5580 }
5581 
5582 #ifdef TIOCGPTPEER
5583 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5584                                      int fd, int cmd, abi_long arg)
5585 {
5586     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5587     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5588 }
5589 #endif
5590 
5591 #ifdef HAVE_DRM_H
5592 
5593 static void unlock_drm_version(struct drm_version *host_ver,
5594                                struct target_drm_version *target_ver,
5595                                bool copy)
5596 {
5597     unlock_user(host_ver->name, target_ver->name,
5598                                 copy ? host_ver->name_len : 0);
5599     unlock_user(host_ver->date, target_ver->date,
5600                                 copy ? host_ver->date_len : 0);
5601     unlock_user(host_ver->desc, target_ver->desc,
5602                                 copy ? host_ver->desc_len : 0);
5603 }
5604 
5605 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5606                                           struct target_drm_version *target_ver)
5607 {
5608     memset(host_ver, 0, sizeof(*host_ver));
5609 
5610     __get_user(host_ver->name_len, &target_ver->name_len);
5611     if (host_ver->name_len) {
5612         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5613                                    target_ver->name_len, 0);
5614         if (!host_ver->name) {
5615             return -EFAULT;
5616         }
5617     }
5618 
5619     __get_user(host_ver->date_len, &target_ver->date_len);
5620     if (host_ver->date_len) {
5621         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5622                                    target_ver->date_len, 0);
5623         if (!host_ver->date) {
5624             goto err;
5625         }
5626     }
5627 
5628     __get_user(host_ver->desc_len, &target_ver->desc_len);
5629     if (host_ver->desc_len) {
5630         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5631                                    target_ver->desc_len, 0);
5632         if (!host_ver->desc) {
5633             goto err;
5634         }
5635     }
5636 
5637     return 0;
5638 err:
5639     unlock_drm_version(host_ver, target_ver, false);
5640     return -EFAULT;
5641 }
5642 
5643 static inline void host_to_target_drmversion(
5644                                           struct target_drm_version *target_ver,
5645                                           struct drm_version *host_ver)
5646 {
5647     __put_user(host_ver->version_major, &target_ver->version_major);
5648     __put_user(host_ver->version_minor, &target_ver->version_minor);
5649     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5650     __put_user(host_ver->name_len, &target_ver->name_len);
5651     __put_user(host_ver->date_len, &target_ver->date_len);
5652     __put_user(host_ver->desc_len, &target_ver->desc_len);
5653     unlock_drm_version(host_ver, target_ver, true);
5654 }
5655 
5656 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5657                              int fd, int cmd, abi_long arg)
5658 {
5659     struct drm_version *ver;
5660     struct target_drm_version *target_ver;
5661     abi_long ret;
5662 
5663     switch (ie->host_cmd) {
5664     case DRM_IOCTL_VERSION:
5665         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5666             return -TARGET_EFAULT;
5667         }
5668         ver = (struct drm_version *)buf_temp;
5669         ret = target_to_host_drmversion(ver, target_ver);
5670         if (!is_error(ret)) {
5671             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5672             if (is_error(ret)) {
5673                 unlock_drm_version(ver, target_ver, false);
5674             } else {
5675                 host_to_target_drmversion(target_ver, ver);
5676             }
5677         }
5678         unlock_user_struct(target_ver, arg, 0);
5679         return ret;
5680     }
5681     return -TARGET_ENOSYS;
5682 }
5683 
5684 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5685                                            struct drm_i915_getparam *gparam,
5686                                            int fd, abi_long arg)
5687 {
5688     abi_long ret;
5689     int value;
5690     struct target_drm_i915_getparam *target_gparam;
5691 
5692     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5693         return -TARGET_EFAULT;
5694     }
5695 
5696     __get_user(gparam->param, &target_gparam->param);
5697     gparam->value = &value;
5698     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5699     put_user_s32(value, target_gparam->value);
5700 
5701     unlock_user_struct(target_gparam, arg, 0);
5702     return ret;
5703 }
5704 
5705 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5706                                   int fd, int cmd, abi_long arg)
5707 {
5708     switch (ie->host_cmd) {
5709     case DRM_IOCTL_I915_GETPARAM:
5710         return do_ioctl_drm_i915_getparam(ie,
5711                                           (struct drm_i915_getparam *)buf_temp,
5712                                           fd, arg);
5713     default:
5714         return -TARGET_ENOSYS;
5715     }
5716 }
5717 
5718 #endif
5719 
5720 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5721                                         int fd, int cmd, abi_long arg)
5722 {
5723     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5724     struct tun_filter *target_filter;
5725     char *target_addr;
5726 
5727     assert(ie->access == IOC_W);
5728 
5729     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5730     if (!target_filter) {
5731         return -TARGET_EFAULT;
5732     }
5733     filter->flags = tswap16(target_filter->flags);
5734     filter->count = tswap16(target_filter->count);
5735     unlock_user(target_filter, arg, 0);
5736 
5737     if (filter->count) {
5738         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5739             MAX_STRUCT_SIZE) {
5740             return -TARGET_EFAULT;
5741         }
5742 
5743         target_addr = lock_user(VERIFY_READ,
5744                                 arg + offsetof(struct tun_filter, addr),
5745                                 filter->count * ETH_ALEN, 1);
5746         if (!target_addr) {
5747             return -TARGET_EFAULT;
5748         }
5749         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5750         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5751     }
5752 
5753     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5754 }
5755 
5756 IOCTLEntry ioctl_entries[] = {
5757 #define IOCTL(cmd, access, ...) \
5758     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5759 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5760     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5761 #define IOCTL_IGNORE(cmd) \
5762     { TARGET_ ## cmd, 0, #cmd },
5763 #include "ioctls.h"
5764     { 0, 0, },
5765 };
5766 
5767 /* ??? Implement proper locking for ioctls.  */
5768 /* do_ioctl() Must return target values and target errnos. */
5769 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5770 {
5771     const IOCTLEntry *ie;
5772     const argtype *arg_type;
5773     abi_long ret;
5774     uint8_t buf_temp[MAX_STRUCT_SIZE];
5775     int target_size;
5776     void *argptr;
5777 
5778     ie = ioctl_entries;
5779     for(;;) {
5780         if (ie->target_cmd == 0) {
5781             qemu_log_mask(
5782                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5783             return -TARGET_ENOSYS;
5784         }
5785         if (ie->target_cmd == cmd)
5786             break;
5787         ie++;
5788     }
5789     arg_type = ie->arg_type;
5790     if (ie->do_ioctl) {
5791         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5792     } else if (!ie->host_cmd) {
5793         /* Some architectures define BSD ioctls in their headers
5794            that are not implemented in Linux.  */
5795         return -TARGET_ENOSYS;
5796     }
5797 
5798     switch(arg_type[0]) {
5799     case TYPE_NULL:
5800         /* no argument */
5801         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5802         break;
5803     case TYPE_PTRVOID:
5804     case TYPE_INT:
5805     case TYPE_LONG:
5806     case TYPE_ULONG:
5807         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5808         break;
5809     case TYPE_PTR:
5810         arg_type++;
5811         target_size = thunk_type_size(arg_type, 0);
5812         switch(ie->access) {
5813         case IOC_R:
5814             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5815             if (!is_error(ret)) {
5816                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5817                 if (!argptr)
5818                     return -TARGET_EFAULT;
5819                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5820                 unlock_user(argptr, arg, target_size);
5821             }
5822             break;
5823         case IOC_W:
5824             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5825             if (!argptr)
5826                 return -TARGET_EFAULT;
5827             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5828             unlock_user(argptr, arg, 0);
5829             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5830             break;
5831         default:
5832         case IOC_RW:
5833             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5834             if (!argptr)
5835                 return -TARGET_EFAULT;
5836             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5837             unlock_user(argptr, arg, 0);
5838             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5839             if (!is_error(ret)) {
5840                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5841                 if (!argptr)
5842                     return -TARGET_EFAULT;
5843                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5844                 unlock_user(argptr, arg, target_size);
5845             }
5846             break;
5847         }
5848         break;
5849     default:
5850         qemu_log_mask(LOG_UNIMP,
5851                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5852                       (long)cmd, arg_type[0]);
5853         ret = -TARGET_ENOSYS;
5854         break;
5855     }
5856     return ret;
5857 }
5858 
5859 static const bitmask_transtbl iflag_tbl[] = {
5860         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5861         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5862         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5863         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5864         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5865         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5866         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5867         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5868         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5869         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5870         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5871         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5872         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5873         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5874         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5875         { 0, 0, 0, 0 }
5876 };
5877 
5878 static const bitmask_transtbl oflag_tbl[] = {
5879 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5880 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5881 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5882 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5883 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5884 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5885 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5886 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5887 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5888 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5889 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5890 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5891 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5892 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5893 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5894 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5895 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5896 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5897 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5898 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5899 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5900 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5901 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5902 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5903 	{ 0, 0, 0, 0 }
5904 };
5905 
5906 static const bitmask_transtbl cflag_tbl[] = {
5907 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5908 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5909 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5910 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5911 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5912 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5913 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5914 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5915 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5916 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5917 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5918 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5919 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5920 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5921 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5922 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5923 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5924 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5925 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5926 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5927 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5928 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5929 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5930 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5931 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5932 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5933 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5934 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5935 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5936 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5937 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5938 	{ 0, 0, 0, 0 }
5939 };
5940 
5941 static const bitmask_transtbl lflag_tbl[] = {
5942   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5943   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5944   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5945   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5946   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5947   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5948   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5949   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5950   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5951   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5952   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5953   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5954   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5955   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5956   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5957   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5958   { 0, 0, 0, 0 }
5959 };
5960 
5961 static void target_to_host_termios (void *dst, const void *src)
5962 {
5963     struct host_termios *host = dst;
5964     const struct target_termios *target = src;
5965 
5966     host->c_iflag =
5967         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5968     host->c_oflag =
5969         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5970     host->c_cflag =
5971         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5972     host->c_lflag =
5973         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5974     host->c_line = target->c_line;
5975 
5976     memset(host->c_cc, 0, sizeof(host->c_cc));
5977     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5978     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5979     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5980     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5981     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5982     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5983     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5984     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5985     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5986     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5987     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5988     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5989     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5990     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5991     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5992     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5993     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5994 }
5995 
5996 static void host_to_target_termios (void *dst, const void *src)
5997 {
5998     struct target_termios *target = dst;
5999     const struct host_termios *host = src;
6000 
6001     target->c_iflag =
6002         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
6003     target->c_oflag =
6004         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
6005     target->c_cflag =
6006         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
6007     target->c_lflag =
6008         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6009     target->c_line = host->c_line;
6010 
6011     memset(target->c_cc, 0, sizeof(target->c_cc));
6012     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6013     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6014     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6015     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6016     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6017     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6018     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6019     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6020     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6021     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6022     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6023     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6024     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6025     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6026     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6027     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6028     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6029 }
6030 
6031 static const StructEntry struct_termios_def = {
6032     .convert = { host_to_target_termios, target_to_host_termios },
6033     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6034     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6035     .print = print_termios,
6036 };
6037 
6038 static bitmask_transtbl mmap_flags_tbl[] = {
6039     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6040     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6041     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6042     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6043       MAP_ANONYMOUS, MAP_ANONYMOUS },
6044     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6045       MAP_GROWSDOWN, MAP_GROWSDOWN },
6046     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6047       MAP_DENYWRITE, MAP_DENYWRITE },
6048     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6049       MAP_EXECUTABLE, MAP_EXECUTABLE },
6050     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6051     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6052       MAP_NORESERVE, MAP_NORESERVE },
6053     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6054     /* MAP_STACK had been ignored by the kernel for quite some time.
6055        Recognize it for the target insofar as we do not want to pass
6056        it through to the host.  */
6057     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6058     { 0, 0, 0, 0 }
6059 };
6060 
6061 /*
6062  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6063  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6064  */
6065 #if defined(TARGET_I386)
6066 
6067 /* NOTE: there is really one LDT for all the threads */
6068 static uint8_t *ldt_table;
6069 
6070 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6071 {
6072     int size;
6073     void *p;
6074 
6075     if (!ldt_table)
6076         return 0;
6077     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6078     if (size > bytecount)
6079         size = bytecount;
6080     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6081     if (!p)
6082         return -TARGET_EFAULT;
6083     /* ??? Should this by byteswapped?  */
6084     memcpy(p, ldt_table, size);
6085     unlock_user(p, ptr, size);
6086     return size;
6087 }
6088 
6089 /* XXX: add locking support */
6090 static abi_long write_ldt(CPUX86State *env,
6091                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6092 {
6093     struct target_modify_ldt_ldt_s ldt_info;
6094     struct target_modify_ldt_ldt_s *target_ldt_info;
6095     int seg_32bit, contents, read_exec_only, limit_in_pages;
6096     int seg_not_present, useable, lm;
6097     uint32_t *lp, entry_1, entry_2;
6098 
6099     if (bytecount != sizeof(ldt_info))
6100         return -TARGET_EINVAL;
6101     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6102         return -TARGET_EFAULT;
6103     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6104     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6105     ldt_info.limit = tswap32(target_ldt_info->limit);
6106     ldt_info.flags = tswap32(target_ldt_info->flags);
6107     unlock_user_struct(target_ldt_info, ptr, 0);
6108 
6109     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6110         return -TARGET_EINVAL;
6111     seg_32bit = ldt_info.flags & 1;
6112     contents = (ldt_info.flags >> 1) & 3;
6113     read_exec_only = (ldt_info.flags >> 3) & 1;
6114     limit_in_pages = (ldt_info.flags >> 4) & 1;
6115     seg_not_present = (ldt_info.flags >> 5) & 1;
6116     useable = (ldt_info.flags >> 6) & 1;
6117 #ifdef TARGET_ABI32
6118     lm = 0;
6119 #else
6120     lm = (ldt_info.flags >> 7) & 1;
6121 #endif
6122     if (contents == 3) {
6123         if (oldmode)
6124             return -TARGET_EINVAL;
6125         if (seg_not_present == 0)
6126             return -TARGET_EINVAL;
6127     }
6128     /* allocate the LDT */
6129     if (!ldt_table) {
6130         env->ldt.base = target_mmap(0,
6131                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6132                                     PROT_READ|PROT_WRITE,
6133                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6134         if (env->ldt.base == -1)
6135             return -TARGET_ENOMEM;
6136         memset(g2h(env->ldt.base), 0,
6137                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6138         env->ldt.limit = 0xffff;
6139         ldt_table = g2h(env->ldt.base);
6140     }
6141 
6142     /* NOTE: same code as Linux kernel */
6143     /* Allow LDTs to be cleared by the user. */
6144     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6145         if (oldmode ||
6146             (contents == 0		&&
6147              read_exec_only == 1	&&
6148              seg_32bit == 0		&&
6149              limit_in_pages == 0	&&
6150              seg_not_present == 1	&&
6151              useable == 0 )) {
6152             entry_1 = 0;
6153             entry_2 = 0;
6154             goto install;
6155         }
6156     }
6157 
6158     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6159         (ldt_info.limit & 0x0ffff);
6160     entry_2 = (ldt_info.base_addr & 0xff000000) |
6161         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6162         (ldt_info.limit & 0xf0000) |
6163         ((read_exec_only ^ 1) << 9) |
6164         (contents << 10) |
6165         ((seg_not_present ^ 1) << 15) |
6166         (seg_32bit << 22) |
6167         (limit_in_pages << 23) |
6168         (lm << 21) |
6169         0x7000;
6170     if (!oldmode)
6171         entry_2 |= (useable << 20);
6172 
6173     /* Install the new entry ...  */
6174 install:
6175     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6176     lp[0] = tswap32(entry_1);
6177     lp[1] = tswap32(entry_2);
6178     return 0;
6179 }
6180 
6181 /* specific and weird i386 syscalls */
6182 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6183                               unsigned long bytecount)
6184 {
6185     abi_long ret;
6186 
6187     switch (func) {
6188     case 0:
6189         ret = read_ldt(ptr, bytecount);
6190         break;
6191     case 1:
6192         ret = write_ldt(env, ptr, bytecount, 1);
6193         break;
6194     case 0x11:
6195         ret = write_ldt(env, ptr, bytecount, 0);
6196         break;
6197     default:
6198         ret = -TARGET_ENOSYS;
6199         break;
6200     }
6201     return ret;
6202 }
6203 
6204 #if defined(TARGET_ABI32)
6205 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6206 {
6207     uint64_t *gdt_table = g2h(env->gdt.base);
6208     struct target_modify_ldt_ldt_s ldt_info;
6209     struct target_modify_ldt_ldt_s *target_ldt_info;
6210     int seg_32bit, contents, read_exec_only, limit_in_pages;
6211     int seg_not_present, useable, lm;
6212     uint32_t *lp, entry_1, entry_2;
6213     int i;
6214 
6215     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6216     if (!target_ldt_info)
6217         return -TARGET_EFAULT;
6218     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6219     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6220     ldt_info.limit = tswap32(target_ldt_info->limit);
6221     ldt_info.flags = tswap32(target_ldt_info->flags);
6222     if (ldt_info.entry_number == -1) {
6223         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6224             if (gdt_table[i] == 0) {
6225                 ldt_info.entry_number = i;
6226                 target_ldt_info->entry_number = tswap32(i);
6227                 break;
6228             }
6229         }
6230     }
6231     unlock_user_struct(target_ldt_info, ptr, 1);
6232 
6233     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6234         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6235            return -TARGET_EINVAL;
6236     seg_32bit = ldt_info.flags & 1;
6237     contents = (ldt_info.flags >> 1) & 3;
6238     read_exec_only = (ldt_info.flags >> 3) & 1;
6239     limit_in_pages = (ldt_info.flags >> 4) & 1;
6240     seg_not_present = (ldt_info.flags >> 5) & 1;
6241     useable = (ldt_info.flags >> 6) & 1;
6242 #ifdef TARGET_ABI32
6243     lm = 0;
6244 #else
6245     lm = (ldt_info.flags >> 7) & 1;
6246 #endif
6247 
6248     if (contents == 3) {
6249         if (seg_not_present == 0)
6250             return -TARGET_EINVAL;
6251     }
6252 
6253     /* NOTE: same code as Linux kernel */
6254     /* Allow LDTs to be cleared by the user. */
6255     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6256         if ((contents == 0             &&
6257              read_exec_only == 1       &&
6258              seg_32bit == 0            &&
6259              limit_in_pages == 0       &&
6260              seg_not_present == 1      &&
6261              useable == 0 )) {
6262             entry_1 = 0;
6263             entry_2 = 0;
6264             goto install;
6265         }
6266     }
6267 
6268     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6269         (ldt_info.limit & 0x0ffff);
6270     entry_2 = (ldt_info.base_addr & 0xff000000) |
6271         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6272         (ldt_info.limit & 0xf0000) |
6273         ((read_exec_only ^ 1) << 9) |
6274         (contents << 10) |
6275         ((seg_not_present ^ 1) << 15) |
6276         (seg_32bit << 22) |
6277         (limit_in_pages << 23) |
6278         (useable << 20) |
6279         (lm << 21) |
6280         0x7000;
6281 
6282     /* Install the new entry ...  */
6283 install:
6284     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6285     lp[0] = tswap32(entry_1);
6286     lp[1] = tswap32(entry_2);
6287     return 0;
6288 }
6289 
6290 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6291 {
6292     struct target_modify_ldt_ldt_s *target_ldt_info;
6293     uint64_t *gdt_table = g2h(env->gdt.base);
6294     uint32_t base_addr, limit, flags;
6295     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6296     int seg_not_present, useable, lm;
6297     uint32_t *lp, entry_1, entry_2;
6298 
6299     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6300     if (!target_ldt_info)
6301         return -TARGET_EFAULT;
6302     idx = tswap32(target_ldt_info->entry_number);
6303     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6304         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6305         unlock_user_struct(target_ldt_info, ptr, 1);
6306         return -TARGET_EINVAL;
6307     }
6308     lp = (uint32_t *)(gdt_table + idx);
6309     entry_1 = tswap32(lp[0]);
6310     entry_2 = tswap32(lp[1]);
6311 
6312     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6313     contents = (entry_2 >> 10) & 3;
6314     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6315     seg_32bit = (entry_2 >> 22) & 1;
6316     limit_in_pages = (entry_2 >> 23) & 1;
6317     useable = (entry_2 >> 20) & 1;
6318 #ifdef TARGET_ABI32
6319     lm = 0;
6320 #else
6321     lm = (entry_2 >> 21) & 1;
6322 #endif
6323     flags = (seg_32bit << 0) | (contents << 1) |
6324         (read_exec_only << 3) | (limit_in_pages << 4) |
6325         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6326     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6327     base_addr = (entry_1 >> 16) |
6328         (entry_2 & 0xff000000) |
6329         ((entry_2 & 0xff) << 16);
6330     target_ldt_info->base_addr = tswapal(base_addr);
6331     target_ldt_info->limit = tswap32(limit);
6332     target_ldt_info->flags = tswap32(flags);
6333     unlock_user_struct(target_ldt_info, ptr, 1);
6334     return 0;
6335 }
6336 
6337 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6338 {
6339     return -TARGET_ENOSYS;
6340 }
6341 #else
6342 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6343 {
6344     abi_long ret = 0;
6345     abi_ulong val;
6346     int idx;
6347 
6348     switch(code) {
6349     case TARGET_ARCH_SET_GS:
6350     case TARGET_ARCH_SET_FS:
6351         if (code == TARGET_ARCH_SET_GS)
6352             idx = R_GS;
6353         else
6354             idx = R_FS;
6355         cpu_x86_load_seg(env, idx, 0);
6356         env->segs[idx].base = addr;
6357         break;
6358     case TARGET_ARCH_GET_GS:
6359     case TARGET_ARCH_GET_FS:
6360         if (code == TARGET_ARCH_GET_GS)
6361             idx = R_GS;
6362         else
6363             idx = R_FS;
6364         val = env->segs[idx].base;
6365         if (put_user(val, addr, abi_ulong))
6366             ret = -TARGET_EFAULT;
6367         break;
6368     default:
6369         ret = -TARGET_EINVAL;
6370         break;
6371     }
6372     return ret;
6373 }
6374 #endif /* defined(TARGET_ABI32 */
6375 
6376 #endif /* defined(TARGET_I386) */
6377 
6378 #define NEW_STACK_SIZE 0x40000
6379 
6380 
6381 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6382 typedef struct {
6383     CPUArchState *env;
6384     pthread_mutex_t mutex;
6385     pthread_cond_t cond;
6386     pthread_t thread;
6387     uint32_t tid;
6388     abi_ulong child_tidptr;
6389     abi_ulong parent_tidptr;
6390     sigset_t sigmask;
6391 } new_thread_info;
6392 
6393 static void *clone_func(void *arg)
6394 {
6395     new_thread_info *info = arg;
6396     CPUArchState *env;
6397     CPUState *cpu;
6398     TaskState *ts;
6399 
6400     rcu_register_thread();
6401     tcg_register_thread();
6402     env = info->env;
6403     cpu = env_cpu(env);
6404     thread_cpu = cpu;
6405     ts = (TaskState *)cpu->opaque;
6406     info->tid = sys_gettid();
6407     task_settid(ts);
6408     if (info->child_tidptr)
6409         put_user_u32(info->tid, info->child_tidptr);
6410     if (info->parent_tidptr)
6411         put_user_u32(info->tid, info->parent_tidptr);
6412     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6413     /* Enable signals.  */
6414     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6415     /* Signal to the parent that we're ready.  */
6416     pthread_mutex_lock(&info->mutex);
6417     pthread_cond_broadcast(&info->cond);
6418     pthread_mutex_unlock(&info->mutex);
6419     /* Wait until the parent has finished initializing the tls state.  */
6420     pthread_mutex_lock(&clone_lock);
6421     pthread_mutex_unlock(&clone_lock);
6422     cpu_loop(env);
6423     /* never exits */
6424     return NULL;
6425 }
6426 
6427 /* do_fork() Must return host values and target errnos (unlike most
6428    do_*() functions). */
6429 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6430                    abi_ulong parent_tidptr, target_ulong newtls,
6431                    abi_ulong child_tidptr)
6432 {
6433     CPUState *cpu = env_cpu(env);
6434     int ret;
6435     TaskState *ts;
6436     CPUState *new_cpu;
6437     CPUArchState *new_env;
6438     sigset_t sigmask;
6439 
6440     flags &= ~CLONE_IGNORED_FLAGS;
6441 
6442     /* Emulate vfork() with fork() */
6443     if (flags & CLONE_VFORK)
6444         flags &= ~(CLONE_VFORK | CLONE_VM);
6445 
6446     if (flags & CLONE_VM) {
6447         TaskState *parent_ts = (TaskState *)cpu->opaque;
6448         new_thread_info info;
6449         pthread_attr_t attr;
6450 
6451         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6452             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6453             return -TARGET_EINVAL;
6454         }
6455 
6456         ts = g_new0(TaskState, 1);
6457         init_task_state(ts);
6458 
6459         /* Grab a mutex so that thread setup appears atomic.  */
6460         pthread_mutex_lock(&clone_lock);
6461 
6462         /* we create a new CPU instance. */
6463         new_env = cpu_copy(env);
6464         /* Init regs that differ from the parent.  */
6465         cpu_clone_regs_child(new_env, newsp, flags);
6466         cpu_clone_regs_parent(env, flags);
6467         new_cpu = env_cpu(new_env);
6468         new_cpu->opaque = ts;
6469         ts->bprm = parent_ts->bprm;
6470         ts->info = parent_ts->info;
6471         ts->signal_mask = parent_ts->signal_mask;
6472 
6473         if (flags & CLONE_CHILD_CLEARTID) {
6474             ts->child_tidptr = child_tidptr;
6475         }
6476 
6477         if (flags & CLONE_SETTLS) {
6478             cpu_set_tls (new_env, newtls);
6479         }
6480 
6481         memset(&info, 0, sizeof(info));
6482         pthread_mutex_init(&info.mutex, NULL);
6483         pthread_mutex_lock(&info.mutex);
6484         pthread_cond_init(&info.cond, NULL);
6485         info.env = new_env;
6486         if (flags & CLONE_CHILD_SETTID) {
6487             info.child_tidptr = child_tidptr;
6488         }
6489         if (flags & CLONE_PARENT_SETTID) {
6490             info.parent_tidptr = parent_tidptr;
6491         }
6492 
6493         ret = pthread_attr_init(&attr);
6494         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6495         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6496         /* It is not safe to deliver signals until the child has finished
6497            initializing, so temporarily block all signals.  */
6498         sigfillset(&sigmask);
6499         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6500         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6501 
6502         /* If this is our first additional thread, we need to ensure we
6503          * generate code for parallel execution and flush old translations.
6504          */
6505         if (!parallel_cpus) {
6506             parallel_cpus = true;
6507             tb_flush(cpu);
6508         }
6509 
6510         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6511         /* TODO: Free new CPU state if thread creation failed.  */
6512 
6513         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6514         pthread_attr_destroy(&attr);
6515         if (ret == 0) {
6516             /* Wait for the child to initialize.  */
6517             pthread_cond_wait(&info.cond, &info.mutex);
6518             ret = info.tid;
6519         } else {
6520             ret = -1;
6521         }
6522         pthread_mutex_unlock(&info.mutex);
6523         pthread_cond_destroy(&info.cond);
6524         pthread_mutex_destroy(&info.mutex);
6525         pthread_mutex_unlock(&clone_lock);
6526     } else {
6527         /* if no CLONE_VM, we consider it is a fork */
6528         if (flags & CLONE_INVALID_FORK_FLAGS) {
6529             return -TARGET_EINVAL;
6530         }
6531 
6532         /* We can't support custom termination signals */
6533         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6534             return -TARGET_EINVAL;
6535         }
6536 
6537         if (block_signals()) {
6538             return -TARGET_ERESTARTSYS;
6539         }
6540 
6541         fork_start();
6542         ret = fork();
6543         if (ret == 0) {
6544             /* Child Process.  */
6545             cpu_clone_regs_child(env, newsp, flags);
6546             fork_end(1);
6547             /* There is a race condition here.  The parent process could
6548                theoretically read the TID in the child process before the child
6549                tid is set.  This would require using either ptrace
6550                (not implemented) or having *_tidptr to point at a shared memory
6551                mapping.  We can't repeat the spinlock hack used above because
6552                the child process gets its own copy of the lock.  */
6553             if (flags & CLONE_CHILD_SETTID)
6554                 put_user_u32(sys_gettid(), child_tidptr);
6555             if (flags & CLONE_PARENT_SETTID)
6556                 put_user_u32(sys_gettid(), parent_tidptr);
6557             ts = (TaskState *)cpu->opaque;
6558             if (flags & CLONE_SETTLS)
6559                 cpu_set_tls (env, newtls);
6560             if (flags & CLONE_CHILD_CLEARTID)
6561                 ts->child_tidptr = child_tidptr;
6562         } else {
6563             cpu_clone_regs_parent(env, flags);
6564             fork_end(0);
6565         }
6566     }
6567     return ret;
6568 }
6569 
6570 /* warning : doesn't handle linux specific flags... */
6571 static int target_to_host_fcntl_cmd(int cmd)
6572 {
6573     int ret;
6574 
6575     switch(cmd) {
6576     case TARGET_F_DUPFD:
6577     case TARGET_F_GETFD:
6578     case TARGET_F_SETFD:
6579     case TARGET_F_GETFL:
6580     case TARGET_F_SETFL:
6581     case TARGET_F_OFD_GETLK:
6582     case TARGET_F_OFD_SETLK:
6583     case TARGET_F_OFD_SETLKW:
6584         ret = cmd;
6585         break;
6586     case TARGET_F_GETLK:
6587         ret = F_GETLK64;
6588         break;
6589     case TARGET_F_SETLK:
6590         ret = F_SETLK64;
6591         break;
6592     case TARGET_F_SETLKW:
6593         ret = F_SETLKW64;
6594         break;
6595     case TARGET_F_GETOWN:
6596         ret = F_GETOWN;
6597         break;
6598     case TARGET_F_SETOWN:
6599         ret = F_SETOWN;
6600         break;
6601     case TARGET_F_GETSIG:
6602         ret = F_GETSIG;
6603         break;
6604     case TARGET_F_SETSIG:
6605         ret = F_SETSIG;
6606         break;
6607 #if TARGET_ABI_BITS == 32
6608     case TARGET_F_GETLK64:
6609         ret = F_GETLK64;
6610         break;
6611     case TARGET_F_SETLK64:
6612         ret = F_SETLK64;
6613         break;
6614     case TARGET_F_SETLKW64:
6615         ret = F_SETLKW64;
6616         break;
6617 #endif
6618     case TARGET_F_SETLEASE:
6619         ret = F_SETLEASE;
6620         break;
6621     case TARGET_F_GETLEASE:
6622         ret = F_GETLEASE;
6623         break;
6624 #ifdef F_DUPFD_CLOEXEC
6625     case TARGET_F_DUPFD_CLOEXEC:
6626         ret = F_DUPFD_CLOEXEC;
6627         break;
6628 #endif
6629     case TARGET_F_NOTIFY:
6630         ret = F_NOTIFY;
6631         break;
6632 #ifdef F_GETOWN_EX
6633     case TARGET_F_GETOWN_EX:
6634         ret = F_GETOWN_EX;
6635         break;
6636 #endif
6637 #ifdef F_SETOWN_EX
6638     case TARGET_F_SETOWN_EX:
6639         ret = F_SETOWN_EX;
6640         break;
6641 #endif
6642 #ifdef F_SETPIPE_SZ
6643     case TARGET_F_SETPIPE_SZ:
6644         ret = F_SETPIPE_SZ;
6645         break;
6646     case TARGET_F_GETPIPE_SZ:
6647         ret = F_GETPIPE_SZ;
6648         break;
6649 #endif
6650 #ifdef F_ADD_SEALS
6651     case TARGET_F_ADD_SEALS:
6652         ret = F_ADD_SEALS;
6653         break;
6654     case TARGET_F_GET_SEALS:
6655         ret = F_GET_SEALS;
6656         break;
6657 #endif
6658     default:
6659         ret = -TARGET_EINVAL;
6660         break;
6661     }
6662 
6663 #if defined(__powerpc64__)
6664     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6665      * is not supported by kernel. The glibc fcntl call actually adjusts
6666      * them to 5, 6 and 7 before making the syscall(). Since we make the
6667      * syscall directly, adjust to what is supported by the kernel.
6668      */
6669     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6670         ret -= F_GETLK64 - 5;
6671     }
6672 #endif
6673 
6674     return ret;
6675 }
6676 
6677 #define FLOCK_TRANSTBL \
6678     switch (type) { \
6679     TRANSTBL_CONVERT(F_RDLCK); \
6680     TRANSTBL_CONVERT(F_WRLCK); \
6681     TRANSTBL_CONVERT(F_UNLCK); \
6682     TRANSTBL_CONVERT(F_EXLCK); \
6683     TRANSTBL_CONVERT(F_SHLCK); \
6684     }
6685 
6686 static int target_to_host_flock(int type)
6687 {
6688 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6689     FLOCK_TRANSTBL
6690 #undef  TRANSTBL_CONVERT
6691     return -TARGET_EINVAL;
6692 }
6693 
6694 static int host_to_target_flock(int type)
6695 {
6696 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6697     FLOCK_TRANSTBL
6698 #undef  TRANSTBL_CONVERT
6699     /* if we don't know how to convert the value coming
6700      * from the host we copy to the target field as-is
6701      */
6702     return type;
6703 }
6704 
6705 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6706                                             abi_ulong target_flock_addr)
6707 {
6708     struct target_flock *target_fl;
6709     int l_type;
6710 
6711     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6712         return -TARGET_EFAULT;
6713     }
6714 
6715     __get_user(l_type, &target_fl->l_type);
6716     l_type = target_to_host_flock(l_type);
6717     if (l_type < 0) {
6718         return l_type;
6719     }
6720     fl->l_type = l_type;
6721     __get_user(fl->l_whence, &target_fl->l_whence);
6722     __get_user(fl->l_start, &target_fl->l_start);
6723     __get_user(fl->l_len, &target_fl->l_len);
6724     __get_user(fl->l_pid, &target_fl->l_pid);
6725     unlock_user_struct(target_fl, target_flock_addr, 0);
6726     return 0;
6727 }
6728 
6729 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6730                                           const struct flock64 *fl)
6731 {
6732     struct target_flock *target_fl;
6733     short l_type;
6734 
6735     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6736         return -TARGET_EFAULT;
6737     }
6738 
6739     l_type = host_to_target_flock(fl->l_type);
6740     __put_user(l_type, &target_fl->l_type);
6741     __put_user(fl->l_whence, &target_fl->l_whence);
6742     __put_user(fl->l_start, &target_fl->l_start);
6743     __put_user(fl->l_len, &target_fl->l_len);
6744     __put_user(fl->l_pid, &target_fl->l_pid);
6745     unlock_user_struct(target_fl, target_flock_addr, 1);
6746     return 0;
6747 }
6748 
6749 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6750 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6751 
6752 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6753 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6754                                                    abi_ulong target_flock_addr)
6755 {
6756     struct target_oabi_flock64 *target_fl;
6757     int l_type;
6758 
6759     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6760         return -TARGET_EFAULT;
6761     }
6762 
6763     __get_user(l_type, &target_fl->l_type);
6764     l_type = target_to_host_flock(l_type);
6765     if (l_type < 0) {
6766         return l_type;
6767     }
6768     fl->l_type = l_type;
6769     __get_user(fl->l_whence, &target_fl->l_whence);
6770     __get_user(fl->l_start, &target_fl->l_start);
6771     __get_user(fl->l_len, &target_fl->l_len);
6772     __get_user(fl->l_pid, &target_fl->l_pid);
6773     unlock_user_struct(target_fl, target_flock_addr, 0);
6774     return 0;
6775 }
6776 
6777 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6778                                                  const struct flock64 *fl)
6779 {
6780     struct target_oabi_flock64 *target_fl;
6781     short l_type;
6782 
6783     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6784         return -TARGET_EFAULT;
6785     }
6786 
6787     l_type = host_to_target_flock(fl->l_type);
6788     __put_user(l_type, &target_fl->l_type);
6789     __put_user(fl->l_whence, &target_fl->l_whence);
6790     __put_user(fl->l_start, &target_fl->l_start);
6791     __put_user(fl->l_len, &target_fl->l_len);
6792     __put_user(fl->l_pid, &target_fl->l_pid);
6793     unlock_user_struct(target_fl, target_flock_addr, 1);
6794     return 0;
6795 }
6796 #endif
6797 
6798 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6799                                               abi_ulong target_flock_addr)
6800 {
6801     struct target_flock64 *target_fl;
6802     int l_type;
6803 
6804     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6805         return -TARGET_EFAULT;
6806     }
6807 
6808     __get_user(l_type, &target_fl->l_type);
6809     l_type = target_to_host_flock(l_type);
6810     if (l_type < 0) {
6811         return l_type;
6812     }
6813     fl->l_type = l_type;
6814     __get_user(fl->l_whence, &target_fl->l_whence);
6815     __get_user(fl->l_start, &target_fl->l_start);
6816     __get_user(fl->l_len, &target_fl->l_len);
6817     __get_user(fl->l_pid, &target_fl->l_pid);
6818     unlock_user_struct(target_fl, target_flock_addr, 0);
6819     return 0;
6820 }
6821 
6822 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6823                                             const struct flock64 *fl)
6824 {
6825     struct target_flock64 *target_fl;
6826     short l_type;
6827 
6828     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6829         return -TARGET_EFAULT;
6830     }
6831 
6832     l_type = host_to_target_flock(fl->l_type);
6833     __put_user(l_type, &target_fl->l_type);
6834     __put_user(fl->l_whence, &target_fl->l_whence);
6835     __put_user(fl->l_start, &target_fl->l_start);
6836     __put_user(fl->l_len, &target_fl->l_len);
6837     __put_user(fl->l_pid, &target_fl->l_pid);
6838     unlock_user_struct(target_fl, target_flock_addr, 1);
6839     return 0;
6840 }
6841 
6842 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6843 {
6844     struct flock64 fl64;
6845 #ifdef F_GETOWN_EX
6846     struct f_owner_ex fox;
6847     struct target_f_owner_ex *target_fox;
6848 #endif
6849     abi_long ret;
6850     int host_cmd = target_to_host_fcntl_cmd(cmd);
6851 
6852     if (host_cmd == -TARGET_EINVAL)
6853 	    return host_cmd;
6854 
6855     switch(cmd) {
6856     case TARGET_F_GETLK:
6857         ret = copy_from_user_flock(&fl64, arg);
6858         if (ret) {
6859             return ret;
6860         }
6861         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6862         if (ret == 0) {
6863             ret = copy_to_user_flock(arg, &fl64);
6864         }
6865         break;
6866 
6867     case TARGET_F_SETLK:
6868     case TARGET_F_SETLKW:
6869         ret = copy_from_user_flock(&fl64, arg);
6870         if (ret) {
6871             return ret;
6872         }
6873         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6874         break;
6875 
6876     case TARGET_F_GETLK64:
6877     case TARGET_F_OFD_GETLK:
6878         ret = copy_from_user_flock64(&fl64, arg);
6879         if (ret) {
6880             return ret;
6881         }
6882         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6883         if (ret == 0) {
6884             ret = copy_to_user_flock64(arg, &fl64);
6885         }
6886         break;
6887     case TARGET_F_SETLK64:
6888     case TARGET_F_SETLKW64:
6889     case TARGET_F_OFD_SETLK:
6890     case TARGET_F_OFD_SETLKW:
6891         ret = copy_from_user_flock64(&fl64, arg);
6892         if (ret) {
6893             return ret;
6894         }
6895         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6896         break;
6897 
6898     case TARGET_F_GETFL:
6899         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6900         if (ret >= 0) {
6901             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6902         }
6903         break;
6904 
6905     case TARGET_F_SETFL:
6906         ret = get_errno(safe_fcntl(fd, host_cmd,
6907                                    target_to_host_bitmask(arg,
6908                                                           fcntl_flags_tbl)));
6909         break;
6910 
6911 #ifdef F_GETOWN_EX
6912     case TARGET_F_GETOWN_EX:
6913         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6914         if (ret >= 0) {
6915             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6916                 return -TARGET_EFAULT;
6917             target_fox->type = tswap32(fox.type);
6918             target_fox->pid = tswap32(fox.pid);
6919             unlock_user_struct(target_fox, arg, 1);
6920         }
6921         break;
6922 #endif
6923 
6924 #ifdef F_SETOWN_EX
6925     case TARGET_F_SETOWN_EX:
6926         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6927             return -TARGET_EFAULT;
6928         fox.type = tswap32(target_fox->type);
6929         fox.pid = tswap32(target_fox->pid);
6930         unlock_user_struct(target_fox, arg, 0);
6931         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6932         break;
6933 #endif
6934 
6935     case TARGET_F_SETSIG:
6936         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6937         break;
6938 
6939     case TARGET_F_GETSIG:
6940         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6941         break;
6942 
6943     case TARGET_F_SETOWN:
6944     case TARGET_F_GETOWN:
6945     case TARGET_F_SETLEASE:
6946     case TARGET_F_GETLEASE:
6947     case TARGET_F_SETPIPE_SZ:
6948     case TARGET_F_GETPIPE_SZ:
6949     case TARGET_F_ADD_SEALS:
6950     case TARGET_F_GET_SEALS:
6951         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6952         break;
6953 
6954     default:
6955         ret = get_errno(safe_fcntl(fd, cmd, arg));
6956         break;
6957     }
6958     return ret;
6959 }
6960 
6961 #ifdef USE_UID16
6962 
6963 static inline int high2lowuid(int uid)
6964 {
6965     if (uid > 65535)
6966         return 65534;
6967     else
6968         return uid;
6969 }
6970 
6971 static inline int high2lowgid(int gid)
6972 {
6973     if (gid > 65535)
6974         return 65534;
6975     else
6976         return gid;
6977 }
6978 
6979 static inline int low2highuid(int uid)
6980 {
6981     if ((int16_t)uid == -1)
6982         return -1;
6983     else
6984         return uid;
6985 }
6986 
6987 static inline int low2highgid(int gid)
6988 {
6989     if ((int16_t)gid == -1)
6990         return -1;
6991     else
6992         return gid;
6993 }
6994 static inline int tswapid(int id)
6995 {
6996     return tswap16(id);
6997 }
6998 
6999 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7000 
7001 #else /* !USE_UID16 */
7002 static inline int high2lowuid(int uid)
7003 {
7004     return uid;
7005 }
7006 static inline int high2lowgid(int gid)
7007 {
7008     return gid;
7009 }
7010 static inline int low2highuid(int uid)
7011 {
7012     return uid;
7013 }
7014 static inline int low2highgid(int gid)
7015 {
7016     return gid;
7017 }
7018 static inline int tswapid(int id)
7019 {
7020     return tswap32(id);
7021 }
7022 
7023 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7024 
7025 #endif /* USE_UID16 */
7026 
7027 /* We must do direct syscalls for setting UID/GID, because we want to
7028  * implement the Linux system call semantics of "change only for this thread",
7029  * not the libc/POSIX semantics of "change for all threads in process".
7030  * (See http://ewontfix.com/17/ for more details.)
7031  * We use the 32-bit version of the syscalls if present; if it is not
7032  * then either the host architecture supports 32-bit UIDs natively with
7033  * the standard syscall, or the 16-bit UID is the best we can do.
7034  */
7035 #ifdef __NR_setuid32
7036 #define __NR_sys_setuid __NR_setuid32
7037 #else
7038 #define __NR_sys_setuid __NR_setuid
7039 #endif
7040 #ifdef __NR_setgid32
7041 #define __NR_sys_setgid __NR_setgid32
7042 #else
7043 #define __NR_sys_setgid __NR_setgid
7044 #endif
7045 #ifdef __NR_setresuid32
7046 #define __NR_sys_setresuid __NR_setresuid32
7047 #else
7048 #define __NR_sys_setresuid __NR_setresuid
7049 #endif
7050 #ifdef __NR_setresgid32
7051 #define __NR_sys_setresgid __NR_setresgid32
7052 #else
7053 #define __NR_sys_setresgid __NR_setresgid
7054 #endif
7055 
7056 _syscall1(int, sys_setuid, uid_t, uid)
7057 _syscall1(int, sys_setgid, gid_t, gid)
7058 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7059 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7060 
7061 void syscall_init(void)
7062 {
7063     IOCTLEntry *ie;
7064     const argtype *arg_type;
7065     int size;
7066     int i;
7067 
7068     thunk_init(STRUCT_MAX);
7069 
7070 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7071 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7072 #include "syscall_types.h"
7073 #undef STRUCT
7074 #undef STRUCT_SPECIAL
7075 
7076     /* Build target_to_host_errno_table[] table from
7077      * host_to_target_errno_table[]. */
7078     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7079         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7080     }
7081 
7082     /* we patch the ioctl size if necessary. We rely on the fact that
7083        no ioctl has all the bits at '1' in the size field */
7084     ie = ioctl_entries;
7085     while (ie->target_cmd != 0) {
7086         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7087             TARGET_IOC_SIZEMASK) {
7088             arg_type = ie->arg_type;
7089             if (arg_type[0] != TYPE_PTR) {
7090                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7091                         ie->target_cmd);
7092                 exit(1);
7093             }
7094             arg_type++;
7095             size = thunk_type_size(arg_type, 0);
7096             ie->target_cmd = (ie->target_cmd &
7097                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7098                 (size << TARGET_IOC_SIZESHIFT);
7099         }
7100 
7101         /* automatic consistency check if same arch */
7102 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7103     (defined(__x86_64__) && defined(TARGET_X86_64))
7104         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7105             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7106                     ie->name, ie->target_cmd, ie->host_cmd);
7107         }
7108 #endif
7109         ie++;
7110     }
7111 }
7112 
7113 #ifdef TARGET_NR_truncate64
7114 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7115                                          abi_long arg2,
7116                                          abi_long arg3,
7117                                          abi_long arg4)
7118 {
7119     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7120         arg2 = arg3;
7121         arg3 = arg4;
7122     }
7123     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7124 }
7125 #endif
7126 
7127 #ifdef TARGET_NR_ftruncate64
7128 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7129                                           abi_long arg2,
7130                                           abi_long arg3,
7131                                           abi_long arg4)
7132 {
7133     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7134         arg2 = arg3;
7135         arg3 = arg4;
7136     }
7137     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7138 }
7139 #endif
7140 
7141 #if defined(TARGET_NR_timer_settime) || \
7142     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7143 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7144                                                  abi_ulong target_addr)
7145 {
7146     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7147                                 offsetof(struct target_itimerspec,
7148                                          it_interval)) ||
7149         target_to_host_timespec(&host_its->it_value, target_addr +
7150                                 offsetof(struct target_itimerspec,
7151                                          it_value))) {
7152         return -TARGET_EFAULT;
7153     }
7154 
7155     return 0;
7156 }
7157 #endif
7158 
7159 #if defined(TARGET_NR_timer_settime64) || \
7160     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7161 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7162                                                    abi_ulong target_addr)
7163 {
7164     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7165                                   offsetof(struct target__kernel_itimerspec,
7166                                            it_interval)) ||
7167         target_to_host_timespec64(&host_its->it_value, target_addr +
7168                                   offsetof(struct target__kernel_itimerspec,
7169                                            it_value))) {
7170         return -TARGET_EFAULT;
7171     }
7172 
7173     return 0;
7174 }
7175 #endif
7176 
7177 #if ((defined(TARGET_NR_timerfd_gettime) || \
7178       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7179       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7180 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7181                                                  struct itimerspec *host_its)
7182 {
7183     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7184                                                        it_interval),
7185                                 &host_its->it_interval) ||
7186         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7187                                                        it_value),
7188                                 &host_its->it_value)) {
7189         return -TARGET_EFAULT;
7190     }
7191     return 0;
7192 }
7193 #endif
7194 
7195 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7196       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7197       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7198 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7199                                                    struct itimerspec *host_its)
7200 {
7201     if (host_to_target_timespec64(target_addr +
7202                                   offsetof(struct target__kernel_itimerspec,
7203                                            it_interval),
7204                                   &host_its->it_interval) ||
7205         host_to_target_timespec64(target_addr +
7206                                   offsetof(struct target__kernel_itimerspec,
7207                                            it_value),
7208                                   &host_its->it_value)) {
7209         return -TARGET_EFAULT;
7210     }
7211     return 0;
7212 }
7213 #endif
7214 
7215 #if defined(TARGET_NR_adjtimex) || \
7216     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7217 static inline abi_long target_to_host_timex(struct timex *host_tx,
7218                                             abi_long target_addr)
7219 {
7220     struct target_timex *target_tx;
7221 
7222     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7223         return -TARGET_EFAULT;
7224     }
7225 
7226     __get_user(host_tx->modes, &target_tx->modes);
7227     __get_user(host_tx->offset, &target_tx->offset);
7228     __get_user(host_tx->freq, &target_tx->freq);
7229     __get_user(host_tx->maxerror, &target_tx->maxerror);
7230     __get_user(host_tx->esterror, &target_tx->esterror);
7231     __get_user(host_tx->status, &target_tx->status);
7232     __get_user(host_tx->constant, &target_tx->constant);
7233     __get_user(host_tx->precision, &target_tx->precision);
7234     __get_user(host_tx->tolerance, &target_tx->tolerance);
7235     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7236     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7237     __get_user(host_tx->tick, &target_tx->tick);
7238     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7239     __get_user(host_tx->jitter, &target_tx->jitter);
7240     __get_user(host_tx->shift, &target_tx->shift);
7241     __get_user(host_tx->stabil, &target_tx->stabil);
7242     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7243     __get_user(host_tx->calcnt, &target_tx->calcnt);
7244     __get_user(host_tx->errcnt, &target_tx->errcnt);
7245     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7246     __get_user(host_tx->tai, &target_tx->tai);
7247 
7248     unlock_user_struct(target_tx, target_addr, 0);
7249     return 0;
7250 }
7251 
7252 static inline abi_long host_to_target_timex(abi_long target_addr,
7253                                             struct timex *host_tx)
7254 {
7255     struct target_timex *target_tx;
7256 
7257     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7258         return -TARGET_EFAULT;
7259     }
7260 
7261     __put_user(host_tx->modes, &target_tx->modes);
7262     __put_user(host_tx->offset, &target_tx->offset);
7263     __put_user(host_tx->freq, &target_tx->freq);
7264     __put_user(host_tx->maxerror, &target_tx->maxerror);
7265     __put_user(host_tx->esterror, &target_tx->esterror);
7266     __put_user(host_tx->status, &target_tx->status);
7267     __put_user(host_tx->constant, &target_tx->constant);
7268     __put_user(host_tx->precision, &target_tx->precision);
7269     __put_user(host_tx->tolerance, &target_tx->tolerance);
7270     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7271     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7272     __put_user(host_tx->tick, &target_tx->tick);
7273     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7274     __put_user(host_tx->jitter, &target_tx->jitter);
7275     __put_user(host_tx->shift, &target_tx->shift);
7276     __put_user(host_tx->stabil, &target_tx->stabil);
7277     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7278     __put_user(host_tx->calcnt, &target_tx->calcnt);
7279     __put_user(host_tx->errcnt, &target_tx->errcnt);
7280     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7281     __put_user(host_tx->tai, &target_tx->tai);
7282 
7283     unlock_user_struct(target_tx, target_addr, 1);
7284     return 0;
7285 }
7286 #endif
7287 
7288 
7289 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7290 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7291                                               abi_long target_addr)
7292 {
7293     struct target__kernel_timex *target_tx;
7294 
7295     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7296                                  offsetof(struct target__kernel_timex,
7297                                           time))) {
7298         return -TARGET_EFAULT;
7299     }
7300 
7301     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7302         return -TARGET_EFAULT;
7303     }
7304 
7305     __get_user(host_tx->modes, &target_tx->modes);
7306     __get_user(host_tx->offset, &target_tx->offset);
7307     __get_user(host_tx->freq, &target_tx->freq);
7308     __get_user(host_tx->maxerror, &target_tx->maxerror);
7309     __get_user(host_tx->esterror, &target_tx->esterror);
7310     __get_user(host_tx->status, &target_tx->status);
7311     __get_user(host_tx->constant, &target_tx->constant);
7312     __get_user(host_tx->precision, &target_tx->precision);
7313     __get_user(host_tx->tolerance, &target_tx->tolerance);
7314     __get_user(host_tx->tick, &target_tx->tick);
7315     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7316     __get_user(host_tx->jitter, &target_tx->jitter);
7317     __get_user(host_tx->shift, &target_tx->shift);
7318     __get_user(host_tx->stabil, &target_tx->stabil);
7319     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7320     __get_user(host_tx->calcnt, &target_tx->calcnt);
7321     __get_user(host_tx->errcnt, &target_tx->errcnt);
7322     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7323     __get_user(host_tx->tai, &target_tx->tai);
7324 
7325     unlock_user_struct(target_tx, target_addr, 0);
7326     return 0;
7327 }
7328 
7329 static inline abi_long host_to_target_timex64(abi_long target_addr,
7330                                               struct timex *host_tx)
7331 {
7332     struct target__kernel_timex *target_tx;
7333 
7334    if (copy_to_user_timeval64(target_addr +
7335                               offsetof(struct target__kernel_timex, time),
7336                               &host_tx->time)) {
7337         return -TARGET_EFAULT;
7338     }
7339 
7340     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7341         return -TARGET_EFAULT;
7342     }
7343 
7344     __put_user(host_tx->modes, &target_tx->modes);
7345     __put_user(host_tx->offset, &target_tx->offset);
7346     __put_user(host_tx->freq, &target_tx->freq);
7347     __put_user(host_tx->maxerror, &target_tx->maxerror);
7348     __put_user(host_tx->esterror, &target_tx->esterror);
7349     __put_user(host_tx->status, &target_tx->status);
7350     __put_user(host_tx->constant, &target_tx->constant);
7351     __put_user(host_tx->precision, &target_tx->precision);
7352     __put_user(host_tx->tolerance, &target_tx->tolerance);
7353     __put_user(host_tx->tick, &target_tx->tick);
7354     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7355     __put_user(host_tx->jitter, &target_tx->jitter);
7356     __put_user(host_tx->shift, &target_tx->shift);
7357     __put_user(host_tx->stabil, &target_tx->stabil);
7358     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7359     __put_user(host_tx->calcnt, &target_tx->calcnt);
7360     __put_user(host_tx->errcnt, &target_tx->errcnt);
7361     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7362     __put_user(host_tx->tai, &target_tx->tai);
7363 
7364     unlock_user_struct(target_tx, target_addr, 1);
7365     return 0;
7366 }
7367 #endif
7368 
7369 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7370                                                abi_ulong target_addr)
7371 {
7372     struct target_sigevent *target_sevp;
7373 
7374     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7375         return -TARGET_EFAULT;
7376     }
7377 
7378     /* This union is awkward on 64 bit systems because it has a 32 bit
7379      * integer and a pointer in it; we follow the conversion approach
7380      * used for handling sigval types in signal.c so the guest should get
7381      * the correct value back even if we did a 64 bit byteswap and it's
7382      * using the 32 bit integer.
7383      */
7384     host_sevp->sigev_value.sival_ptr =
7385         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7386     host_sevp->sigev_signo =
7387         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7388     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7389     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7390 
7391     unlock_user_struct(target_sevp, target_addr, 1);
7392     return 0;
7393 }
7394 
7395 #if defined(TARGET_NR_mlockall)
7396 static inline int target_to_host_mlockall_arg(int arg)
7397 {
7398     int result = 0;
7399 
7400     if (arg & TARGET_MCL_CURRENT) {
7401         result |= MCL_CURRENT;
7402     }
7403     if (arg & TARGET_MCL_FUTURE) {
7404         result |= MCL_FUTURE;
7405     }
7406 #ifdef MCL_ONFAULT
7407     if (arg & TARGET_MCL_ONFAULT) {
7408         result |= MCL_ONFAULT;
7409     }
7410 #endif
7411 
7412     return result;
7413 }
7414 #endif
7415 
7416 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7417      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7418      defined(TARGET_NR_newfstatat))
7419 static inline abi_long host_to_target_stat64(void *cpu_env,
7420                                              abi_ulong target_addr,
7421                                              struct stat *host_st)
7422 {
7423 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7424     if (((CPUARMState *)cpu_env)->eabi) {
7425         struct target_eabi_stat64 *target_st;
7426 
7427         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7428             return -TARGET_EFAULT;
7429         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7430         __put_user(host_st->st_dev, &target_st->st_dev);
7431         __put_user(host_st->st_ino, &target_st->st_ino);
7432 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7433         __put_user(host_st->st_ino, &target_st->__st_ino);
7434 #endif
7435         __put_user(host_st->st_mode, &target_st->st_mode);
7436         __put_user(host_st->st_nlink, &target_st->st_nlink);
7437         __put_user(host_st->st_uid, &target_st->st_uid);
7438         __put_user(host_st->st_gid, &target_st->st_gid);
7439         __put_user(host_st->st_rdev, &target_st->st_rdev);
7440         __put_user(host_st->st_size, &target_st->st_size);
7441         __put_user(host_st->st_blksize, &target_st->st_blksize);
7442         __put_user(host_st->st_blocks, &target_st->st_blocks);
7443         __put_user(host_st->st_atime, &target_st->target_st_atime);
7444         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7445         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7446 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7447         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7448         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7449         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7450 #endif
7451         unlock_user_struct(target_st, target_addr, 1);
7452     } else
7453 #endif
7454     {
7455 #if defined(TARGET_HAS_STRUCT_STAT64)
7456         struct target_stat64 *target_st;
7457 #else
7458         struct target_stat *target_st;
7459 #endif
7460 
7461         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7462             return -TARGET_EFAULT;
7463         memset(target_st, 0, sizeof(*target_st));
7464         __put_user(host_st->st_dev, &target_st->st_dev);
7465         __put_user(host_st->st_ino, &target_st->st_ino);
7466 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7467         __put_user(host_st->st_ino, &target_st->__st_ino);
7468 #endif
7469         __put_user(host_st->st_mode, &target_st->st_mode);
7470         __put_user(host_st->st_nlink, &target_st->st_nlink);
7471         __put_user(host_st->st_uid, &target_st->st_uid);
7472         __put_user(host_st->st_gid, &target_st->st_gid);
7473         __put_user(host_st->st_rdev, &target_st->st_rdev);
7474         /* XXX: better use of kernel struct */
7475         __put_user(host_st->st_size, &target_st->st_size);
7476         __put_user(host_st->st_blksize, &target_st->st_blksize);
7477         __put_user(host_st->st_blocks, &target_st->st_blocks);
7478         __put_user(host_st->st_atime, &target_st->target_st_atime);
7479         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7480         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7481 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7482         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7483         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7484         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7485 #endif
7486         unlock_user_struct(target_st, target_addr, 1);
7487     }
7488 
7489     return 0;
7490 }
7491 #endif
7492 
7493 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7494 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7495                                             abi_ulong target_addr)
7496 {
7497     struct target_statx *target_stx;
7498 
7499     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7500         return -TARGET_EFAULT;
7501     }
7502     memset(target_stx, 0, sizeof(*target_stx));
7503 
7504     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7505     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7506     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7507     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7508     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7509     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7510     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7511     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7512     __put_user(host_stx->stx_size, &target_stx->stx_size);
7513     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7514     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7515     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7516     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7517     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7518     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7519     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7520     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7521     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7522     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7523     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7524     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7525     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7526     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7527 
7528     unlock_user_struct(target_stx, target_addr, 1);
7529 
7530     return 0;
7531 }
7532 #endif
7533 
7534 static int do_sys_futex(int *uaddr, int op, int val,
7535                          const struct timespec *timeout, int *uaddr2,
7536                          int val3)
7537 {
7538 #if HOST_LONG_BITS == 64
7539 #if defined(__NR_futex)
7540     /* always a 64-bit time_t, it doesn't define _time64 version  */
7541     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7542 
7543 #endif
7544 #else /* HOST_LONG_BITS == 64 */
7545 #if defined(__NR_futex_time64)
7546     if (sizeof(timeout->tv_sec) == 8) {
7547         /* _time64 function on 32bit arch */
7548         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7549     }
7550 #endif
7551 #if defined(__NR_futex)
7552     /* old function on 32bit arch */
7553     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7554 #endif
7555 #endif /* HOST_LONG_BITS == 64 */
7556     g_assert_not_reached();
7557 }
7558 
7559 static int do_safe_futex(int *uaddr, int op, int val,
7560                          const struct timespec *timeout, int *uaddr2,
7561                          int val3)
7562 {
7563 #if HOST_LONG_BITS == 64
7564 #if defined(__NR_futex)
7565     /* always a 64-bit time_t, it doesn't define _time64 version  */
7566     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7567 #endif
7568 #else /* HOST_LONG_BITS == 64 */
7569 #if defined(__NR_futex_time64)
7570     if (sizeof(timeout->tv_sec) == 8) {
7571         /* _time64 function on 32bit arch */
7572         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7573                                            val3));
7574     }
7575 #endif
7576 #if defined(__NR_futex)
7577     /* old function on 32bit arch */
7578     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7579 #endif
7580 #endif /* HOST_LONG_BITS == 64 */
7581     return -TARGET_ENOSYS;
7582 }
7583 
7584 /* ??? Using host futex calls even when target atomic operations
7585    are not really atomic probably breaks things.  However implementing
7586    futexes locally would make futexes shared between multiple processes
7587    tricky.  However they're probably useless because guest atomic
7588    operations won't work either.  */
7589 #if defined(TARGET_NR_futex)
7590 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7591                     target_ulong uaddr2, int val3)
7592 {
7593     struct timespec ts, *pts;
7594     int base_op;
7595 
7596     /* ??? We assume FUTEX_* constants are the same on both host
7597        and target.  */
7598 #ifdef FUTEX_CMD_MASK
7599     base_op = op & FUTEX_CMD_MASK;
7600 #else
7601     base_op = op;
7602 #endif
7603     switch (base_op) {
7604     case FUTEX_WAIT:
7605     case FUTEX_WAIT_BITSET:
7606         if (timeout) {
7607             pts = &ts;
7608             target_to_host_timespec(pts, timeout);
7609         } else {
7610             pts = NULL;
7611         }
7612         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7613     case FUTEX_WAKE:
7614         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7615     case FUTEX_FD:
7616         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7617     case FUTEX_REQUEUE:
7618     case FUTEX_CMP_REQUEUE:
7619     case FUTEX_WAKE_OP:
7620         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7621            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7622            But the prototype takes a `struct timespec *'; insert casts
7623            to satisfy the compiler.  We do not need to tswap TIMEOUT
7624            since it's not compared to guest memory.  */
7625         pts = (struct timespec *)(uintptr_t) timeout;
7626         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7627                              (base_op == FUTEX_CMP_REQUEUE
7628                                       ? tswap32(val3)
7629                                       : val3));
7630     default:
7631         return -TARGET_ENOSYS;
7632     }
7633 }
7634 #endif
7635 
7636 #if defined(TARGET_NR_futex_time64)
7637 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7638                            target_ulong uaddr2, int val3)
7639 {
7640     struct timespec ts, *pts;
7641     int base_op;
7642 
7643     /* ??? We assume FUTEX_* constants are the same on both host
7644        and target.  */
7645 #ifdef FUTEX_CMD_MASK
7646     base_op = op & FUTEX_CMD_MASK;
7647 #else
7648     base_op = op;
7649 #endif
7650     switch (base_op) {
7651     case FUTEX_WAIT:
7652     case FUTEX_WAIT_BITSET:
7653         if (timeout) {
7654             pts = &ts;
7655             if (target_to_host_timespec64(pts, timeout)) {
7656                 return -TARGET_EFAULT;
7657             }
7658         } else {
7659             pts = NULL;
7660         }
7661         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7662     case FUTEX_WAKE:
7663         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7664     case FUTEX_FD:
7665         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7666     case FUTEX_REQUEUE:
7667     case FUTEX_CMP_REQUEUE:
7668     case FUTEX_WAKE_OP:
7669         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7670            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7671            But the prototype takes a `struct timespec *'; insert casts
7672            to satisfy the compiler.  We do not need to tswap TIMEOUT
7673            since it's not compared to guest memory.  */
7674         pts = (struct timespec *)(uintptr_t) timeout;
7675         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7676                              (base_op == FUTEX_CMP_REQUEUE
7677                                       ? tswap32(val3)
7678                                       : val3));
7679     default:
7680         return -TARGET_ENOSYS;
7681     }
7682 }
7683 #endif
7684 
7685 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7686 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7687                                      abi_long handle, abi_long mount_id,
7688                                      abi_long flags)
7689 {
7690     struct file_handle *target_fh;
7691     struct file_handle *fh;
7692     int mid = 0;
7693     abi_long ret;
7694     char *name;
7695     unsigned int size, total_size;
7696 
7697     if (get_user_s32(size, handle)) {
7698         return -TARGET_EFAULT;
7699     }
7700 
7701     name = lock_user_string(pathname);
7702     if (!name) {
7703         return -TARGET_EFAULT;
7704     }
7705 
7706     total_size = sizeof(struct file_handle) + size;
7707     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7708     if (!target_fh) {
7709         unlock_user(name, pathname, 0);
7710         return -TARGET_EFAULT;
7711     }
7712 
7713     fh = g_malloc0(total_size);
7714     fh->handle_bytes = size;
7715 
7716     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7717     unlock_user(name, pathname, 0);
7718 
7719     /* man name_to_handle_at(2):
7720      * Other than the use of the handle_bytes field, the caller should treat
7721      * the file_handle structure as an opaque data type
7722      */
7723 
7724     memcpy(target_fh, fh, total_size);
7725     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7726     target_fh->handle_type = tswap32(fh->handle_type);
7727     g_free(fh);
7728     unlock_user(target_fh, handle, total_size);
7729 
7730     if (put_user_s32(mid, mount_id)) {
7731         return -TARGET_EFAULT;
7732     }
7733 
7734     return ret;
7735 
7736 }
7737 #endif
7738 
7739 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7740 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7741                                      abi_long flags)
7742 {
7743     struct file_handle *target_fh;
7744     struct file_handle *fh;
7745     unsigned int size, total_size;
7746     abi_long ret;
7747 
7748     if (get_user_s32(size, handle)) {
7749         return -TARGET_EFAULT;
7750     }
7751 
7752     total_size = sizeof(struct file_handle) + size;
7753     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7754     if (!target_fh) {
7755         return -TARGET_EFAULT;
7756     }
7757 
7758     fh = g_memdup(target_fh, total_size);
7759     fh->handle_bytes = size;
7760     fh->handle_type = tswap32(target_fh->handle_type);
7761 
7762     ret = get_errno(open_by_handle_at(mount_fd, fh,
7763                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7764 
7765     g_free(fh);
7766 
7767     unlock_user(target_fh, handle, total_size);
7768 
7769     return ret;
7770 }
7771 #endif
7772 
7773 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7774 
7775 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7776 {
7777     int host_flags;
7778     target_sigset_t *target_mask;
7779     sigset_t host_mask;
7780     abi_long ret;
7781 
7782     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7783         return -TARGET_EINVAL;
7784     }
7785     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7786         return -TARGET_EFAULT;
7787     }
7788 
7789     target_to_host_sigset(&host_mask, target_mask);
7790 
7791     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7792 
7793     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7794     if (ret >= 0) {
7795         fd_trans_register(ret, &target_signalfd_trans);
7796     }
7797 
7798     unlock_user_struct(target_mask, mask, 0);
7799 
7800     return ret;
7801 }
7802 #endif
7803 
7804 /* Map host to target signal numbers for the wait family of syscalls.
7805    Assume all other status bits are the same.  */
7806 int host_to_target_waitstatus(int status)
7807 {
7808     if (WIFSIGNALED(status)) {
7809         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7810     }
7811     if (WIFSTOPPED(status)) {
7812         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7813                | (status & 0xff);
7814     }
7815     return status;
7816 }
7817 
7818 static int open_self_cmdline(void *cpu_env, int fd)
7819 {
7820     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7821     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7822     int i;
7823 
7824     for (i = 0; i < bprm->argc; i++) {
7825         size_t len = strlen(bprm->argv[i]) + 1;
7826 
7827         if (write(fd, bprm->argv[i], len) != len) {
7828             return -1;
7829         }
7830     }
7831 
7832     return 0;
7833 }
7834 
7835 static int open_self_maps(void *cpu_env, int fd)
7836 {
7837     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7838     TaskState *ts = cpu->opaque;
7839     GSList *map_info = read_self_maps();
7840     GSList *s;
7841     int count;
7842 
7843     for (s = map_info; s; s = g_slist_next(s)) {
7844         MapInfo *e = (MapInfo *) s->data;
7845 
7846         if (h2g_valid(e->start)) {
7847             unsigned long min = e->start;
7848             unsigned long max = e->end;
7849             int flags = page_get_flags(h2g(min));
7850             const char *path;
7851 
7852             max = h2g_valid(max - 1) ?
7853                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7854 
7855             if (page_check_range(h2g(min), max - min, flags) == -1) {
7856                 continue;
7857             }
7858 
7859             if (h2g(min) == ts->info->stack_limit) {
7860                 path = "[stack]";
7861             } else {
7862                 path = e->path;
7863             }
7864 
7865             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7866                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7867                             h2g(min), h2g(max - 1) + 1,
7868                             e->is_read ? 'r' : '-',
7869                             e->is_write ? 'w' : '-',
7870                             e->is_exec ? 'x' : '-',
7871                             e->is_priv ? 'p' : '-',
7872                             (uint64_t) e->offset, e->dev, e->inode);
7873             if (path) {
7874                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7875             } else {
7876                 dprintf(fd, "\n");
7877             }
7878         }
7879     }
7880 
7881     free_self_maps(map_info);
7882 
7883 #ifdef TARGET_VSYSCALL_PAGE
7884     /*
7885      * We only support execution from the vsyscall page.
7886      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7887      */
7888     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7889                     " --xp 00000000 00:00 0",
7890                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7891     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7892 #endif
7893 
7894     return 0;
7895 }
7896 
7897 static int open_self_stat(void *cpu_env, int fd)
7898 {
7899     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7900     TaskState *ts = cpu->opaque;
7901     g_autoptr(GString) buf = g_string_new(NULL);
7902     int i;
7903 
7904     for (i = 0; i < 44; i++) {
7905         if (i == 0) {
7906             /* pid */
7907             g_string_printf(buf, FMT_pid " ", getpid());
7908         } else if (i == 1) {
7909             /* app name */
7910             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7911             bin = bin ? bin + 1 : ts->bprm->argv[0];
7912             g_string_printf(buf, "(%.15s) ", bin);
7913         } else if (i == 27) {
7914             /* stack bottom */
7915             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7916         } else {
7917             /* for the rest, there is MasterCard */
7918             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7919         }
7920 
7921         if (write(fd, buf->str, buf->len) != buf->len) {
7922             return -1;
7923         }
7924     }
7925 
7926     return 0;
7927 }
7928 
7929 static int open_self_auxv(void *cpu_env, int fd)
7930 {
7931     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7932     TaskState *ts = cpu->opaque;
7933     abi_ulong auxv = ts->info->saved_auxv;
7934     abi_ulong len = ts->info->auxv_len;
7935     char *ptr;
7936 
7937     /*
7938      * Auxiliary vector is stored in target process stack.
7939      * read in whole auxv vector and copy it to file
7940      */
7941     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7942     if (ptr != NULL) {
7943         while (len > 0) {
7944             ssize_t r;
7945             r = write(fd, ptr, len);
7946             if (r <= 0) {
7947                 break;
7948             }
7949             len -= r;
7950             ptr += r;
7951         }
7952         lseek(fd, 0, SEEK_SET);
7953         unlock_user(ptr, auxv, len);
7954     }
7955 
7956     return 0;
7957 }
7958 
7959 static int is_proc_myself(const char *filename, const char *entry)
7960 {
7961     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7962         filename += strlen("/proc/");
7963         if (!strncmp(filename, "self/", strlen("self/"))) {
7964             filename += strlen("self/");
7965         } else if (*filename >= '1' && *filename <= '9') {
7966             char myself[80];
7967             snprintf(myself, sizeof(myself), "%d/", getpid());
7968             if (!strncmp(filename, myself, strlen(myself))) {
7969                 filename += strlen(myself);
7970             } else {
7971                 return 0;
7972             }
7973         } else {
7974             return 0;
7975         }
7976         if (!strcmp(filename, entry)) {
7977             return 1;
7978         }
7979     }
7980     return 0;
7981 }
7982 
7983 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7984     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7985 static int is_proc(const char *filename, const char *entry)
7986 {
7987     return strcmp(filename, entry) == 0;
7988 }
7989 #endif
7990 
7991 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7992 static int open_net_route(void *cpu_env, int fd)
7993 {
7994     FILE *fp;
7995     char *line = NULL;
7996     size_t len = 0;
7997     ssize_t read;
7998 
7999     fp = fopen("/proc/net/route", "r");
8000     if (fp == NULL) {
8001         return -1;
8002     }
8003 
8004     /* read header */
8005 
8006     read = getline(&line, &len, fp);
8007     dprintf(fd, "%s", line);
8008 
8009     /* read routes */
8010 
8011     while ((read = getline(&line, &len, fp)) != -1) {
8012         char iface[16];
8013         uint32_t dest, gw, mask;
8014         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8015         int fields;
8016 
8017         fields = sscanf(line,
8018                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8019                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8020                         &mask, &mtu, &window, &irtt);
8021         if (fields != 11) {
8022             continue;
8023         }
8024         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8025                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8026                 metric, tswap32(mask), mtu, window, irtt);
8027     }
8028 
8029     free(line);
8030     fclose(fp);
8031 
8032     return 0;
8033 }
8034 #endif
8035 
8036 #if defined(TARGET_SPARC)
8037 static int open_cpuinfo(void *cpu_env, int fd)
8038 {
8039     dprintf(fd, "type\t\t: sun4u\n");
8040     return 0;
8041 }
8042 #endif
8043 
8044 #if defined(TARGET_HPPA)
8045 static int open_cpuinfo(void *cpu_env, int fd)
8046 {
8047     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8048     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8049     dprintf(fd, "capabilities\t: os32\n");
8050     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8051     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8052     return 0;
8053 }
8054 #endif
8055 
8056 #if defined(TARGET_M68K)
8057 static int open_hardware(void *cpu_env, int fd)
8058 {
8059     dprintf(fd, "Model:\t\tqemu-m68k\n");
8060     return 0;
8061 }
8062 #endif
8063 
8064 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8065 {
8066     struct fake_open {
8067         const char *filename;
8068         int (*fill)(void *cpu_env, int fd);
8069         int (*cmp)(const char *s1, const char *s2);
8070     };
8071     const struct fake_open *fake_open;
8072     static const struct fake_open fakes[] = {
8073         { "maps", open_self_maps, is_proc_myself },
8074         { "stat", open_self_stat, is_proc_myself },
8075         { "auxv", open_self_auxv, is_proc_myself },
8076         { "cmdline", open_self_cmdline, is_proc_myself },
8077 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8078         { "/proc/net/route", open_net_route, is_proc },
8079 #endif
8080 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8081         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8082 #endif
8083 #if defined(TARGET_M68K)
8084         { "/proc/hardware", open_hardware, is_proc },
8085 #endif
8086         { NULL, NULL, NULL }
8087     };
8088 
8089     if (is_proc_myself(pathname, "exe")) {
8090         int execfd = qemu_getauxval(AT_EXECFD);
8091         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8092     }
8093 
8094     for (fake_open = fakes; fake_open->filename; fake_open++) {
8095         if (fake_open->cmp(pathname, fake_open->filename)) {
8096             break;
8097         }
8098     }
8099 
8100     if (fake_open->filename) {
8101         const char *tmpdir;
8102         char filename[PATH_MAX];
8103         int fd, r;
8104 
8105         /* create temporary file to map stat to */
8106         tmpdir = getenv("TMPDIR");
8107         if (!tmpdir)
8108             tmpdir = "/tmp";
8109         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8110         fd = mkstemp(filename);
8111         if (fd < 0) {
8112             return fd;
8113         }
8114         unlink(filename);
8115 
8116         if ((r = fake_open->fill(cpu_env, fd))) {
8117             int e = errno;
8118             close(fd);
8119             errno = e;
8120             return r;
8121         }
8122         lseek(fd, 0, SEEK_SET);
8123 
8124         return fd;
8125     }
8126 
8127     return safe_openat(dirfd, path(pathname), flags, mode);
8128 }
8129 
8130 #define TIMER_MAGIC 0x0caf0000
8131 #define TIMER_MAGIC_MASK 0xffff0000
8132 
8133 /* Convert QEMU provided timer ID back to internal 16bit index format */
8134 static target_timer_t get_timer_id(abi_long arg)
8135 {
8136     target_timer_t timerid = arg;
8137 
8138     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8139         return -TARGET_EINVAL;
8140     }
8141 
8142     timerid &= 0xffff;
8143 
8144     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8145         return -TARGET_EINVAL;
8146     }
8147 
8148     return timerid;
8149 }
8150 
8151 static int target_to_host_cpu_mask(unsigned long *host_mask,
8152                                    size_t host_size,
8153                                    abi_ulong target_addr,
8154                                    size_t target_size)
8155 {
8156     unsigned target_bits = sizeof(abi_ulong) * 8;
8157     unsigned host_bits = sizeof(*host_mask) * 8;
8158     abi_ulong *target_mask;
8159     unsigned i, j;
8160 
8161     assert(host_size >= target_size);
8162 
8163     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8164     if (!target_mask) {
8165         return -TARGET_EFAULT;
8166     }
8167     memset(host_mask, 0, host_size);
8168 
8169     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8170         unsigned bit = i * target_bits;
8171         abi_ulong val;
8172 
8173         __get_user(val, &target_mask[i]);
8174         for (j = 0; j < target_bits; j++, bit++) {
8175             if (val & (1UL << j)) {
8176                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8177             }
8178         }
8179     }
8180 
8181     unlock_user(target_mask, target_addr, 0);
8182     return 0;
8183 }
8184 
8185 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8186                                    size_t host_size,
8187                                    abi_ulong target_addr,
8188                                    size_t target_size)
8189 {
8190     unsigned target_bits = sizeof(abi_ulong) * 8;
8191     unsigned host_bits = sizeof(*host_mask) * 8;
8192     abi_ulong *target_mask;
8193     unsigned i, j;
8194 
8195     assert(host_size >= target_size);
8196 
8197     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8198     if (!target_mask) {
8199         return -TARGET_EFAULT;
8200     }
8201 
8202     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8203         unsigned bit = i * target_bits;
8204         abi_ulong val = 0;
8205 
8206         for (j = 0; j < target_bits; j++, bit++) {
8207             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8208                 val |= 1UL << j;
8209             }
8210         }
8211         __put_user(val, &target_mask[i]);
8212     }
8213 
8214     unlock_user(target_mask, target_addr, target_size);
8215     return 0;
8216 }
8217 
8218 /* This is an internal helper for do_syscall so that it is easier
8219  * to have a single return point, so that actions, such as logging
8220  * of syscall results, can be performed.
8221  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8222  */
8223 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8224                             abi_long arg2, abi_long arg3, abi_long arg4,
8225                             abi_long arg5, abi_long arg6, abi_long arg7,
8226                             abi_long arg8)
8227 {
8228     CPUState *cpu = env_cpu(cpu_env);
8229     abi_long ret;
8230 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8231     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8232     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8233     || defined(TARGET_NR_statx)
8234     struct stat st;
8235 #endif
8236 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8237     || defined(TARGET_NR_fstatfs)
8238     struct statfs stfs;
8239 #endif
8240     void *p;
8241 
8242     switch(num) {
8243     case TARGET_NR_exit:
8244         /* In old applications this may be used to implement _exit(2).
8245            However in threaded applications it is used for thread termination,
8246            and _exit_group is used for application termination.
8247            Do thread termination if we have more then one thread.  */
8248 
8249         if (block_signals()) {
8250             return -TARGET_ERESTARTSYS;
8251         }
8252 
8253         pthread_mutex_lock(&clone_lock);
8254 
8255         if (CPU_NEXT(first_cpu)) {
8256             TaskState *ts = cpu->opaque;
8257 
8258             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8259             object_unref(OBJECT(cpu));
8260             /*
8261              * At this point the CPU should be unrealized and removed
8262              * from cpu lists. We can clean-up the rest of the thread
8263              * data without the lock held.
8264              */
8265 
8266             pthread_mutex_unlock(&clone_lock);
8267 
8268             if (ts->child_tidptr) {
8269                 put_user_u32(0, ts->child_tidptr);
8270                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8271                           NULL, NULL, 0);
8272             }
8273             thread_cpu = NULL;
8274             g_free(ts);
8275             rcu_unregister_thread();
8276             pthread_exit(NULL);
8277         }
8278 
8279         pthread_mutex_unlock(&clone_lock);
8280         preexit_cleanup(cpu_env, arg1);
8281         _exit(arg1);
8282         return 0; /* avoid warning */
8283     case TARGET_NR_read:
8284         if (arg2 == 0 && arg3 == 0) {
8285             return get_errno(safe_read(arg1, 0, 0));
8286         } else {
8287             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8288                 return -TARGET_EFAULT;
8289             ret = get_errno(safe_read(arg1, p, arg3));
8290             if (ret >= 0 &&
8291                 fd_trans_host_to_target_data(arg1)) {
8292                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8293             }
8294             unlock_user(p, arg2, ret);
8295         }
8296         return ret;
8297     case TARGET_NR_write:
8298         if (arg2 == 0 && arg3 == 0) {
8299             return get_errno(safe_write(arg1, 0, 0));
8300         }
8301         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8302             return -TARGET_EFAULT;
8303         if (fd_trans_target_to_host_data(arg1)) {
8304             void *copy = g_malloc(arg3);
8305             memcpy(copy, p, arg3);
8306             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8307             if (ret >= 0) {
8308                 ret = get_errno(safe_write(arg1, copy, ret));
8309             }
8310             g_free(copy);
8311         } else {
8312             ret = get_errno(safe_write(arg1, p, arg3));
8313         }
8314         unlock_user(p, arg2, 0);
8315         return ret;
8316 
8317 #ifdef TARGET_NR_open
8318     case TARGET_NR_open:
8319         if (!(p = lock_user_string(arg1)))
8320             return -TARGET_EFAULT;
8321         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8322                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8323                                   arg3));
8324         fd_trans_unregister(ret);
8325         unlock_user(p, arg1, 0);
8326         return ret;
8327 #endif
8328     case TARGET_NR_openat:
8329         if (!(p = lock_user_string(arg2)))
8330             return -TARGET_EFAULT;
8331         ret = get_errno(do_openat(cpu_env, arg1, p,
8332                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8333                                   arg4));
8334         fd_trans_unregister(ret);
8335         unlock_user(p, arg2, 0);
8336         return ret;
8337 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8338     case TARGET_NR_name_to_handle_at:
8339         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8340         return ret;
8341 #endif
8342 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8343     case TARGET_NR_open_by_handle_at:
8344         ret = do_open_by_handle_at(arg1, arg2, arg3);
8345         fd_trans_unregister(ret);
8346         return ret;
8347 #endif
8348     case TARGET_NR_close:
8349         fd_trans_unregister(arg1);
8350         return get_errno(close(arg1));
8351 
8352     case TARGET_NR_brk:
8353         return do_brk(arg1);
8354 #ifdef TARGET_NR_fork
8355     case TARGET_NR_fork:
8356         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8357 #endif
8358 #ifdef TARGET_NR_waitpid
8359     case TARGET_NR_waitpid:
8360         {
8361             int status;
8362             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8363             if (!is_error(ret) && arg2 && ret
8364                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8365                 return -TARGET_EFAULT;
8366         }
8367         return ret;
8368 #endif
8369 #ifdef TARGET_NR_waitid
8370     case TARGET_NR_waitid:
8371         {
8372             siginfo_t info;
8373             info.si_pid = 0;
8374             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8375             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8376                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8377                     return -TARGET_EFAULT;
8378                 host_to_target_siginfo(p, &info);
8379                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8380             }
8381         }
8382         return ret;
8383 #endif
8384 #ifdef TARGET_NR_creat /* not on alpha */
8385     case TARGET_NR_creat:
8386         if (!(p = lock_user_string(arg1)))
8387             return -TARGET_EFAULT;
8388         ret = get_errno(creat(p, arg2));
8389         fd_trans_unregister(ret);
8390         unlock_user(p, arg1, 0);
8391         return ret;
8392 #endif
8393 #ifdef TARGET_NR_link
8394     case TARGET_NR_link:
8395         {
8396             void * p2;
8397             p = lock_user_string(arg1);
8398             p2 = lock_user_string(arg2);
8399             if (!p || !p2)
8400                 ret = -TARGET_EFAULT;
8401             else
8402                 ret = get_errno(link(p, p2));
8403             unlock_user(p2, arg2, 0);
8404             unlock_user(p, arg1, 0);
8405         }
8406         return ret;
8407 #endif
8408 #if defined(TARGET_NR_linkat)
8409     case TARGET_NR_linkat:
8410         {
8411             void * p2 = NULL;
8412             if (!arg2 || !arg4)
8413                 return -TARGET_EFAULT;
8414             p  = lock_user_string(arg2);
8415             p2 = lock_user_string(arg4);
8416             if (!p || !p2)
8417                 ret = -TARGET_EFAULT;
8418             else
8419                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8420             unlock_user(p, arg2, 0);
8421             unlock_user(p2, arg4, 0);
8422         }
8423         return ret;
8424 #endif
8425 #ifdef TARGET_NR_unlink
8426     case TARGET_NR_unlink:
8427         if (!(p = lock_user_string(arg1)))
8428             return -TARGET_EFAULT;
8429         ret = get_errno(unlink(p));
8430         unlock_user(p, arg1, 0);
8431         return ret;
8432 #endif
8433 #if defined(TARGET_NR_unlinkat)
8434     case TARGET_NR_unlinkat:
8435         if (!(p = lock_user_string(arg2)))
8436             return -TARGET_EFAULT;
8437         ret = get_errno(unlinkat(arg1, p, arg3));
8438         unlock_user(p, arg2, 0);
8439         return ret;
8440 #endif
8441     case TARGET_NR_execve:
8442         {
8443             char **argp, **envp;
8444             int argc, envc;
8445             abi_ulong gp;
8446             abi_ulong guest_argp;
8447             abi_ulong guest_envp;
8448             abi_ulong addr;
8449             char **q;
8450             int total_size = 0;
8451 
8452             argc = 0;
8453             guest_argp = arg2;
8454             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8455                 if (get_user_ual(addr, gp))
8456                     return -TARGET_EFAULT;
8457                 if (!addr)
8458                     break;
8459                 argc++;
8460             }
8461             envc = 0;
8462             guest_envp = arg3;
8463             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8464                 if (get_user_ual(addr, gp))
8465                     return -TARGET_EFAULT;
8466                 if (!addr)
8467                     break;
8468                 envc++;
8469             }
8470 
8471             argp = g_new0(char *, argc + 1);
8472             envp = g_new0(char *, envc + 1);
8473 
8474             for (gp = guest_argp, q = argp; gp;
8475                   gp += sizeof(abi_ulong), q++) {
8476                 if (get_user_ual(addr, gp))
8477                     goto execve_efault;
8478                 if (!addr)
8479                     break;
8480                 if (!(*q = lock_user_string(addr)))
8481                     goto execve_efault;
8482                 total_size += strlen(*q) + 1;
8483             }
8484             *q = NULL;
8485 
8486             for (gp = guest_envp, q = envp; gp;
8487                   gp += sizeof(abi_ulong), q++) {
8488                 if (get_user_ual(addr, gp))
8489                     goto execve_efault;
8490                 if (!addr)
8491                     break;
8492                 if (!(*q = lock_user_string(addr)))
8493                     goto execve_efault;
8494                 total_size += strlen(*q) + 1;
8495             }
8496             *q = NULL;
8497 
8498             if (!(p = lock_user_string(arg1)))
8499                 goto execve_efault;
8500             /* Although execve() is not an interruptible syscall it is
8501              * a special case where we must use the safe_syscall wrapper:
8502              * if we allow a signal to happen before we make the host
8503              * syscall then we will 'lose' it, because at the point of
8504              * execve the process leaves QEMU's control. So we use the
8505              * safe syscall wrapper to ensure that we either take the
8506              * signal as a guest signal, or else it does not happen
8507              * before the execve completes and makes it the other
8508              * program's problem.
8509              */
8510             ret = get_errno(safe_execve(p, argp, envp));
8511             unlock_user(p, arg1, 0);
8512 
8513             goto execve_end;
8514 
8515         execve_efault:
8516             ret = -TARGET_EFAULT;
8517 
8518         execve_end:
8519             for (gp = guest_argp, q = argp; *q;
8520                   gp += sizeof(abi_ulong), q++) {
8521                 if (get_user_ual(addr, gp)
8522                     || !addr)
8523                     break;
8524                 unlock_user(*q, addr, 0);
8525             }
8526             for (gp = guest_envp, q = envp; *q;
8527                   gp += sizeof(abi_ulong), q++) {
8528                 if (get_user_ual(addr, gp)
8529                     || !addr)
8530                     break;
8531                 unlock_user(*q, addr, 0);
8532             }
8533 
8534             g_free(argp);
8535             g_free(envp);
8536         }
8537         return ret;
8538     case TARGET_NR_chdir:
8539         if (!(p = lock_user_string(arg1)))
8540             return -TARGET_EFAULT;
8541         ret = get_errno(chdir(p));
8542         unlock_user(p, arg1, 0);
8543         return ret;
8544 #ifdef TARGET_NR_time
8545     case TARGET_NR_time:
8546         {
8547             time_t host_time;
8548             ret = get_errno(time(&host_time));
8549             if (!is_error(ret)
8550                 && arg1
8551                 && put_user_sal(host_time, arg1))
8552                 return -TARGET_EFAULT;
8553         }
8554         return ret;
8555 #endif
8556 #ifdef TARGET_NR_mknod
8557     case TARGET_NR_mknod:
8558         if (!(p = lock_user_string(arg1)))
8559             return -TARGET_EFAULT;
8560         ret = get_errno(mknod(p, arg2, arg3));
8561         unlock_user(p, arg1, 0);
8562         return ret;
8563 #endif
8564 #if defined(TARGET_NR_mknodat)
8565     case TARGET_NR_mknodat:
8566         if (!(p = lock_user_string(arg2)))
8567             return -TARGET_EFAULT;
8568         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8569         unlock_user(p, arg2, 0);
8570         return ret;
8571 #endif
8572 #ifdef TARGET_NR_chmod
8573     case TARGET_NR_chmod:
8574         if (!(p = lock_user_string(arg1)))
8575             return -TARGET_EFAULT;
8576         ret = get_errno(chmod(p, arg2));
8577         unlock_user(p, arg1, 0);
8578         return ret;
8579 #endif
8580 #ifdef TARGET_NR_lseek
8581     case TARGET_NR_lseek:
8582         return get_errno(lseek(arg1, arg2, arg3));
8583 #endif
8584 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8585     /* Alpha specific */
8586     case TARGET_NR_getxpid:
8587         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8588         return get_errno(getpid());
8589 #endif
8590 #ifdef TARGET_NR_getpid
8591     case TARGET_NR_getpid:
8592         return get_errno(getpid());
8593 #endif
8594     case TARGET_NR_mount:
8595         {
8596             /* need to look at the data field */
8597             void *p2, *p3;
8598 
8599             if (arg1) {
8600                 p = lock_user_string(arg1);
8601                 if (!p) {
8602                     return -TARGET_EFAULT;
8603                 }
8604             } else {
8605                 p = NULL;
8606             }
8607 
8608             p2 = lock_user_string(arg2);
8609             if (!p2) {
8610                 if (arg1) {
8611                     unlock_user(p, arg1, 0);
8612                 }
8613                 return -TARGET_EFAULT;
8614             }
8615 
8616             if (arg3) {
8617                 p3 = lock_user_string(arg3);
8618                 if (!p3) {
8619                     if (arg1) {
8620                         unlock_user(p, arg1, 0);
8621                     }
8622                     unlock_user(p2, arg2, 0);
8623                     return -TARGET_EFAULT;
8624                 }
8625             } else {
8626                 p3 = NULL;
8627             }
8628 
8629             /* FIXME - arg5 should be locked, but it isn't clear how to
8630              * do that since it's not guaranteed to be a NULL-terminated
8631              * string.
8632              */
8633             if (!arg5) {
8634                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8635             } else {
8636                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8637             }
8638             ret = get_errno(ret);
8639 
8640             if (arg1) {
8641                 unlock_user(p, arg1, 0);
8642             }
8643             unlock_user(p2, arg2, 0);
8644             if (arg3) {
8645                 unlock_user(p3, arg3, 0);
8646             }
8647         }
8648         return ret;
8649 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8650 #if defined(TARGET_NR_umount)
8651     case TARGET_NR_umount:
8652 #endif
8653 #if defined(TARGET_NR_oldumount)
8654     case TARGET_NR_oldumount:
8655 #endif
8656         if (!(p = lock_user_string(arg1)))
8657             return -TARGET_EFAULT;
8658         ret = get_errno(umount(p));
8659         unlock_user(p, arg1, 0);
8660         return ret;
8661 #endif
8662 #ifdef TARGET_NR_stime /* not on alpha */
8663     case TARGET_NR_stime:
8664         {
8665             struct timespec ts;
8666             ts.tv_nsec = 0;
8667             if (get_user_sal(ts.tv_sec, arg1)) {
8668                 return -TARGET_EFAULT;
8669             }
8670             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8671         }
8672 #endif
8673 #ifdef TARGET_NR_alarm /* not on alpha */
8674     case TARGET_NR_alarm:
8675         return alarm(arg1);
8676 #endif
8677 #ifdef TARGET_NR_pause /* not on alpha */
8678     case TARGET_NR_pause:
8679         if (!block_signals()) {
8680             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8681         }
8682         return -TARGET_EINTR;
8683 #endif
8684 #ifdef TARGET_NR_utime
8685     case TARGET_NR_utime:
8686         {
8687             struct utimbuf tbuf, *host_tbuf;
8688             struct target_utimbuf *target_tbuf;
8689             if (arg2) {
8690                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8691                     return -TARGET_EFAULT;
8692                 tbuf.actime = tswapal(target_tbuf->actime);
8693                 tbuf.modtime = tswapal(target_tbuf->modtime);
8694                 unlock_user_struct(target_tbuf, arg2, 0);
8695                 host_tbuf = &tbuf;
8696             } else {
8697                 host_tbuf = NULL;
8698             }
8699             if (!(p = lock_user_string(arg1)))
8700                 return -TARGET_EFAULT;
8701             ret = get_errno(utime(p, host_tbuf));
8702             unlock_user(p, arg1, 0);
8703         }
8704         return ret;
8705 #endif
8706 #ifdef TARGET_NR_utimes
8707     case TARGET_NR_utimes:
8708         {
8709             struct timeval *tvp, tv[2];
8710             if (arg2) {
8711                 if (copy_from_user_timeval(&tv[0], arg2)
8712                     || copy_from_user_timeval(&tv[1],
8713                                               arg2 + sizeof(struct target_timeval)))
8714                     return -TARGET_EFAULT;
8715                 tvp = tv;
8716             } else {
8717                 tvp = NULL;
8718             }
8719             if (!(p = lock_user_string(arg1)))
8720                 return -TARGET_EFAULT;
8721             ret = get_errno(utimes(p, tvp));
8722             unlock_user(p, arg1, 0);
8723         }
8724         return ret;
8725 #endif
8726 #if defined(TARGET_NR_futimesat)
8727     case TARGET_NR_futimesat:
8728         {
8729             struct timeval *tvp, tv[2];
8730             if (arg3) {
8731                 if (copy_from_user_timeval(&tv[0], arg3)
8732                     || copy_from_user_timeval(&tv[1],
8733                                               arg3 + sizeof(struct target_timeval)))
8734                     return -TARGET_EFAULT;
8735                 tvp = tv;
8736             } else {
8737                 tvp = NULL;
8738             }
8739             if (!(p = lock_user_string(arg2))) {
8740                 return -TARGET_EFAULT;
8741             }
8742             ret = get_errno(futimesat(arg1, path(p), tvp));
8743             unlock_user(p, arg2, 0);
8744         }
8745         return ret;
8746 #endif
8747 #ifdef TARGET_NR_access
8748     case TARGET_NR_access:
8749         if (!(p = lock_user_string(arg1))) {
8750             return -TARGET_EFAULT;
8751         }
8752         ret = get_errno(access(path(p), arg2));
8753         unlock_user(p, arg1, 0);
8754         return ret;
8755 #endif
8756 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8757     case TARGET_NR_faccessat:
8758         if (!(p = lock_user_string(arg2))) {
8759             return -TARGET_EFAULT;
8760         }
8761         ret = get_errno(faccessat(arg1, p, arg3, 0));
8762         unlock_user(p, arg2, 0);
8763         return ret;
8764 #endif
8765 #ifdef TARGET_NR_nice /* not on alpha */
8766     case TARGET_NR_nice:
8767         return get_errno(nice(arg1));
8768 #endif
8769     case TARGET_NR_sync:
8770         sync();
8771         return 0;
8772 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8773     case TARGET_NR_syncfs:
8774         return get_errno(syncfs(arg1));
8775 #endif
8776     case TARGET_NR_kill:
8777         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8778 #ifdef TARGET_NR_rename
8779     case TARGET_NR_rename:
8780         {
8781             void *p2;
8782             p = lock_user_string(arg1);
8783             p2 = lock_user_string(arg2);
8784             if (!p || !p2)
8785                 ret = -TARGET_EFAULT;
8786             else
8787                 ret = get_errno(rename(p, p2));
8788             unlock_user(p2, arg2, 0);
8789             unlock_user(p, arg1, 0);
8790         }
8791         return ret;
8792 #endif
8793 #if defined(TARGET_NR_renameat)
8794     case TARGET_NR_renameat:
8795         {
8796             void *p2;
8797             p  = lock_user_string(arg2);
8798             p2 = lock_user_string(arg4);
8799             if (!p || !p2)
8800                 ret = -TARGET_EFAULT;
8801             else
8802                 ret = get_errno(renameat(arg1, p, arg3, p2));
8803             unlock_user(p2, arg4, 0);
8804             unlock_user(p, arg2, 0);
8805         }
8806         return ret;
8807 #endif
8808 #if defined(TARGET_NR_renameat2)
8809     case TARGET_NR_renameat2:
8810         {
8811             void *p2;
8812             p  = lock_user_string(arg2);
8813             p2 = lock_user_string(arg4);
8814             if (!p || !p2) {
8815                 ret = -TARGET_EFAULT;
8816             } else {
8817                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8818             }
8819             unlock_user(p2, arg4, 0);
8820             unlock_user(p, arg2, 0);
8821         }
8822         return ret;
8823 #endif
8824 #ifdef TARGET_NR_mkdir
8825     case TARGET_NR_mkdir:
8826         if (!(p = lock_user_string(arg1)))
8827             return -TARGET_EFAULT;
8828         ret = get_errno(mkdir(p, arg2));
8829         unlock_user(p, arg1, 0);
8830         return ret;
8831 #endif
8832 #if defined(TARGET_NR_mkdirat)
8833     case TARGET_NR_mkdirat:
8834         if (!(p = lock_user_string(arg2)))
8835             return -TARGET_EFAULT;
8836         ret = get_errno(mkdirat(arg1, p, arg3));
8837         unlock_user(p, arg2, 0);
8838         return ret;
8839 #endif
8840 #ifdef TARGET_NR_rmdir
8841     case TARGET_NR_rmdir:
8842         if (!(p = lock_user_string(arg1)))
8843             return -TARGET_EFAULT;
8844         ret = get_errno(rmdir(p));
8845         unlock_user(p, arg1, 0);
8846         return ret;
8847 #endif
8848     case TARGET_NR_dup:
8849         ret = get_errno(dup(arg1));
8850         if (ret >= 0) {
8851             fd_trans_dup(arg1, ret);
8852         }
8853         return ret;
8854 #ifdef TARGET_NR_pipe
8855     case TARGET_NR_pipe:
8856         return do_pipe(cpu_env, arg1, 0, 0);
8857 #endif
8858 #ifdef TARGET_NR_pipe2
8859     case TARGET_NR_pipe2:
8860         return do_pipe(cpu_env, arg1,
8861                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8862 #endif
8863     case TARGET_NR_times:
8864         {
8865             struct target_tms *tmsp;
8866             struct tms tms;
8867             ret = get_errno(times(&tms));
8868             if (arg1) {
8869                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8870                 if (!tmsp)
8871                     return -TARGET_EFAULT;
8872                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8873                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8874                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8875                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8876             }
8877             if (!is_error(ret))
8878                 ret = host_to_target_clock_t(ret);
8879         }
8880         return ret;
8881     case TARGET_NR_acct:
8882         if (arg1 == 0) {
8883             ret = get_errno(acct(NULL));
8884         } else {
8885             if (!(p = lock_user_string(arg1))) {
8886                 return -TARGET_EFAULT;
8887             }
8888             ret = get_errno(acct(path(p)));
8889             unlock_user(p, arg1, 0);
8890         }
8891         return ret;
8892 #ifdef TARGET_NR_umount2
8893     case TARGET_NR_umount2:
8894         if (!(p = lock_user_string(arg1)))
8895             return -TARGET_EFAULT;
8896         ret = get_errno(umount2(p, arg2));
8897         unlock_user(p, arg1, 0);
8898         return ret;
8899 #endif
8900     case TARGET_NR_ioctl:
8901         return do_ioctl(arg1, arg2, arg3);
8902 #ifdef TARGET_NR_fcntl
8903     case TARGET_NR_fcntl:
8904         return do_fcntl(arg1, arg2, arg3);
8905 #endif
8906     case TARGET_NR_setpgid:
8907         return get_errno(setpgid(arg1, arg2));
8908     case TARGET_NR_umask:
8909         return get_errno(umask(arg1));
8910     case TARGET_NR_chroot:
8911         if (!(p = lock_user_string(arg1)))
8912             return -TARGET_EFAULT;
8913         ret = get_errno(chroot(p));
8914         unlock_user(p, arg1, 0);
8915         return ret;
8916 #ifdef TARGET_NR_dup2
8917     case TARGET_NR_dup2:
8918         ret = get_errno(dup2(arg1, arg2));
8919         if (ret >= 0) {
8920             fd_trans_dup(arg1, arg2);
8921         }
8922         return ret;
8923 #endif
8924 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8925     case TARGET_NR_dup3:
8926     {
8927         int host_flags;
8928 
8929         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8930             return -EINVAL;
8931         }
8932         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8933         ret = get_errno(dup3(arg1, arg2, host_flags));
8934         if (ret >= 0) {
8935             fd_trans_dup(arg1, arg2);
8936         }
8937         return ret;
8938     }
8939 #endif
8940 #ifdef TARGET_NR_getppid /* not on alpha */
8941     case TARGET_NR_getppid:
8942         return get_errno(getppid());
8943 #endif
8944 #ifdef TARGET_NR_getpgrp
8945     case TARGET_NR_getpgrp:
8946         return get_errno(getpgrp());
8947 #endif
8948     case TARGET_NR_setsid:
8949         return get_errno(setsid());
8950 #ifdef TARGET_NR_sigaction
8951     case TARGET_NR_sigaction:
8952         {
8953 #if defined(TARGET_ALPHA)
8954             struct target_sigaction act, oact, *pact = 0;
8955             struct target_old_sigaction *old_act;
8956             if (arg2) {
8957                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8958                     return -TARGET_EFAULT;
8959                 act._sa_handler = old_act->_sa_handler;
8960                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8961                 act.sa_flags = old_act->sa_flags;
8962                 act.sa_restorer = 0;
8963                 unlock_user_struct(old_act, arg2, 0);
8964                 pact = &act;
8965             }
8966             ret = get_errno(do_sigaction(arg1, pact, &oact));
8967             if (!is_error(ret) && arg3) {
8968                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8969                     return -TARGET_EFAULT;
8970                 old_act->_sa_handler = oact._sa_handler;
8971                 old_act->sa_mask = oact.sa_mask.sig[0];
8972                 old_act->sa_flags = oact.sa_flags;
8973                 unlock_user_struct(old_act, arg3, 1);
8974             }
8975 #elif defined(TARGET_MIPS)
8976 	    struct target_sigaction act, oact, *pact, *old_act;
8977 
8978 	    if (arg2) {
8979                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8980                     return -TARGET_EFAULT;
8981 		act._sa_handler = old_act->_sa_handler;
8982 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8983 		act.sa_flags = old_act->sa_flags;
8984 		unlock_user_struct(old_act, arg2, 0);
8985 		pact = &act;
8986 	    } else {
8987 		pact = NULL;
8988 	    }
8989 
8990 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
8991 
8992 	    if (!is_error(ret) && arg3) {
8993                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8994                     return -TARGET_EFAULT;
8995 		old_act->_sa_handler = oact._sa_handler;
8996 		old_act->sa_flags = oact.sa_flags;
8997 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8998 		old_act->sa_mask.sig[1] = 0;
8999 		old_act->sa_mask.sig[2] = 0;
9000 		old_act->sa_mask.sig[3] = 0;
9001 		unlock_user_struct(old_act, arg3, 1);
9002 	    }
9003 #else
9004             struct target_old_sigaction *old_act;
9005             struct target_sigaction act, oact, *pact;
9006             if (arg2) {
9007                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9008                     return -TARGET_EFAULT;
9009                 act._sa_handler = old_act->_sa_handler;
9010                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9011                 act.sa_flags = old_act->sa_flags;
9012                 act.sa_restorer = old_act->sa_restorer;
9013 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9014                 act.ka_restorer = 0;
9015 #endif
9016                 unlock_user_struct(old_act, arg2, 0);
9017                 pact = &act;
9018             } else {
9019                 pact = NULL;
9020             }
9021             ret = get_errno(do_sigaction(arg1, pact, &oact));
9022             if (!is_error(ret) && arg3) {
9023                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9024                     return -TARGET_EFAULT;
9025                 old_act->_sa_handler = oact._sa_handler;
9026                 old_act->sa_mask = oact.sa_mask.sig[0];
9027                 old_act->sa_flags = oact.sa_flags;
9028                 old_act->sa_restorer = oact.sa_restorer;
9029                 unlock_user_struct(old_act, arg3, 1);
9030             }
9031 #endif
9032         }
9033         return ret;
9034 #endif
9035     case TARGET_NR_rt_sigaction:
9036         {
9037 #if defined(TARGET_ALPHA)
9038             /* For Alpha and SPARC this is a 5 argument syscall, with
9039              * a 'restorer' parameter which must be copied into the
9040              * sa_restorer field of the sigaction struct.
9041              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9042              * and arg5 is the sigsetsize.
9043              * Alpha also has a separate rt_sigaction struct that it uses
9044              * here; SPARC uses the usual sigaction struct.
9045              */
9046             struct target_rt_sigaction *rt_act;
9047             struct target_sigaction act, oact, *pact = 0;
9048 
9049             if (arg4 != sizeof(target_sigset_t)) {
9050                 return -TARGET_EINVAL;
9051             }
9052             if (arg2) {
9053                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
9054                     return -TARGET_EFAULT;
9055                 act._sa_handler = rt_act->_sa_handler;
9056                 act.sa_mask = rt_act->sa_mask;
9057                 act.sa_flags = rt_act->sa_flags;
9058                 act.sa_restorer = arg5;
9059                 unlock_user_struct(rt_act, arg2, 0);
9060                 pact = &act;
9061             }
9062             ret = get_errno(do_sigaction(arg1, pact, &oact));
9063             if (!is_error(ret) && arg3) {
9064                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
9065                     return -TARGET_EFAULT;
9066                 rt_act->_sa_handler = oact._sa_handler;
9067                 rt_act->sa_mask = oact.sa_mask;
9068                 rt_act->sa_flags = oact.sa_flags;
9069                 unlock_user_struct(rt_act, arg3, 1);
9070             }
9071 #else
9072 #ifdef TARGET_SPARC
9073             target_ulong restorer = arg4;
9074             target_ulong sigsetsize = arg5;
9075 #else
9076             target_ulong sigsetsize = arg4;
9077 #endif
9078             struct target_sigaction *act;
9079             struct target_sigaction *oact;
9080 
9081             if (sigsetsize != sizeof(target_sigset_t)) {
9082                 return -TARGET_EINVAL;
9083             }
9084             if (arg2) {
9085                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9086                     return -TARGET_EFAULT;
9087                 }
9088 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9089                 act->ka_restorer = restorer;
9090 #endif
9091             } else {
9092                 act = NULL;
9093             }
9094             if (arg3) {
9095                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9096                     ret = -TARGET_EFAULT;
9097                     goto rt_sigaction_fail;
9098                 }
9099             } else
9100                 oact = NULL;
9101             ret = get_errno(do_sigaction(arg1, act, oact));
9102 	rt_sigaction_fail:
9103             if (act)
9104                 unlock_user_struct(act, arg2, 0);
9105             if (oact)
9106                 unlock_user_struct(oact, arg3, 1);
9107 #endif
9108         }
9109         return ret;
9110 #ifdef TARGET_NR_sgetmask /* not on alpha */
9111     case TARGET_NR_sgetmask:
9112         {
9113             sigset_t cur_set;
9114             abi_ulong target_set;
9115             ret = do_sigprocmask(0, NULL, &cur_set);
9116             if (!ret) {
9117                 host_to_target_old_sigset(&target_set, &cur_set);
9118                 ret = target_set;
9119             }
9120         }
9121         return ret;
9122 #endif
9123 #ifdef TARGET_NR_ssetmask /* not on alpha */
9124     case TARGET_NR_ssetmask:
9125         {
9126             sigset_t set, oset;
9127             abi_ulong target_set = arg1;
9128             target_to_host_old_sigset(&set, &target_set);
9129             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9130             if (!ret) {
9131                 host_to_target_old_sigset(&target_set, &oset);
9132                 ret = target_set;
9133             }
9134         }
9135         return ret;
9136 #endif
9137 #ifdef TARGET_NR_sigprocmask
9138     case TARGET_NR_sigprocmask:
9139         {
9140 #if defined(TARGET_ALPHA)
9141             sigset_t set, oldset;
9142             abi_ulong mask;
9143             int how;
9144 
9145             switch (arg1) {
9146             case TARGET_SIG_BLOCK:
9147                 how = SIG_BLOCK;
9148                 break;
9149             case TARGET_SIG_UNBLOCK:
9150                 how = SIG_UNBLOCK;
9151                 break;
9152             case TARGET_SIG_SETMASK:
9153                 how = SIG_SETMASK;
9154                 break;
9155             default:
9156                 return -TARGET_EINVAL;
9157             }
9158             mask = arg2;
9159             target_to_host_old_sigset(&set, &mask);
9160 
9161             ret = do_sigprocmask(how, &set, &oldset);
9162             if (!is_error(ret)) {
9163                 host_to_target_old_sigset(&mask, &oldset);
9164                 ret = mask;
9165                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9166             }
9167 #else
9168             sigset_t set, oldset, *set_ptr;
9169             int how;
9170 
9171             if (arg2) {
9172                 switch (arg1) {
9173                 case TARGET_SIG_BLOCK:
9174                     how = SIG_BLOCK;
9175                     break;
9176                 case TARGET_SIG_UNBLOCK:
9177                     how = SIG_UNBLOCK;
9178                     break;
9179                 case TARGET_SIG_SETMASK:
9180                     how = SIG_SETMASK;
9181                     break;
9182                 default:
9183                     return -TARGET_EINVAL;
9184                 }
9185                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9186                     return -TARGET_EFAULT;
9187                 target_to_host_old_sigset(&set, p);
9188                 unlock_user(p, arg2, 0);
9189                 set_ptr = &set;
9190             } else {
9191                 how = 0;
9192                 set_ptr = NULL;
9193             }
9194             ret = do_sigprocmask(how, set_ptr, &oldset);
9195             if (!is_error(ret) && arg3) {
9196                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9197                     return -TARGET_EFAULT;
9198                 host_to_target_old_sigset(p, &oldset);
9199                 unlock_user(p, arg3, sizeof(target_sigset_t));
9200             }
9201 #endif
9202         }
9203         return ret;
9204 #endif
9205     case TARGET_NR_rt_sigprocmask:
9206         {
9207             int how = arg1;
9208             sigset_t set, oldset, *set_ptr;
9209 
9210             if (arg4 != sizeof(target_sigset_t)) {
9211                 return -TARGET_EINVAL;
9212             }
9213 
9214             if (arg2) {
9215                 switch(how) {
9216                 case TARGET_SIG_BLOCK:
9217                     how = SIG_BLOCK;
9218                     break;
9219                 case TARGET_SIG_UNBLOCK:
9220                     how = SIG_UNBLOCK;
9221                     break;
9222                 case TARGET_SIG_SETMASK:
9223                     how = SIG_SETMASK;
9224                     break;
9225                 default:
9226                     return -TARGET_EINVAL;
9227                 }
9228                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9229                     return -TARGET_EFAULT;
9230                 target_to_host_sigset(&set, p);
9231                 unlock_user(p, arg2, 0);
9232                 set_ptr = &set;
9233             } else {
9234                 how = 0;
9235                 set_ptr = NULL;
9236             }
9237             ret = do_sigprocmask(how, set_ptr, &oldset);
9238             if (!is_error(ret) && arg3) {
9239                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9240                     return -TARGET_EFAULT;
9241                 host_to_target_sigset(p, &oldset);
9242                 unlock_user(p, arg3, sizeof(target_sigset_t));
9243             }
9244         }
9245         return ret;
9246 #ifdef TARGET_NR_sigpending
9247     case TARGET_NR_sigpending:
9248         {
9249             sigset_t set;
9250             ret = get_errno(sigpending(&set));
9251             if (!is_error(ret)) {
9252                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9253                     return -TARGET_EFAULT;
9254                 host_to_target_old_sigset(p, &set);
9255                 unlock_user(p, arg1, sizeof(target_sigset_t));
9256             }
9257         }
9258         return ret;
9259 #endif
9260     case TARGET_NR_rt_sigpending:
9261         {
9262             sigset_t set;
9263 
9264             /* Yes, this check is >, not != like most. We follow the kernel's
9265              * logic and it does it like this because it implements
9266              * NR_sigpending through the same code path, and in that case
9267              * the old_sigset_t is smaller in size.
9268              */
9269             if (arg2 > sizeof(target_sigset_t)) {
9270                 return -TARGET_EINVAL;
9271             }
9272 
9273             ret = get_errno(sigpending(&set));
9274             if (!is_error(ret)) {
9275                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9276                     return -TARGET_EFAULT;
9277                 host_to_target_sigset(p, &set);
9278                 unlock_user(p, arg1, sizeof(target_sigset_t));
9279             }
9280         }
9281         return ret;
9282 #ifdef TARGET_NR_sigsuspend
9283     case TARGET_NR_sigsuspend:
9284         {
9285             TaskState *ts = cpu->opaque;
9286 #if defined(TARGET_ALPHA)
9287             abi_ulong mask = arg1;
9288             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9289 #else
9290             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9291                 return -TARGET_EFAULT;
9292             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9293             unlock_user(p, arg1, 0);
9294 #endif
9295             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9296                                                SIGSET_T_SIZE));
9297             if (ret != -TARGET_ERESTARTSYS) {
9298                 ts->in_sigsuspend = 1;
9299             }
9300         }
9301         return ret;
9302 #endif
9303     case TARGET_NR_rt_sigsuspend:
9304         {
9305             TaskState *ts = cpu->opaque;
9306 
9307             if (arg2 != sizeof(target_sigset_t)) {
9308                 return -TARGET_EINVAL;
9309             }
9310             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9311                 return -TARGET_EFAULT;
9312             target_to_host_sigset(&ts->sigsuspend_mask, p);
9313             unlock_user(p, arg1, 0);
9314             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9315                                                SIGSET_T_SIZE));
9316             if (ret != -TARGET_ERESTARTSYS) {
9317                 ts->in_sigsuspend = 1;
9318             }
9319         }
9320         return ret;
9321 #ifdef TARGET_NR_rt_sigtimedwait
9322     case TARGET_NR_rt_sigtimedwait:
9323         {
9324             sigset_t set;
9325             struct timespec uts, *puts;
9326             siginfo_t uinfo;
9327 
9328             if (arg4 != sizeof(target_sigset_t)) {
9329                 return -TARGET_EINVAL;
9330             }
9331 
9332             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9333                 return -TARGET_EFAULT;
9334             target_to_host_sigset(&set, p);
9335             unlock_user(p, arg1, 0);
9336             if (arg3) {
9337                 puts = &uts;
9338                 if (target_to_host_timespec(puts, arg3)) {
9339                     return -TARGET_EFAULT;
9340                 }
9341             } else {
9342                 puts = NULL;
9343             }
9344             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9345                                                  SIGSET_T_SIZE));
9346             if (!is_error(ret)) {
9347                 if (arg2) {
9348                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9349                                   0);
9350                     if (!p) {
9351                         return -TARGET_EFAULT;
9352                     }
9353                     host_to_target_siginfo(p, &uinfo);
9354                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9355                 }
9356                 ret = host_to_target_signal(ret);
9357             }
9358         }
9359         return ret;
9360 #endif
9361 #ifdef TARGET_NR_rt_sigtimedwait_time64
9362     case TARGET_NR_rt_sigtimedwait_time64:
9363         {
9364             sigset_t set;
9365             struct timespec uts, *puts;
9366             siginfo_t uinfo;
9367 
9368             if (arg4 != sizeof(target_sigset_t)) {
9369                 return -TARGET_EINVAL;
9370             }
9371 
9372             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9373             if (!p) {
9374                 return -TARGET_EFAULT;
9375             }
9376             target_to_host_sigset(&set, p);
9377             unlock_user(p, arg1, 0);
9378             if (arg3) {
9379                 puts = &uts;
9380                 if (target_to_host_timespec64(puts, arg3)) {
9381                     return -TARGET_EFAULT;
9382                 }
9383             } else {
9384                 puts = NULL;
9385             }
9386             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9387                                                  SIGSET_T_SIZE));
9388             if (!is_error(ret)) {
9389                 if (arg2) {
9390                     p = lock_user(VERIFY_WRITE, arg2,
9391                                   sizeof(target_siginfo_t), 0);
9392                     if (!p) {
9393                         return -TARGET_EFAULT;
9394                     }
9395                     host_to_target_siginfo(p, &uinfo);
9396                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9397                 }
9398                 ret = host_to_target_signal(ret);
9399             }
9400         }
9401         return ret;
9402 #endif
9403     case TARGET_NR_rt_sigqueueinfo:
9404         {
9405             siginfo_t uinfo;
9406 
9407             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9408             if (!p) {
9409                 return -TARGET_EFAULT;
9410             }
9411             target_to_host_siginfo(&uinfo, p);
9412             unlock_user(p, arg3, 0);
9413             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9414         }
9415         return ret;
9416     case TARGET_NR_rt_tgsigqueueinfo:
9417         {
9418             siginfo_t uinfo;
9419 
9420             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9421             if (!p) {
9422                 return -TARGET_EFAULT;
9423             }
9424             target_to_host_siginfo(&uinfo, p);
9425             unlock_user(p, arg4, 0);
9426             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9427         }
9428         return ret;
9429 #ifdef TARGET_NR_sigreturn
9430     case TARGET_NR_sigreturn:
9431         if (block_signals()) {
9432             return -TARGET_ERESTARTSYS;
9433         }
9434         return do_sigreturn(cpu_env);
9435 #endif
9436     case TARGET_NR_rt_sigreturn:
9437         if (block_signals()) {
9438             return -TARGET_ERESTARTSYS;
9439         }
9440         return do_rt_sigreturn(cpu_env);
9441     case TARGET_NR_sethostname:
9442         if (!(p = lock_user_string(arg1)))
9443             return -TARGET_EFAULT;
9444         ret = get_errno(sethostname(p, arg2));
9445         unlock_user(p, arg1, 0);
9446         return ret;
9447 #ifdef TARGET_NR_setrlimit
9448     case TARGET_NR_setrlimit:
9449         {
9450             int resource = target_to_host_resource(arg1);
9451             struct target_rlimit *target_rlim;
9452             struct rlimit rlim;
9453             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9454                 return -TARGET_EFAULT;
9455             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9456             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9457             unlock_user_struct(target_rlim, arg2, 0);
9458             /*
9459              * If we just passed through resource limit settings for memory then
9460              * they would also apply to QEMU's own allocations, and QEMU will
9461              * crash or hang or die if its allocations fail. Ideally we would
9462              * track the guest allocations in QEMU and apply the limits ourselves.
9463              * For now, just tell the guest the call succeeded but don't actually
9464              * limit anything.
9465              */
9466             if (resource != RLIMIT_AS &&
9467                 resource != RLIMIT_DATA &&
9468                 resource != RLIMIT_STACK) {
9469                 return get_errno(setrlimit(resource, &rlim));
9470             } else {
9471                 return 0;
9472             }
9473         }
9474 #endif
9475 #ifdef TARGET_NR_getrlimit
9476     case TARGET_NR_getrlimit:
9477         {
9478             int resource = target_to_host_resource(arg1);
9479             struct target_rlimit *target_rlim;
9480             struct rlimit rlim;
9481 
9482             ret = get_errno(getrlimit(resource, &rlim));
9483             if (!is_error(ret)) {
9484                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9485                     return -TARGET_EFAULT;
9486                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9487                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9488                 unlock_user_struct(target_rlim, arg2, 1);
9489             }
9490         }
9491         return ret;
9492 #endif
9493     case TARGET_NR_getrusage:
9494         {
9495             struct rusage rusage;
9496             ret = get_errno(getrusage(arg1, &rusage));
9497             if (!is_error(ret)) {
9498                 ret = host_to_target_rusage(arg2, &rusage);
9499             }
9500         }
9501         return ret;
9502 #if defined(TARGET_NR_gettimeofday)
9503     case TARGET_NR_gettimeofday:
9504         {
9505             struct timeval tv;
9506             struct timezone tz;
9507 
9508             ret = get_errno(gettimeofday(&tv, &tz));
9509             if (!is_error(ret)) {
9510                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9511                     return -TARGET_EFAULT;
9512                 }
9513                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9514                     return -TARGET_EFAULT;
9515                 }
9516             }
9517         }
9518         return ret;
9519 #endif
9520 #if defined(TARGET_NR_settimeofday)
9521     case TARGET_NR_settimeofday:
9522         {
9523             struct timeval tv, *ptv = NULL;
9524             struct timezone tz, *ptz = NULL;
9525 
9526             if (arg1) {
9527                 if (copy_from_user_timeval(&tv, arg1)) {
9528                     return -TARGET_EFAULT;
9529                 }
9530                 ptv = &tv;
9531             }
9532 
9533             if (arg2) {
9534                 if (copy_from_user_timezone(&tz, arg2)) {
9535                     return -TARGET_EFAULT;
9536                 }
9537                 ptz = &tz;
9538             }
9539 
9540             return get_errno(settimeofday(ptv, ptz));
9541         }
9542 #endif
9543 #if defined(TARGET_NR_select)
9544     case TARGET_NR_select:
9545 #if defined(TARGET_WANT_NI_OLD_SELECT)
9546         /* some architectures used to have old_select here
9547          * but now ENOSYS it.
9548          */
9549         ret = -TARGET_ENOSYS;
9550 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9551         ret = do_old_select(arg1);
9552 #else
9553         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9554 #endif
9555         return ret;
9556 #endif
9557 #ifdef TARGET_NR_pselect6
9558     case TARGET_NR_pselect6:
9559         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9560 #endif
9561 #ifdef TARGET_NR_pselect6_time64
9562     case TARGET_NR_pselect6_time64:
9563         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9564 #endif
9565 #ifdef TARGET_NR_symlink
9566     case TARGET_NR_symlink:
9567         {
9568             void *p2;
9569             p = lock_user_string(arg1);
9570             p2 = lock_user_string(arg2);
9571             if (!p || !p2)
9572                 ret = -TARGET_EFAULT;
9573             else
9574                 ret = get_errno(symlink(p, p2));
9575             unlock_user(p2, arg2, 0);
9576             unlock_user(p, arg1, 0);
9577         }
9578         return ret;
9579 #endif
9580 #if defined(TARGET_NR_symlinkat)
9581     case TARGET_NR_symlinkat:
9582         {
9583             void *p2;
9584             p  = lock_user_string(arg1);
9585             p2 = lock_user_string(arg3);
9586             if (!p || !p2)
9587                 ret = -TARGET_EFAULT;
9588             else
9589                 ret = get_errno(symlinkat(p, arg2, p2));
9590             unlock_user(p2, arg3, 0);
9591             unlock_user(p, arg1, 0);
9592         }
9593         return ret;
9594 #endif
9595 #ifdef TARGET_NR_readlink
9596     case TARGET_NR_readlink:
9597         {
9598             void *p2;
9599             p = lock_user_string(arg1);
9600             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9601             if (!p || !p2) {
9602                 ret = -TARGET_EFAULT;
9603             } else if (!arg3) {
9604                 /* Short circuit this for the magic exe check. */
9605                 ret = -TARGET_EINVAL;
9606             } else if (is_proc_myself((const char *)p, "exe")) {
9607                 char real[PATH_MAX], *temp;
9608                 temp = realpath(exec_path, real);
9609                 /* Return value is # of bytes that we wrote to the buffer. */
9610                 if (temp == NULL) {
9611                     ret = get_errno(-1);
9612                 } else {
9613                     /* Don't worry about sign mismatch as earlier mapping
9614                      * logic would have thrown a bad address error. */
9615                     ret = MIN(strlen(real), arg3);
9616                     /* We cannot NUL terminate the string. */
9617                     memcpy(p2, real, ret);
9618                 }
9619             } else {
9620                 ret = get_errno(readlink(path(p), p2, arg3));
9621             }
9622             unlock_user(p2, arg2, ret);
9623             unlock_user(p, arg1, 0);
9624         }
9625         return ret;
9626 #endif
9627 #if defined(TARGET_NR_readlinkat)
9628     case TARGET_NR_readlinkat:
9629         {
9630             void *p2;
9631             p  = lock_user_string(arg2);
9632             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9633             if (!p || !p2) {
9634                 ret = -TARGET_EFAULT;
9635             } else if (is_proc_myself((const char *)p, "exe")) {
9636                 char real[PATH_MAX], *temp;
9637                 temp = realpath(exec_path, real);
9638                 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9639                 snprintf((char *)p2, arg4, "%s", real);
9640             } else {
9641                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9642             }
9643             unlock_user(p2, arg3, ret);
9644             unlock_user(p, arg2, 0);
9645         }
9646         return ret;
9647 #endif
9648 #ifdef TARGET_NR_swapon
9649     case TARGET_NR_swapon:
9650         if (!(p = lock_user_string(arg1)))
9651             return -TARGET_EFAULT;
9652         ret = get_errno(swapon(p, arg2));
9653         unlock_user(p, arg1, 0);
9654         return ret;
9655 #endif
9656     case TARGET_NR_reboot:
9657         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9658            /* arg4 must be ignored in all other cases */
9659            p = lock_user_string(arg4);
9660            if (!p) {
9661                return -TARGET_EFAULT;
9662            }
9663            ret = get_errno(reboot(arg1, arg2, arg3, p));
9664            unlock_user(p, arg4, 0);
9665         } else {
9666            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9667         }
9668         return ret;
9669 #ifdef TARGET_NR_mmap
9670     case TARGET_NR_mmap:
9671 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9672     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9673     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9674     || defined(TARGET_S390X)
9675         {
9676             abi_ulong *v;
9677             abi_ulong v1, v2, v3, v4, v5, v6;
9678             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9679                 return -TARGET_EFAULT;
9680             v1 = tswapal(v[0]);
9681             v2 = tswapal(v[1]);
9682             v3 = tswapal(v[2]);
9683             v4 = tswapal(v[3]);
9684             v5 = tswapal(v[4]);
9685             v6 = tswapal(v[5]);
9686             unlock_user(v, arg1, 0);
9687             ret = get_errno(target_mmap(v1, v2, v3,
9688                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9689                                         v5, v6));
9690         }
9691 #else
9692         ret = get_errno(target_mmap(arg1, arg2, arg3,
9693                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9694                                     arg5,
9695                                     arg6));
9696 #endif
9697         return ret;
9698 #endif
9699 #ifdef TARGET_NR_mmap2
9700     case TARGET_NR_mmap2:
9701 #ifndef MMAP_SHIFT
9702 #define MMAP_SHIFT 12
9703 #endif
9704         ret = target_mmap(arg1, arg2, arg3,
9705                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9706                           arg5, arg6 << MMAP_SHIFT);
9707         return get_errno(ret);
9708 #endif
9709     case TARGET_NR_munmap:
9710         return get_errno(target_munmap(arg1, arg2));
9711     case TARGET_NR_mprotect:
9712         {
9713             TaskState *ts = cpu->opaque;
9714             /* Special hack to detect libc making the stack executable.  */
9715             if ((arg3 & PROT_GROWSDOWN)
9716                 && arg1 >= ts->info->stack_limit
9717                 && arg1 <= ts->info->start_stack) {
9718                 arg3 &= ~PROT_GROWSDOWN;
9719                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9720                 arg1 = ts->info->stack_limit;
9721             }
9722         }
9723         return get_errno(target_mprotect(arg1, arg2, arg3));
9724 #ifdef TARGET_NR_mremap
9725     case TARGET_NR_mremap:
9726         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9727 #endif
9728         /* ??? msync/mlock/munlock are broken for softmmu.  */
9729 #ifdef TARGET_NR_msync
9730     case TARGET_NR_msync:
9731         return get_errno(msync(g2h(arg1), arg2, arg3));
9732 #endif
9733 #ifdef TARGET_NR_mlock
9734     case TARGET_NR_mlock:
9735         return get_errno(mlock(g2h(arg1), arg2));
9736 #endif
9737 #ifdef TARGET_NR_munlock
9738     case TARGET_NR_munlock:
9739         return get_errno(munlock(g2h(arg1), arg2));
9740 #endif
9741 #ifdef TARGET_NR_mlockall
9742     case TARGET_NR_mlockall:
9743         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9744 #endif
9745 #ifdef TARGET_NR_munlockall
9746     case TARGET_NR_munlockall:
9747         return get_errno(munlockall());
9748 #endif
9749 #ifdef TARGET_NR_truncate
9750     case TARGET_NR_truncate:
9751         if (!(p = lock_user_string(arg1)))
9752             return -TARGET_EFAULT;
9753         ret = get_errno(truncate(p, arg2));
9754         unlock_user(p, arg1, 0);
9755         return ret;
9756 #endif
9757 #ifdef TARGET_NR_ftruncate
9758     case TARGET_NR_ftruncate:
9759         return get_errno(ftruncate(arg1, arg2));
9760 #endif
9761     case TARGET_NR_fchmod:
9762         return get_errno(fchmod(arg1, arg2));
9763 #if defined(TARGET_NR_fchmodat)
9764     case TARGET_NR_fchmodat:
9765         if (!(p = lock_user_string(arg2)))
9766             return -TARGET_EFAULT;
9767         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9768         unlock_user(p, arg2, 0);
9769         return ret;
9770 #endif
9771     case TARGET_NR_getpriority:
9772         /* Note that negative values are valid for getpriority, so we must
9773            differentiate based on errno settings.  */
9774         errno = 0;
9775         ret = getpriority(arg1, arg2);
9776         if (ret == -1 && errno != 0) {
9777             return -host_to_target_errno(errno);
9778         }
9779 #ifdef TARGET_ALPHA
9780         /* Return value is the unbiased priority.  Signal no error.  */
9781         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9782 #else
9783         /* Return value is a biased priority to avoid negative numbers.  */
9784         ret = 20 - ret;
9785 #endif
9786         return ret;
9787     case TARGET_NR_setpriority:
9788         return get_errno(setpriority(arg1, arg2, arg3));
9789 #ifdef TARGET_NR_statfs
9790     case TARGET_NR_statfs:
9791         if (!(p = lock_user_string(arg1))) {
9792             return -TARGET_EFAULT;
9793         }
9794         ret = get_errno(statfs(path(p), &stfs));
9795         unlock_user(p, arg1, 0);
9796     convert_statfs:
9797         if (!is_error(ret)) {
9798             struct target_statfs *target_stfs;
9799 
9800             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9801                 return -TARGET_EFAULT;
9802             __put_user(stfs.f_type, &target_stfs->f_type);
9803             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9804             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9805             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9806             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9807             __put_user(stfs.f_files, &target_stfs->f_files);
9808             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9809             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9810             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9811             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9812             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9813 #ifdef _STATFS_F_FLAGS
9814             __put_user(stfs.f_flags, &target_stfs->f_flags);
9815 #else
9816             __put_user(0, &target_stfs->f_flags);
9817 #endif
9818             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9819             unlock_user_struct(target_stfs, arg2, 1);
9820         }
9821         return ret;
9822 #endif
9823 #ifdef TARGET_NR_fstatfs
9824     case TARGET_NR_fstatfs:
9825         ret = get_errno(fstatfs(arg1, &stfs));
9826         goto convert_statfs;
9827 #endif
9828 #ifdef TARGET_NR_statfs64
9829     case TARGET_NR_statfs64:
9830         if (!(p = lock_user_string(arg1))) {
9831             return -TARGET_EFAULT;
9832         }
9833         ret = get_errno(statfs(path(p), &stfs));
9834         unlock_user(p, arg1, 0);
9835     convert_statfs64:
9836         if (!is_error(ret)) {
9837             struct target_statfs64 *target_stfs;
9838 
9839             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9840                 return -TARGET_EFAULT;
9841             __put_user(stfs.f_type, &target_stfs->f_type);
9842             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9843             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9844             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9845             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9846             __put_user(stfs.f_files, &target_stfs->f_files);
9847             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9848             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9849             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9850             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9851             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9852 #ifdef _STATFS_F_FLAGS
9853             __put_user(stfs.f_flags, &target_stfs->f_flags);
9854 #else
9855             __put_user(0, &target_stfs->f_flags);
9856 #endif
9857             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9858             unlock_user_struct(target_stfs, arg3, 1);
9859         }
9860         return ret;
9861     case TARGET_NR_fstatfs64:
9862         ret = get_errno(fstatfs(arg1, &stfs));
9863         goto convert_statfs64;
9864 #endif
9865 #ifdef TARGET_NR_socketcall
9866     case TARGET_NR_socketcall:
9867         return do_socketcall(arg1, arg2);
9868 #endif
9869 #ifdef TARGET_NR_accept
9870     case TARGET_NR_accept:
9871         return do_accept4(arg1, arg2, arg3, 0);
9872 #endif
9873 #ifdef TARGET_NR_accept4
9874     case TARGET_NR_accept4:
9875         return do_accept4(arg1, arg2, arg3, arg4);
9876 #endif
9877 #ifdef TARGET_NR_bind
9878     case TARGET_NR_bind:
9879         return do_bind(arg1, arg2, arg3);
9880 #endif
9881 #ifdef TARGET_NR_connect
9882     case TARGET_NR_connect:
9883         return do_connect(arg1, arg2, arg3);
9884 #endif
9885 #ifdef TARGET_NR_getpeername
9886     case TARGET_NR_getpeername:
9887         return do_getpeername(arg1, arg2, arg3);
9888 #endif
9889 #ifdef TARGET_NR_getsockname
9890     case TARGET_NR_getsockname:
9891         return do_getsockname(arg1, arg2, arg3);
9892 #endif
9893 #ifdef TARGET_NR_getsockopt
9894     case TARGET_NR_getsockopt:
9895         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9896 #endif
9897 #ifdef TARGET_NR_listen
9898     case TARGET_NR_listen:
9899         return get_errno(listen(arg1, arg2));
9900 #endif
9901 #ifdef TARGET_NR_recv
9902     case TARGET_NR_recv:
9903         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9904 #endif
9905 #ifdef TARGET_NR_recvfrom
9906     case TARGET_NR_recvfrom:
9907         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9908 #endif
9909 #ifdef TARGET_NR_recvmsg
9910     case TARGET_NR_recvmsg:
9911         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9912 #endif
9913 #ifdef TARGET_NR_send
9914     case TARGET_NR_send:
9915         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9916 #endif
9917 #ifdef TARGET_NR_sendmsg
9918     case TARGET_NR_sendmsg:
9919         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9920 #endif
9921 #ifdef TARGET_NR_sendmmsg
9922     case TARGET_NR_sendmmsg:
9923         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9924 #endif
9925 #ifdef TARGET_NR_recvmmsg
9926     case TARGET_NR_recvmmsg:
9927         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9928 #endif
9929 #ifdef TARGET_NR_sendto
9930     case TARGET_NR_sendto:
9931         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9932 #endif
9933 #ifdef TARGET_NR_shutdown
9934     case TARGET_NR_shutdown:
9935         return get_errno(shutdown(arg1, arg2));
9936 #endif
9937 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9938     case TARGET_NR_getrandom:
9939         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9940         if (!p) {
9941             return -TARGET_EFAULT;
9942         }
9943         ret = get_errno(getrandom(p, arg2, arg3));
9944         unlock_user(p, arg1, ret);
9945         return ret;
9946 #endif
9947 #ifdef TARGET_NR_socket
9948     case TARGET_NR_socket:
9949         return do_socket(arg1, arg2, arg3);
9950 #endif
9951 #ifdef TARGET_NR_socketpair
9952     case TARGET_NR_socketpair:
9953         return do_socketpair(arg1, arg2, arg3, arg4);
9954 #endif
9955 #ifdef TARGET_NR_setsockopt
9956     case TARGET_NR_setsockopt:
9957         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9958 #endif
9959 #if defined(TARGET_NR_syslog)
9960     case TARGET_NR_syslog:
9961         {
9962             int len = arg2;
9963 
9964             switch (arg1) {
9965             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9966             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9967             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9968             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9969             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9970             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9971             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9972             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9973                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9974             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9975             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9976             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9977                 {
9978                     if (len < 0) {
9979                         return -TARGET_EINVAL;
9980                     }
9981                     if (len == 0) {
9982                         return 0;
9983                     }
9984                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9985                     if (!p) {
9986                         return -TARGET_EFAULT;
9987                     }
9988                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9989                     unlock_user(p, arg2, arg3);
9990                 }
9991                 return ret;
9992             default:
9993                 return -TARGET_EINVAL;
9994             }
9995         }
9996         break;
9997 #endif
9998     case TARGET_NR_setitimer:
9999         {
10000             struct itimerval value, ovalue, *pvalue;
10001 
10002             if (arg2) {
10003                 pvalue = &value;
10004                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10005                     || copy_from_user_timeval(&pvalue->it_value,
10006                                               arg2 + sizeof(struct target_timeval)))
10007                     return -TARGET_EFAULT;
10008             } else {
10009                 pvalue = NULL;
10010             }
10011             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10012             if (!is_error(ret) && arg3) {
10013                 if (copy_to_user_timeval(arg3,
10014                                          &ovalue.it_interval)
10015                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10016                                             &ovalue.it_value))
10017                     return -TARGET_EFAULT;
10018             }
10019         }
10020         return ret;
10021     case TARGET_NR_getitimer:
10022         {
10023             struct itimerval value;
10024 
10025             ret = get_errno(getitimer(arg1, &value));
10026             if (!is_error(ret) && arg2) {
10027                 if (copy_to_user_timeval(arg2,
10028                                          &value.it_interval)
10029                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10030                                             &value.it_value))
10031                     return -TARGET_EFAULT;
10032             }
10033         }
10034         return ret;
10035 #ifdef TARGET_NR_stat
10036     case TARGET_NR_stat:
10037         if (!(p = lock_user_string(arg1))) {
10038             return -TARGET_EFAULT;
10039         }
10040         ret = get_errno(stat(path(p), &st));
10041         unlock_user(p, arg1, 0);
10042         goto do_stat;
10043 #endif
10044 #ifdef TARGET_NR_lstat
10045     case TARGET_NR_lstat:
10046         if (!(p = lock_user_string(arg1))) {
10047             return -TARGET_EFAULT;
10048         }
10049         ret = get_errno(lstat(path(p), &st));
10050         unlock_user(p, arg1, 0);
10051         goto do_stat;
10052 #endif
10053 #ifdef TARGET_NR_fstat
10054     case TARGET_NR_fstat:
10055         {
10056             ret = get_errno(fstat(arg1, &st));
10057 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10058         do_stat:
10059 #endif
10060             if (!is_error(ret)) {
10061                 struct target_stat *target_st;
10062 
10063                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10064                     return -TARGET_EFAULT;
10065                 memset(target_st, 0, sizeof(*target_st));
10066                 __put_user(st.st_dev, &target_st->st_dev);
10067                 __put_user(st.st_ino, &target_st->st_ino);
10068                 __put_user(st.st_mode, &target_st->st_mode);
10069                 __put_user(st.st_uid, &target_st->st_uid);
10070                 __put_user(st.st_gid, &target_st->st_gid);
10071                 __put_user(st.st_nlink, &target_st->st_nlink);
10072                 __put_user(st.st_rdev, &target_st->st_rdev);
10073                 __put_user(st.st_size, &target_st->st_size);
10074                 __put_user(st.st_blksize, &target_st->st_blksize);
10075                 __put_user(st.st_blocks, &target_st->st_blocks);
10076                 __put_user(st.st_atime, &target_st->target_st_atime);
10077                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10078                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10079 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10080     defined(TARGET_STAT_HAVE_NSEC)
10081                 __put_user(st.st_atim.tv_nsec,
10082                            &target_st->target_st_atime_nsec);
10083                 __put_user(st.st_mtim.tv_nsec,
10084                            &target_st->target_st_mtime_nsec);
10085                 __put_user(st.st_ctim.tv_nsec,
10086                            &target_st->target_st_ctime_nsec);
10087 #endif
10088                 unlock_user_struct(target_st, arg2, 1);
10089             }
10090         }
10091         return ret;
10092 #endif
10093     case TARGET_NR_vhangup:
10094         return get_errno(vhangup());
10095 #ifdef TARGET_NR_syscall
10096     case TARGET_NR_syscall:
10097         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10098                           arg6, arg7, arg8, 0);
10099 #endif
10100 #if defined(TARGET_NR_wait4)
10101     case TARGET_NR_wait4:
10102         {
10103             int status;
10104             abi_long status_ptr = arg2;
10105             struct rusage rusage, *rusage_ptr;
10106             abi_ulong target_rusage = arg4;
10107             abi_long rusage_err;
10108             if (target_rusage)
10109                 rusage_ptr = &rusage;
10110             else
10111                 rusage_ptr = NULL;
10112             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10113             if (!is_error(ret)) {
10114                 if (status_ptr && ret) {
10115                     status = host_to_target_waitstatus(status);
10116                     if (put_user_s32(status, status_ptr))
10117                         return -TARGET_EFAULT;
10118                 }
10119                 if (target_rusage) {
10120                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10121                     if (rusage_err) {
10122                         ret = rusage_err;
10123                     }
10124                 }
10125             }
10126         }
10127         return ret;
10128 #endif
10129 #ifdef TARGET_NR_swapoff
10130     case TARGET_NR_swapoff:
10131         if (!(p = lock_user_string(arg1)))
10132             return -TARGET_EFAULT;
10133         ret = get_errno(swapoff(p));
10134         unlock_user(p, arg1, 0);
10135         return ret;
10136 #endif
10137     case TARGET_NR_sysinfo:
10138         {
10139             struct target_sysinfo *target_value;
10140             struct sysinfo value;
10141             ret = get_errno(sysinfo(&value));
10142             if (!is_error(ret) && arg1)
10143             {
10144                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10145                     return -TARGET_EFAULT;
10146                 __put_user(value.uptime, &target_value->uptime);
10147                 __put_user(value.loads[0], &target_value->loads[0]);
10148                 __put_user(value.loads[1], &target_value->loads[1]);
10149                 __put_user(value.loads[2], &target_value->loads[2]);
10150                 __put_user(value.totalram, &target_value->totalram);
10151                 __put_user(value.freeram, &target_value->freeram);
10152                 __put_user(value.sharedram, &target_value->sharedram);
10153                 __put_user(value.bufferram, &target_value->bufferram);
10154                 __put_user(value.totalswap, &target_value->totalswap);
10155                 __put_user(value.freeswap, &target_value->freeswap);
10156                 __put_user(value.procs, &target_value->procs);
10157                 __put_user(value.totalhigh, &target_value->totalhigh);
10158                 __put_user(value.freehigh, &target_value->freehigh);
10159                 __put_user(value.mem_unit, &target_value->mem_unit);
10160                 unlock_user_struct(target_value, arg1, 1);
10161             }
10162         }
10163         return ret;
10164 #ifdef TARGET_NR_ipc
10165     case TARGET_NR_ipc:
10166         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10167 #endif
10168 #ifdef TARGET_NR_semget
10169     case TARGET_NR_semget:
10170         return get_errno(semget(arg1, arg2, arg3));
10171 #endif
10172 #ifdef TARGET_NR_semop
10173     case TARGET_NR_semop:
10174         return do_semtimedop(arg1, arg2, arg3, 0, false);
10175 #endif
10176 #ifdef TARGET_NR_semtimedop
10177     case TARGET_NR_semtimedop:
10178         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10179 #endif
10180 #ifdef TARGET_NR_semtimedop_time64
10181     case TARGET_NR_semtimedop_time64:
10182         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10183 #endif
10184 #ifdef TARGET_NR_semctl
10185     case TARGET_NR_semctl:
10186         return do_semctl(arg1, arg2, arg3, arg4);
10187 #endif
10188 #ifdef TARGET_NR_msgctl
10189     case TARGET_NR_msgctl:
10190         return do_msgctl(arg1, arg2, arg3);
10191 #endif
10192 #ifdef TARGET_NR_msgget
10193     case TARGET_NR_msgget:
10194         return get_errno(msgget(arg1, arg2));
10195 #endif
10196 #ifdef TARGET_NR_msgrcv
10197     case TARGET_NR_msgrcv:
10198         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10199 #endif
10200 #ifdef TARGET_NR_msgsnd
10201     case TARGET_NR_msgsnd:
10202         return do_msgsnd(arg1, arg2, arg3, arg4);
10203 #endif
10204 #ifdef TARGET_NR_shmget
10205     case TARGET_NR_shmget:
10206         return get_errno(shmget(arg1, arg2, arg3));
10207 #endif
10208 #ifdef TARGET_NR_shmctl
10209     case TARGET_NR_shmctl:
10210         return do_shmctl(arg1, arg2, arg3);
10211 #endif
10212 #ifdef TARGET_NR_shmat
10213     case TARGET_NR_shmat:
10214         return do_shmat(cpu_env, arg1, arg2, arg3);
10215 #endif
10216 #ifdef TARGET_NR_shmdt
10217     case TARGET_NR_shmdt:
10218         return do_shmdt(arg1);
10219 #endif
10220     case TARGET_NR_fsync:
10221         return get_errno(fsync(arg1));
10222     case TARGET_NR_clone:
10223         /* Linux manages to have three different orderings for its
10224          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10225          * match the kernel's CONFIG_CLONE_* settings.
10226          * Microblaze is further special in that it uses a sixth
10227          * implicit argument to clone for the TLS pointer.
10228          */
10229 #if defined(TARGET_MICROBLAZE)
10230         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10231 #elif defined(TARGET_CLONE_BACKWARDS)
10232         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10233 #elif defined(TARGET_CLONE_BACKWARDS2)
10234         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10235 #else
10236         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10237 #endif
10238         return ret;
10239 #ifdef __NR_exit_group
10240         /* new thread calls */
10241     case TARGET_NR_exit_group:
10242         preexit_cleanup(cpu_env, arg1);
10243         return get_errno(exit_group(arg1));
10244 #endif
10245     case TARGET_NR_setdomainname:
10246         if (!(p = lock_user_string(arg1)))
10247             return -TARGET_EFAULT;
10248         ret = get_errno(setdomainname(p, arg2));
10249         unlock_user(p, arg1, 0);
10250         return ret;
10251     case TARGET_NR_uname:
10252         /* no need to transcode because we use the linux syscall */
10253         {
10254             struct new_utsname * buf;
10255 
10256             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10257                 return -TARGET_EFAULT;
10258             ret = get_errno(sys_uname(buf));
10259             if (!is_error(ret)) {
10260                 /* Overwrite the native machine name with whatever is being
10261                    emulated. */
10262                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10263                           sizeof(buf->machine));
10264                 /* Allow the user to override the reported release.  */
10265                 if (qemu_uname_release && *qemu_uname_release) {
10266                     g_strlcpy(buf->release, qemu_uname_release,
10267                               sizeof(buf->release));
10268                 }
10269             }
10270             unlock_user_struct(buf, arg1, 1);
10271         }
10272         return ret;
10273 #ifdef TARGET_I386
10274     case TARGET_NR_modify_ldt:
10275         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10276 #if !defined(TARGET_X86_64)
10277     case TARGET_NR_vm86:
10278         return do_vm86(cpu_env, arg1, arg2);
10279 #endif
10280 #endif
10281 #if defined(TARGET_NR_adjtimex)
10282     case TARGET_NR_adjtimex:
10283         {
10284             struct timex host_buf;
10285 
10286             if (target_to_host_timex(&host_buf, arg1) != 0) {
10287                 return -TARGET_EFAULT;
10288             }
10289             ret = get_errno(adjtimex(&host_buf));
10290             if (!is_error(ret)) {
10291                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10292                     return -TARGET_EFAULT;
10293                 }
10294             }
10295         }
10296         return ret;
10297 #endif
10298 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10299     case TARGET_NR_clock_adjtime:
10300         {
10301             struct timex htx, *phtx = &htx;
10302 
10303             if (target_to_host_timex(phtx, arg2) != 0) {
10304                 return -TARGET_EFAULT;
10305             }
10306             ret = get_errno(clock_adjtime(arg1, phtx));
10307             if (!is_error(ret) && phtx) {
10308                 if (host_to_target_timex(arg2, phtx) != 0) {
10309                     return -TARGET_EFAULT;
10310                 }
10311             }
10312         }
10313         return ret;
10314 #endif
10315 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10316     case TARGET_NR_clock_adjtime64:
10317         {
10318             struct timex htx;
10319 
10320             if (target_to_host_timex64(&htx, arg2) != 0) {
10321                 return -TARGET_EFAULT;
10322             }
10323             ret = get_errno(clock_adjtime(arg1, &htx));
10324             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10325                     return -TARGET_EFAULT;
10326             }
10327         }
10328         return ret;
10329 #endif
10330     case TARGET_NR_getpgid:
10331         return get_errno(getpgid(arg1));
10332     case TARGET_NR_fchdir:
10333         return get_errno(fchdir(arg1));
10334     case TARGET_NR_personality:
10335         return get_errno(personality(arg1));
10336 #ifdef TARGET_NR__llseek /* Not on alpha */
10337     case TARGET_NR__llseek:
10338         {
10339             int64_t res;
10340 #if !defined(__NR_llseek)
10341             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10342             if (res == -1) {
10343                 ret = get_errno(res);
10344             } else {
10345                 ret = 0;
10346             }
10347 #else
10348             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10349 #endif
10350             if ((ret == 0) && put_user_s64(res, arg4)) {
10351                 return -TARGET_EFAULT;
10352             }
10353         }
10354         return ret;
10355 #endif
10356 #ifdef TARGET_NR_getdents
10357     case TARGET_NR_getdents:
10358 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10359 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10360         {
10361             struct target_dirent *target_dirp;
10362             struct linux_dirent *dirp;
10363             abi_long count = arg3;
10364 
10365             dirp = g_try_malloc(count);
10366             if (!dirp) {
10367                 return -TARGET_ENOMEM;
10368             }
10369 
10370             ret = get_errno(sys_getdents(arg1, dirp, count));
10371             if (!is_error(ret)) {
10372                 struct linux_dirent *de;
10373 		struct target_dirent *tde;
10374                 int len = ret;
10375                 int reclen, treclen;
10376 		int count1, tnamelen;
10377 
10378 		count1 = 0;
10379                 de = dirp;
10380                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10381                     return -TARGET_EFAULT;
10382 		tde = target_dirp;
10383                 while (len > 0) {
10384                     reclen = de->d_reclen;
10385                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10386                     assert(tnamelen >= 0);
10387                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10388                     assert(count1 + treclen <= count);
10389                     tde->d_reclen = tswap16(treclen);
10390                     tde->d_ino = tswapal(de->d_ino);
10391                     tde->d_off = tswapal(de->d_off);
10392                     memcpy(tde->d_name, de->d_name, tnamelen);
10393                     de = (struct linux_dirent *)((char *)de + reclen);
10394                     len -= reclen;
10395                     tde = (struct target_dirent *)((char *)tde + treclen);
10396 		    count1 += treclen;
10397                 }
10398 		ret = count1;
10399                 unlock_user(target_dirp, arg2, ret);
10400             }
10401             g_free(dirp);
10402         }
10403 #else
10404         {
10405             struct linux_dirent *dirp;
10406             abi_long count = arg3;
10407 
10408             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10409                 return -TARGET_EFAULT;
10410             ret = get_errno(sys_getdents(arg1, dirp, count));
10411             if (!is_error(ret)) {
10412                 struct linux_dirent *de;
10413                 int len = ret;
10414                 int reclen;
10415                 de = dirp;
10416                 while (len > 0) {
10417                     reclen = de->d_reclen;
10418                     if (reclen > len)
10419                         break;
10420                     de->d_reclen = tswap16(reclen);
10421                     tswapls(&de->d_ino);
10422                     tswapls(&de->d_off);
10423                     de = (struct linux_dirent *)((char *)de + reclen);
10424                     len -= reclen;
10425                 }
10426             }
10427             unlock_user(dirp, arg2, ret);
10428         }
10429 #endif
10430 #else
10431         /* Implement getdents in terms of getdents64 */
10432         {
10433             struct linux_dirent64 *dirp;
10434             abi_long count = arg3;
10435 
10436             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10437             if (!dirp) {
10438                 return -TARGET_EFAULT;
10439             }
10440             ret = get_errno(sys_getdents64(arg1, dirp, count));
10441             if (!is_error(ret)) {
10442                 /* Convert the dirent64 structs to target dirent.  We do this
10443                  * in-place, since we can guarantee that a target_dirent is no
10444                  * larger than a dirent64; however this means we have to be
10445                  * careful to read everything before writing in the new format.
10446                  */
10447                 struct linux_dirent64 *de;
10448                 struct target_dirent *tde;
10449                 int len = ret;
10450                 int tlen = 0;
10451 
10452                 de = dirp;
10453                 tde = (struct target_dirent *)dirp;
10454                 while (len > 0) {
10455                     int namelen, treclen;
10456                     int reclen = de->d_reclen;
10457                     uint64_t ino = de->d_ino;
10458                     int64_t off = de->d_off;
10459                     uint8_t type = de->d_type;
10460 
10461                     namelen = strlen(de->d_name);
10462                     treclen = offsetof(struct target_dirent, d_name)
10463                         + namelen + 2;
10464                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10465 
10466                     memmove(tde->d_name, de->d_name, namelen + 1);
10467                     tde->d_ino = tswapal(ino);
10468                     tde->d_off = tswapal(off);
10469                     tde->d_reclen = tswap16(treclen);
10470                     /* The target_dirent type is in what was formerly a padding
10471                      * byte at the end of the structure:
10472                      */
10473                     *(((char *)tde) + treclen - 1) = type;
10474 
10475                     de = (struct linux_dirent64 *)((char *)de + reclen);
10476                     tde = (struct target_dirent *)((char *)tde + treclen);
10477                     len -= reclen;
10478                     tlen += treclen;
10479                 }
10480                 ret = tlen;
10481             }
10482             unlock_user(dirp, arg2, ret);
10483         }
10484 #endif
10485         return ret;
10486 #endif /* TARGET_NR_getdents */
10487 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10488     case TARGET_NR_getdents64:
10489         {
10490             struct linux_dirent64 *dirp;
10491             abi_long count = arg3;
10492             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10493                 return -TARGET_EFAULT;
10494             ret = get_errno(sys_getdents64(arg1, dirp, count));
10495             if (!is_error(ret)) {
10496                 struct linux_dirent64 *de;
10497                 int len = ret;
10498                 int reclen;
10499                 de = dirp;
10500                 while (len > 0) {
10501                     reclen = de->d_reclen;
10502                     if (reclen > len)
10503                         break;
10504                     de->d_reclen = tswap16(reclen);
10505                     tswap64s((uint64_t *)&de->d_ino);
10506                     tswap64s((uint64_t *)&de->d_off);
10507                     de = (struct linux_dirent64 *)((char *)de + reclen);
10508                     len -= reclen;
10509                 }
10510             }
10511             unlock_user(dirp, arg2, ret);
10512         }
10513         return ret;
10514 #endif /* TARGET_NR_getdents64 */
10515 #if defined(TARGET_NR__newselect)
10516     case TARGET_NR__newselect:
10517         return do_select(arg1, arg2, arg3, arg4, arg5);
10518 #endif
10519 #ifdef TARGET_NR_poll
10520     case TARGET_NR_poll:
10521         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10522 #endif
10523 #ifdef TARGET_NR_ppoll
10524     case TARGET_NR_ppoll:
10525         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10526 #endif
10527 #ifdef TARGET_NR_ppoll_time64
10528     case TARGET_NR_ppoll_time64:
10529         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10530 #endif
10531     case TARGET_NR_flock:
10532         /* NOTE: the flock constant seems to be the same for every
10533            Linux platform */
10534         return get_errno(safe_flock(arg1, arg2));
10535     case TARGET_NR_readv:
10536         {
10537             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10538             if (vec != NULL) {
10539                 ret = get_errno(safe_readv(arg1, vec, arg3));
10540                 unlock_iovec(vec, arg2, arg3, 1);
10541             } else {
10542                 ret = -host_to_target_errno(errno);
10543             }
10544         }
10545         return ret;
10546     case TARGET_NR_writev:
10547         {
10548             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10549             if (vec != NULL) {
10550                 ret = get_errno(safe_writev(arg1, vec, arg3));
10551                 unlock_iovec(vec, arg2, arg3, 0);
10552             } else {
10553                 ret = -host_to_target_errno(errno);
10554             }
10555         }
10556         return ret;
10557 #if defined(TARGET_NR_preadv)
10558     case TARGET_NR_preadv:
10559         {
10560             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10561             if (vec != NULL) {
10562                 unsigned long low, high;
10563 
10564                 target_to_host_low_high(arg4, arg5, &low, &high);
10565                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10566                 unlock_iovec(vec, arg2, arg3, 1);
10567             } else {
10568                 ret = -host_to_target_errno(errno);
10569            }
10570         }
10571         return ret;
10572 #endif
10573 #if defined(TARGET_NR_pwritev)
10574     case TARGET_NR_pwritev:
10575         {
10576             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10577             if (vec != NULL) {
10578                 unsigned long low, high;
10579 
10580                 target_to_host_low_high(arg4, arg5, &low, &high);
10581                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10582                 unlock_iovec(vec, arg2, arg3, 0);
10583             } else {
10584                 ret = -host_to_target_errno(errno);
10585            }
10586         }
10587         return ret;
10588 #endif
10589     case TARGET_NR_getsid:
10590         return get_errno(getsid(arg1));
10591 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10592     case TARGET_NR_fdatasync:
10593         return get_errno(fdatasync(arg1));
10594 #endif
10595     case TARGET_NR_sched_getaffinity:
10596         {
10597             unsigned int mask_size;
10598             unsigned long *mask;
10599 
10600             /*
10601              * sched_getaffinity needs multiples of ulong, so need to take
10602              * care of mismatches between target ulong and host ulong sizes.
10603              */
10604             if (arg2 & (sizeof(abi_ulong) - 1)) {
10605                 return -TARGET_EINVAL;
10606             }
10607             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10608 
10609             mask = alloca(mask_size);
10610             memset(mask, 0, mask_size);
10611             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10612 
10613             if (!is_error(ret)) {
10614                 if (ret > arg2) {
10615                     /* More data returned than the caller's buffer will fit.
10616                      * This only happens if sizeof(abi_long) < sizeof(long)
10617                      * and the caller passed us a buffer holding an odd number
10618                      * of abi_longs. If the host kernel is actually using the
10619                      * extra 4 bytes then fail EINVAL; otherwise we can just
10620                      * ignore them and only copy the interesting part.
10621                      */
10622                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10623                     if (numcpus > arg2 * 8) {
10624                         return -TARGET_EINVAL;
10625                     }
10626                     ret = arg2;
10627                 }
10628 
10629                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10630                     return -TARGET_EFAULT;
10631                 }
10632             }
10633         }
10634         return ret;
10635     case TARGET_NR_sched_setaffinity:
10636         {
10637             unsigned int mask_size;
10638             unsigned long *mask;
10639 
10640             /*
10641              * sched_setaffinity needs multiples of ulong, so need to take
10642              * care of mismatches between target ulong and host ulong sizes.
10643              */
10644             if (arg2 & (sizeof(abi_ulong) - 1)) {
10645                 return -TARGET_EINVAL;
10646             }
10647             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10648             mask = alloca(mask_size);
10649 
10650             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10651             if (ret) {
10652                 return ret;
10653             }
10654 
10655             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10656         }
10657     case TARGET_NR_getcpu:
10658         {
10659             unsigned cpu, node;
10660             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10661                                        arg2 ? &node : NULL,
10662                                        NULL));
10663             if (is_error(ret)) {
10664                 return ret;
10665             }
10666             if (arg1 && put_user_u32(cpu, arg1)) {
10667                 return -TARGET_EFAULT;
10668             }
10669             if (arg2 && put_user_u32(node, arg2)) {
10670                 return -TARGET_EFAULT;
10671             }
10672         }
10673         return ret;
10674     case TARGET_NR_sched_setparam:
10675         {
10676             struct sched_param *target_schp;
10677             struct sched_param schp;
10678 
10679             if (arg2 == 0) {
10680                 return -TARGET_EINVAL;
10681             }
10682             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10683                 return -TARGET_EFAULT;
10684             schp.sched_priority = tswap32(target_schp->sched_priority);
10685             unlock_user_struct(target_schp, arg2, 0);
10686             return get_errno(sched_setparam(arg1, &schp));
10687         }
10688     case TARGET_NR_sched_getparam:
10689         {
10690             struct sched_param *target_schp;
10691             struct sched_param schp;
10692 
10693             if (arg2 == 0) {
10694                 return -TARGET_EINVAL;
10695             }
10696             ret = get_errno(sched_getparam(arg1, &schp));
10697             if (!is_error(ret)) {
10698                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10699                     return -TARGET_EFAULT;
10700                 target_schp->sched_priority = tswap32(schp.sched_priority);
10701                 unlock_user_struct(target_schp, arg2, 1);
10702             }
10703         }
10704         return ret;
10705     case TARGET_NR_sched_setscheduler:
10706         {
10707             struct sched_param *target_schp;
10708             struct sched_param schp;
10709             if (arg3 == 0) {
10710                 return -TARGET_EINVAL;
10711             }
10712             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10713                 return -TARGET_EFAULT;
10714             schp.sched_priority = tswap32(target_schp->sched_priority);
10715             unlock_user_struct(target_schp, arg3, 0);
10716             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10717         }
10718     case TARGET_NR_sched_getscheduler:
10719         return get_errno(sched_getscheduler(arg1));
10720     case TARGET_NR_sched_yield:
10721         return get_errno(sched_yield());
10722     case TARGET_NR_sched_get_priority_max:
10723         return get_errno(sched_get_priority_max(arg1));
10724     case TARGET_NR_sched_get_priority_min:
10725         return get_errno(sched_get_priority_min(arg1));
10726 #ifdef TARGET_NR_sched_rr_get_interval
10727     case TARGET_NR_sched_rr_get_interval:
10728         {
10729             struct timespec ts;
10730             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10731             if (!is_error(ret)) {
10732                 ret = host_to_target_timespec(arg2, &ts);
10733             }
10734         }
10735         return ret;
10736 #endif
10737 #ifdef TARGET_NR_sched_rr_get_interval_time64
10738     case TARGET_NR_sched_rr_get_interval_time64:
10739         {
10740             struct timespec ts;
10741             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10742             if (!is_error(ret)) {
10743                 ret = host_to_target_timespec64(arg2, &ts);
10744             }
10745         }
10746         return ret;
10747 #endif
10748 #if defined(TARGET_NR_nanosleep)
10749     case TARGET_NR_nanosleep:
10750         {
10751             struct timespec req, rem;
10752             target_to_host_timespec(&req, arg1);
10753             ret = get_errno(safe_nanosleep(&req, &rem));
10754             if (is_error(ret) && arg2) {
10755                 host_to_target_timespec(arg2, &rem);
10756             }
10757         }
10758         return ret;
10759 #endif
10760     case TARGET_NR_prctl:
10761         switch (arg1) {
10762         case PR_GET_PDEATHSIG:
10763         {
10764             int deathsig;
10765             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10766             if (!is_error(ret) && arg2
10767                 && put_user_s32(deathsig, arg2)) {
10768                 return -TARGET_EFAULT;
10769             }
10770             return ret;
10771         }
10772 #ifdef PR_GET_NAME
10773         case PR_GET_NAME:
10774         {
10775             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10776             if (!name) {
10777                 return -TARGET_EFAULT;
10778             }
10779             ret = get_errno(prctl(arg1, (unsigned long)name,
10780                                   arg3, arg4, arg5));
10781             unlock_user(name, arg2, 16);
10782             return ret;
10783         }
10784         case PR_SET_NAME:
10785         {
10786             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10787             if (!name) {
10788                 return -TARGET_EFAULT;
10789             }
10790             ret = get_errno(prctl(arg1, (unsigned long)name,
10791                                   arg3, arg4, arg5));
10792             unlock_user(name, arg2, 0);
10793             return ret;
10794         }
10795 #endif
10796 #ifdef TARGET_MIPS
10797         case TARGET_PR_GET_FP_MODE:
10798         {
10799             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10800             ret = 0;
10801             if (env->CP0_Status & (1 << CP0St_FR)) {
10802                 ret |= TARGET_PR_FP_MODE_FR;
10803             }
10804             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10805                 ret |= TARGET_PR_FP_MODE_FRE;
10806             }
10807             return ret;
10808         }
10809         case TARGET_PR_SET_FP_MODE:
10810         {
10811             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10812             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10813             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10814             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10815             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10816 
10817             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10818                                             TARGET_PR_FP_MODE_FRE;
10819 
10820             /* If nothing to change, return right away, successfully.  */
10821             if (old_fr == new_fr && old_fre == new_fre) {
10822                 return 0;
10823             }
10824             /* Check the value is valid */
10825             if (arg2 & ~known_bits) {
10826                 return -TARGET_EOPNOTSUPP;
10827             }
10828             /* Setting FRE without FR is not supported.  */
10829             if (new_fre && !new_fr) {
10830                 return -TARGET_EOPNOTSUPP;
10831             }
10832             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10833                 /* FR1 is not supported */
10834                 return -TARGET_EOPNOTSUPP;
10835             }
10836             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10837                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10838                 /* cannot set FR=0 */
10839                 return -TARGET_EOPNOTSUPP;
10840             }
10841             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10842                 /* Cannot set FRE=1 */
10843                 return -TARGET_EOPNOTSUPP;
10844             }
10845 
10846             int i;
10847             fpr_t *fpr = env->active_fpu.fpr;
10848             for (i = 0; i < 32 ; i += 2) {
10849                 if (!old_fr && new_fr) {
10850                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10851                 } else if (old_fr && !new_fr) {
10852                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10853                 }
10854             }
10855 
10856             if (new_fr) {
10857                 env->CP0_Status |= (1 << CP0St_FR);
10858                 env->hflags |= MIPS_HFLAG_F64;
10859             } else {
10860                 env->CP0_Status &= ~(1 << CP0St_FR);
10861                 env->hflags &= ~MIPS_HFLAG_F64;
10862             }
10863             if (new_fre) {
10864                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10865                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10866                     env->hflags |= MIPS_HFLAG_FRE;
10867                 }
10868             } else {
10869                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10870                 env->hflags &= ~MIPS_HFLAG_FRE;
10871             }
10872 
10873             return 0;
10874         }
10875 #endif /* MIPS */
10876 #ifdef TARGET_AARCH64
10877         case TARGET_PR_SVE_SET_VL:
10878             /*
10879              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10880              * PR_SVE_VL_INHERIT.  Note the kernel definition
10881              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10882              * even though the current architectural maximum is VQ=16.
10883              */
10884             ret = -TARGET_EINVAL;
10885             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10886                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10887                 CPUARMState *env = cpu_env;
10888                 ARMCPU *cpu = env_archcpu(env);
10889                 uint32_t vq, old_vq;
10890 
10891                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10892                 vq = MAX(arg2 / 16, 1);
10893                 vq = MIN(vq, cpu->sve_max_vq);
10894 
10895                 if (vq < old_vq) {
10896                     aarch64_sve_narrow_vq(env, vq);
10897                 }
10898                 env->vfp.zcr_el[1] = vq - 1;
10899                 arm_rebuild_hflags(env);
10900                 ret = vq * 16;
10901             }
10902             return ret;
10903         case TARGET_PR_SVE_GET_VL:
10904             ret = -TARGET_EINVAL;
10905             {
10906                 ARMCPU *cpu = env_archcpu(cpu_env);
10907                 if (cpu_isar_feature(aa64_sve, cpu)) {
10908                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10909                 }
10910             }
10911             return ret;
10912         case TARGET_PR_PAC_RESET_KEYS:
10913             {
10914                 CPUARMState *env = cpu_env;
10915                 ARMCPU *cpu = env_archcpu(env);
10916 
10917                 if (arg3 || arg4 || arg5) {
10918                     return -TARGET_EINVAL;
10919                 }
10920                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10921                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10922                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10923                                TARGET_PR_PAC_APGAKEY);
10924                     int ret = 0;
10925                     Error *err = NULL;
10926 
10927                     if (arg2 == 0) {
10928                         arg2 = all;
10929                     } else if (arg2 & ~all) {
10930                         return -TARGET_EINVAL;
10931                     }
10932                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10933                         ret |= qemu_guest_getrandom(&env->keys.apia,
10934                                                     sizeof(ARMPACKey), &err);
10935                     }
10936                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10937                         ret |= qemu_guest_getrandom(&env->keys.apib,
10938                                                     sizeof(ARMPACKey), &err);
10939                     }
10940                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10941                         ret |= qemu_guest_getrandom(&env->keys.apda,
10942                                                     sizeof(ARMPACKey), &err);
10943                     }
10944                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10945                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10946                                                     sizeof(ARMPACKey), &err);
10947                     }
10948                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10949                         ret |= qemu_guest_getrandom(&env->keys.apga,
10950                                                     sizeof(ARMPACKey), &err);
10951                     }
10952                     if (ret != 0) {
10953                         /*
10954                          * Some unknown failure in the crypto.  The best
10955                          * we can do is log it and fail the syscall.
10956                          * The real syscall cannot fail this way.
10957                          */
10958                         qemu_log_mask(LOG_UNIMP,
10959                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10960                                       error_get_pretty(err));
10961                         error_free(err);
10962                         return -TARGET_EIO;
10963                     }
10964                     return 0;
10965                 }
10966             }
10967             return -TARGET_EINVAL;
10968 #endif /* AARCH64 */
10969         case PR_GET_SECCOMP:
10970         case PR_SET_SECCOMP:
10971             /* Disable seccomp to prevent the target disabling syscalls we
10972              * need. */
10973             return -TARGET_EINVAL;
10974         default:
10975             /* Most prctl options have no pointer arguments */
10976             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10977         }
10978         break;
10979 #ifdef TARGET_NR_arch_prctl
10980     case TARGET_NR_arch_prctl:
10981         return do_arch_prctl(cpu_env, arg1, arg2);
10982 #endif
10983 #ifdef TARGET_NR_pread64
10984     case TARGET_NR_pread64:
10985         if (regpairs_aligned(cpu_env, num)) {
10986             arg4 = arg5;
10987             arg5 = arg6;
10988         }
10989         if (arg2 == 0 && arg3 == 0) {
10990             /* Special-case NULL buffer and zero length, which should succeed */
10991             p = 0;
10992         } else {
10993             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10994             if (!p) {
10995                 return -TARGET_EFAULT;
10996             }
10997         }
10998         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10999         unlock_user(p, arg2, ret);
11000         return ret;
11001     case TARGET_NR_pwrite64:
11002         if (regpairs_aligned(cpu_env, num)) {
11003             arg4 = arg5;
11004             arg5 = arg6;
11005         }
11006         if (arg2 == 0 && arg3 == 0) {
11007             /* Special-case NULL buffer and zero length, which should succeed */
11008             p = 0;
11009         } else {
11010             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11011             if (!p) {
11012                 return -TARGET_EFAULT;
11013             }
11014         }
11015         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11016         unlock_user(p, arg2, 0);
11017         return ret;
11018 #endif
11019     case TARGET_NR_getcwd:
11020         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11021             return -TARGET_EFAULT;
11022         ret = get_errno(sys_getcwd1(p, arg2));
11023         unlock_user(p, arg1, ret);
11024         return ret;
11025     case TARGET_NR_capget:
11026     case TARGET_NR_capset:
11027     {
11028         struct target_user_cap_header *target_header;
11029         struct target_user_cap_data *target_data = NULL;
11030         struct __user_cap_header_struct header;
11031         struct __user_cap_data_struct data[2];
11032         struct __user_cap_data_struct *dataptr = NULL;
11033         int i, target_datalen;
11034         int data_items = 1;
11035 
11036         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11037             return -TARGET_EFAULT;
11038         }
11039         header.version = tswap32(target_header->version);
11040         header.pid = tswap32(target_header->pid);
11041 
11042         if (header.version != _LINUX_CAPABILITY_VERSION) {
11043             /* Version 2 and up takes pointer to two user_data structs */
11044             data_items = 2;
11045         }
11046 
11047         target_datalen = sizeof(*target_data) * data_items;
11048 
11049         if (arg2) {
11050             if (num == TARGET_NR_capget) {
11051                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11052             } else {
11053                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11054             }
11055             if (!target_data) {
11056                 unlock_user_struct(target_header, arg1, 0);
11057                 return -TARGET_EFAULT;
11058             }
11059 
11060             if (num == TARGET_NR_capset) {
11061                 for (i = 0; i < data_items; i++) {
11062                     data[i].effective = tswap32(target_data[i].effective);
11063                     data[i].permitted = tswap32(target_data[i].permitted);
11064                     data[i].inheritable = tswap32(target_data[i].inheritable);
11065                 }
11066             }
11067 
11068             dataptr = data;
11069         }
11070 
11071         if (num == TARGET_NR_capget) {
11072             ret = get_errno(capget(&header, dataptr));
11073         } else {
11074             ret = get_errno(capset(&header, dataptr));
11075         }
11076 
11077         /* The kernel always updates version for both capget and capset */
11078         target_header->version = tswap32(header.version);
11079         unlock_user_struct(target_header, arg1, 1);
11080 
11081         if (arg2) {
11082             if (num == TARGET_NR_capget) {
11083                 for (i = 0; i < data_items; i++) {
11084                     target_data[i].effective = tswap32(data[i].effective);
11085                     target_data[i].permitted = tswap32(data[i].permitted);
11086                     target_data[i].inheritable = tswap32(data[i].inheritable);
11087                 }
11088                 unlock_user(target_data, arg2, target_datalen);
11089             } else {
11090                 unlock_user(target_data, arg2, 0);
11091             }
11092         }
11093         return ret;
11094     }
11095     case TARGET_NR_sigaltstack:
11096         return do_sigaltstack(arg1, arg2,
11097                               get_sp_from_cpustate((CPUArchState *)cpu_env));
11098 
11099 #ifdef CONFIG_SENDFILE
11100 #ifdef TARGET_NR_sendfile
11101     case TARGET_NR_sendfile:
11102     {
11103         off_t *offp = NULL;
11104         off_t off;
11105         if (arg3) {
11106             ret = get_user_sal(off, arg3);
11107             if (is_error(ret)) {
11108                 return ret;
11109             }
11110             offp = &off;
11111         }
11112         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11113         if (!is_error(ret) && arg3) {
11114             abi_long ret2 = put_user_sal(off, arg3);
11115             if (is_error(ret2)) {
11116                 ret = ret2;
11117             }
11118         }
11119         return ret;
11120     }
11121 #endif
11122 #ifdef TARGET_NR_sendfile64
11123     case TARGET_NR_sendfile64:
11124     {
11125         off_t *offp = NULL;
11126         off_t off;
11127         if (arg3) {
11128             ret = get_user_s64(off, arg3);
11129             if (is_error(ret)) {
11130                 return ret;
11131             }
11132             offp = &off;
11133         }
11134         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11135         if (!is_error(ret) && arg3) {
11136             abi_long ret2 = put_user_s64(off, arg3);
11137             if (is_error(ret2)) {
11138                 ret = ret2;
11139             }
11140         }
11141         return ret;
11142     }
11143 #endif
11144 #endif
11145 #ifdef TARGET_NR_vfork
11146     case TARGET_NR_vfork:
11147         return get_errno(do_fork(cpu_env,
11148                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11149                          0, 0, 0, 0));
11150 #endif
11151 #ifdef TARGET_NR_ugetrlimit
11152     case TARGET_NR_ugetrlimit:
11153     {
11154 	struct rlimit rlim;
11155 	int resource = target_to_host_resource(arg1);
11156 	ret = get_errno(getrlimit(resource, &rlim));
11157 	if (!is_error(ret)) {
11158 	    struct target_rlimit *target_rlim;
11159             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11160                 return -TARGET_EFAULT;
11161 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11162 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11163             unlock_user_struct(target_rlim, arg2, 1);
11164 	}
11165         return ret;
11166     }
11167 #endif
11168 #ifdef TARGET_NR_truncate64
11169     case TARGET_NR_truncate64:
11170         if (!(p = lock_user_string(arg1)))
11171             return -TARGET_EFAULT;
11172 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11173         unlock_user(p, arg1, 0);
11174         return ret;
11175 #endif
11176 #ifdef TARGET_NR_ftruncate64
11177     case TARGET_NR_ftruncate64:
11178         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11179 #endif
11180 #ifdef TARGET_NR_stat64
11181     case TARGET_NR_stat64:
11182         if (!(p = lock_user_string(arg1))) {
11183             return -TARGET_EFAULT;
11184         }
11185         ret = get_errno(stat(path(p), &st));
11186         unlock_user(p, arg1, 0);
11187         if (!is_error(ret))
11188             ret = host_to_target_stat64(cpu_env, arg2, &st);
11189         return ret;
11190 #endif
11191 #ifdef TARGET_NR_lstat64
11192     case TARGET_NR_lstat64:
11193         if (!(p = lock_user_string(arg1))) {
11194             return -TARGET_EFAULT;
11195         }
11196         ret = get_errno(lstat(path(p), &st));
11197         unlock_user(p, arg1, 0);
11198         if (!is_error(ret))
11199             ret = host_to_target_stat64(cpu_env, arg2, &st);
11200         return ret;
11201 #endif
11202 #ifdef TARGET_NR_fstat64
11203     case TARGET_NR_fstat64:
11204         ret = get_errno(fstat(arg1, &st));
11205         if (!is_error(ret))
11206             ret = host_to_target_stat64(cpu_env, arg2, &st);
11207         return ret;
11208 #endif
11209 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11210 #ifdef TARGET_NR_fstatat64
11211     case TARGET_NR_fstatat64:
11212 #endif
11213 #ifdef TARGET_NR_newfstatat
11214     case TARGET_NR_newfstatat:
11215 #endif
11216         if (!(p = lock_user_string(arg2))) {
11217             return -TARGET_EFAULT;
11218         }
11219         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11220         unlock_user(p, arg2, 0);
11221         if (!is_error(ret))
11222             ret = host_to_target_stat64(cpu_env, arg3, &st);
11223         return ret;
11224 #endif
11225 #if defined(TARGET_NR_statx)
11226     case TARGET_NR_statx:
11227         {
11228             struct target_statx *target_stx;
11229             int dirfd = arg1;
11230             int flags = arg3;
11231 
11232             p = lock_user_string(arg2);
11233             if (p == NULL) {
11234                 return -TARGET_EFAULT;
11235             }
11236 #if defined(__NR_statx)
11237             {
11238                 /*
11239                  * It is assumed that struct statx is architecture independent.
11240                  */
11241                 struct target_statx host_stx;
11242                 int mask = arg4;
11243 
11244                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11245                 if (!is_error(ret)) {
11246                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11247                         unlock_user(p, arg2, 0);
11248                         return -TARGET_EFAULT;
11249                     }
11250                 }
11251 
11252                 if (ret != -TARGET_ENOSYS) {
11253                     unlock_user(p, arg2, 0);
11254                     return ret;
11255                 }
11256             }
11257 #endif
11258             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11259             unlock_user(p, arg2, 0);
11260 
11261             if (!is_error(ret)) {
11262                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11263                     return -TARGET_EFAULT;
11264                 }
11265                 memset(target_stx, 0, sizeof(*target_stx));
11266                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11267                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11268                 __put_user(st.st_ino, &target_stx->stx_ino);
11269                 __put_user(st.st_mode, &target_stx->stx_mode);
11270                 __put_user(st.st_uid, &target_stx->stx_uid);
11271                 __put_user(st.st_gid, &target_stx->stx_gid);
11272                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11273                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11274                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11275                 __put_user(st.st_size, &target_stx->stx_size);
11276                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11277                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11278                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11279                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11280                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11281                 unlock_user_struct(target_stx, arg5, 1);
11282             }
11283         }
11284         return ret;
11285 #endif
11286 #ifdef TARGET_NR_lchown
11287     case TARGET_NR_lchown:
11288         if (!(p = lock_user_string(arg1)))
11289             return -TARGET_EFAULT;
11290         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11291         unlock_user(p, arg1, 0);
11292         return ret;
11293 #endif
11294 #ifdef TARGET_NR_getuid
11295     case TARGET_NR_getuid:
11296         return get_errno(high2lowuid(getuid()));
11297 #endif
11298 #ifdef TARGET_NR_getgid
11299     case TARGET_NR_getgid:
11300         return get_errno(high2lowgid(getgid()));
11301 #endif
11302 #ifdef TARGET_NR_geteuid
11303     case TARGET_NR_geteuid:
11304         return get_errno(high2lowuid(geteuid()));
11305 #endif
11306 #ifdef TARGET_NR_getegid
11307     case TARGET_NR_getegid:
11308         return get_errno(high2lowgid(getegid()));
11309 #endif
11310     case TARGET_NR_setreuid:
11311         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11312     case TARGET_NR_setregid:
11313         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11314     case TARGET_NR_getgroups:
11315         {
11316             int gidsetsize = arg1;
11317             target_id *target_grouplist;
11318             gid_t *grouplist;
11319             int i;
11320 
11321             grouplist = alloca(gidsetsize * sizeof(gid_t));
11322             ret = get_errno(getgroups(gidsetsize, grouplist));
11323             if (gidsetsize == 0)
11324                 return ret;
11325             if (!is_error(ret)) {
11326                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11327                 if (!target_grouplist)
11328                     return -TARGET_EFAULT;
11329                 for(i = 0;i < ret; i++)
11330                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11331                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11332             }
11333         }
11334         return ret;
11335     case TARGET_NR_setgroups:
11336         {
11337             int gidsetsize = arg1;
11338             target_id *target_grouplist;
11339             gid_t *grouplist = NULL;
11340             int i;
11341             if (gidsetsize) {
11342                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11343                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11344                 if (!target_grouplist) {
11345                     return -TARGET_EFAULT;
11346                 }
11347                 for (i = 0; i < gidsetsize; i++) {
11348                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11349                 }
11350                 unlock_user(target_grouplist, arg2, 0);
11351             }
11352             return get_errno(setgroups(gidsetsize, grouplist));
11353         }
11354     case TARGET_NR_fchown:
11355         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11356 #if defined(TARGET_NR_fchownat)
11357     case TARGET_NR_fchownat:
11358         if (!(p = lock_user_string(arg2)))
11359             return -TARGET_EFAULT;
11360         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11361                                  low2highgid(arg4), arg5));
11362         unlock_user(p, arg2, 0);
11363         return ret;
11364 #endif
11365 #ifdef TARGET_NR_setresuid
11366     case TARGET_NR_setresuid:
11367         return get_errno(sys_setresuid(low2highuid(arg1),
11368                                        low2highuid(arg2),
11369                                        low2highuid(arg3)));
11370 #endif
11371 #ifdef TARGET_NR_getresuid
11372     case TARGET_NR_getresuid:
11373         {
11374             uid_t ruid, euid, suid;
11375             ret = get_errno(getresuid(&ruid, &euid, &suid));
11376             if (!is_error(ret)) {
11377                 if (put_user_id(high2lowuid(ruid), arg1)
11378                     || put_user_id(high2lowuid(euid), arg2)
11379                     || put_user_id(high2lowuid(suid), arg3))
11380                     return -TARGET_EFAULT;
11381             }
11382         }
11383         return ret;
11384 #endif
11385 #ifdef TARGET_NR_getresgid
11386     case TARGET_NR_setresgid:
11387         return get_errno(sys_setresgid(low2highgid(arg1),
11388                                        low2highgid(arg2),
11389                                        low2highgid(arg3)));
11390 #endif
11391 #ifdef TARGET_NR_getresgid
11392     case TARGET_NR_getresgid:
11393         {
11394             gid_t rgid, egid, sgid;
11395             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11396             if (!is_error(ret)) {
11397                 if (put_user_id(high2lowgid(rgid), arg1)
11398                     || put_user_id(high2lowgid(egid), arg2)
11399                     || put_user_id(high2lowgid(sgid), arg3))
11400                     return -TARGET_EFAULT;
11401             }
11402         }
11403         return ret;
11404 #endif
11405 #ifdef TARGET_NR_chown
11406     case TARGET_NR_chown:
11407         if (!(p = lock_user_string(arg1)))
11408             return -TARGET_EFAULT;
11409         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11410         unlock_user(p, arg1, 0);
11411         return ret;
11412 #endif
11413     case TARGET_NR_setuid:
11414         return get_errno(sys_setuid(low2highuid(arg1)));
11415     case TARGET_NR_setgid:
11416         return get_errno(sys_setgid(low2highgid(arg1)));
11417     case TARGET_NR_setfsuid:
11418         return get_errno(setfsuid(arg1));
11419     case TARGET_NR_setfsgid:
11420         return get_errno(setfsgid(arg1));
11421 
11422 #ifdef TARGET_NR_lchown32
11423     case TARGET_NR_lchown32:
11424         if (!(p = lock_user_string(arg1)))
11425             return -TARGET_EFAULT;
11426         ret = get_errno(lchown(p, arg2, arg3));
11427         unlock_user(p, arg1, 0);
11428         return ret;
11429 #endif
11430 #ifdef TARGET_NR_getuid32
11431     case TARGET_NR_getuid32:
11432         return get_errno(getuid());
11433 #endif
11434 
11435 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11436    /* Alpha specific */
11437     case TARGET_NR_getxuid:
11438          {
11439             uid_t euid;
11440             euid=geteuid();
11441             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11442          }
11443         return get_errno(getuid());
11444 #endif
11445 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11446    /* Alpha specific */
11447     case TARGET_NR_getxgid:
11448          {
11449             uid_t egid;
11450             egid=getegid();
11451             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11452          }
11453         return get_errno(getgid());
11454 #endif
11455 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11456     /* Alpha specific */
11457     case TARGET_NR_osf_getsysinfo:
11458         ret = -TARGET_EOPNOTSUPP;
11459         switch (arg1) {
11460           case TARGET_GSI_IEEE_FP_CONTROL:
11461             {
11462                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11463                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11464 
11465                 swcr &= ~SWCR_STATUS_MASK;
11466                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11467 
11468                 if (put_user_u64 (swcr, arg2))
11469                         return -TARGET_EFAULT;
11470                 ret = 0;
11471             }
11472             break;
11473 
11474           /* case GSI_IEEE_STATE_AT_SIGNAL:
11475              -- Not implemented in linux kernel.
11476              case GSI_UACPROC:
11477              -- Retrieves current unaligned access state; not much used.
11478              case GSI_PROC_TYPE:
11479              -- Retrieves implver information; surely not used.
11480              case GSI_GET_HWRPB:
11481              -- Grabs a copy of the HWRPB; surely not used.
11482           */
11483         }
11484         return ret;
11485 #endif
11486 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11487     /* Alpha specific */
11488     case TARGET_NR_osf_setsysinfo:
11489         ret = -TARGET_EOPNOTSUPP;
11490         switch (arg1) {
11491           case TARGET_SSI_IEEE_FP_CONTROL:
11492             {
11493                 uint64_t swcr, fpcr;
11494 
11495                 if (get_user_u64 (swcr, arg2)) {
11496                     return -TARGET_EFAULT;
11497                 }
11498 
11499                 /*
11500                  * The kernel calls swcr_update_status to update the
11501                  * status bits from the fpcr at every point that it
11502                  * could be queried.  Therefore, we store the status
11503                  * bits only in FPCR.
11504                  */
11505                 ((CPUAlphaState *)cpu_env)->swcr
11506                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11507 
11508                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11509                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11510                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11511                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11512                 ret = 0;
11513             }
11514             break;
11515 
11516           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11517             {
11518                 uint64_t exc, fpcr, fex;
11519 
11520                 if (get_user_u64(exc, arg2)) {
11521                     return -TARGET_EFAULT;
11522                 }
11523                 exc &= SWCR_STATUS_MASK;
11524                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11525 
11526                 /* Old exceptions are not signaled.  */
11527                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11528                 fex = exc & ~fex;
11529                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11530                 fex &= ((CPUArchState *)cpu_env)->swcr;
11531 
11532                 /* Update the hardware fpcr.  */
11533                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11534                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11535 
11536                 if (fex) {
11537                     int si_code = TARGET_FPE_FLTUNK;
11538                     target_siginfo_t info;
11539 
11540                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11541                         si_code = TARGET_FPE_FLTUND;
11542                     }
11543                     if (fex & SWCR_TRAP_ENABLE_INE) {
11544                         si_code = TARGET_FPE_FLTRES;
11545                     }
11546                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11547                         si_code = TARGET_FPE_FLTUND;
11548                     }
11549                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11550                         si_code = TARGET_FPE_FLTOVF;
11551                     }
11552                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11553                         si_code = TARGET_FPE_FLTDIV;
11554                     }
11555                     if (fex & SWCR_TRAP_ENABLE_INV) {
11556                         si_code = TARGET_FPE_FLTINV;
11557                     }
11558 
11559                     info.si_signo = SIGFPE;
11560                     info.si_errno = 0;
11561                     info.si_code = si_code;
11562                     info._sifields._sigfault._addr
11563                         = ((CPUArchState *)cpu_env)->pc;
11564                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11565                                  QEMU_SI_FAULT, &info);
11566                 }
11567                 ret = 0;
11568             }
11569             break;
11570 
11571           /* case SSI_NVPAIRS:
11572              -- Used with SSIN_UACPROC to enable unaligned accesses.
11573              case SSI_IEEE_STATE_AT_SIGNAL:
11574              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11575              -- Not implemented in linux kernel
11576           */
11577         }
11578         return ret;
11579 #endif
11580 #ifdef TARGET_NR_osf_sigprocmask
11581     /* Alpha specific.  */
11582     case TARGET_NR_osf_sigprocmask:
11583         {
11584             abi_ulong mask;
11585             int how;
11586             sigset_t set, oldset;
11587 
11588             switch(arg1) {
11589             case TARGET_SIG_BLOCK:
11590                 how = SIG_BLOCK;
11591                 break;
11592             case TARGET_SIG_UNBLOCK:
11593                 how = SIG_UNBLOCK;
11594                 break;
11595             case TARGET_SIG_SETMASK:
11596                 how = SIG_SETMASK;
11597                 break;
11598             default:
11599                 return -TARGET_EINVAL;
11600             }
11601             mask = arg2;
11602             target_to_host_old_sigset(&set, &mask);
11603             ret = do_sigprocmask(how, &set, &oldset);
11604             if (!ret) {
11605                 host_to_target_old_sigset(&mask, &oldset);
11606                 ret = mask;
11607             }
11608         }
11609         return ret;
11610 #endif
11611 
11612 #ifdef TARGET_NR_getgid32
11613     case TARGET_NR_getgid32:
11614         return get_errno(getgid());
11615 #endif
11616 #ifdef TARGET_NR_geteuid32
11617     case TARGET_NR_geteuid32:
11618         return get_errno(geteuid());
11619 #endif
11620 #ifdef TARGET_NR_getegid32
11621     case TARGET_NR_getegid32:
11622         return get_errno(getegid());
11623 #endif
11624 #ifdef TARGET_NR_setreuid32
11625     case TARGET_NR_setreuid32:
11626         return get_errno(setreuid(arg1, arg2));
11627 #endif
11628 #ifdef TARGET_NR_setregid32
11629     case TARGET_NR_setregid32:
11630         return get_errno(setregid(arg1, arg2));
11631 #endif
11632 #ifdef TARGET_NR_getgroups32
11633     case TARGET_NR_getgroups32:
11634         {
11635             int gidsetsize = arg1;
11636             uint32_t *target_grouplist;
11637             gid_t *grouplist;
11638             int i;
11639 
11640             grouplist = alloca(gidsetsize * sizeof(gid_t));
11641             ret = get_errno(getgroups(gidsetsize, grouplist));
11642             if (gidsetsize == 0)
11643                 return ret;
11644             if (!is_error(ret)) {
11645                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11646                 if (!target_grouplist) {
11647                     return -TARGET_EFAULT;
11648                 }
11649                 for(i = 0;i < ret; i++)
11650                     target_grouplist[i] = tswap32(grouplist[i]);
11651                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11652             }
11653         }
11654         return ret;
11655 #endif
11656 #ifdef TARGET_NR_setgroups32
11657     case TARGET_NR_setgroups32:
11658         {
11659             int gidsetsize = arg1;
11660             uint32_t *target_grouplist;
11661             gid_t *grouplist;
11662             int i;
11663 
11664             grouplist = alloca(gidsetsize * sizeof(gid_t));
11665             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11666             if (!target_grouplist) {
11667                 return -TARGET_EFAULT;
11668             }
11669             for(i = 0;i < gidsetsize; i++)
11670                 grouplist[i] = tswap32(target_grouplist[i]);
11671             unlock_user(target_grouplist, arg2, 0);
11672             return get_errno(setgroups(gidsetsize, grouplist));
11673         }
11674 #endif
11675 #ifdef TARGET_NR_fchown32
11676     case TARGET_NR_fchown32:
11677         return get_errno(fchown(arg1, arg2, arg3));
11678 #endif
11679 #ifdef TARGET_NR_setresuid32
11680     case TARGET_NR_setresuid32:
11681         return get_errno(sys_setresuid(arg1, arg2, arg3));
11682 #endif
11683 #ifdef TARGET_NR_getresuid32
11684     case TARGET_NR_getresuid32:
11685         {
11686             uid_t ruid, euid, suid;
11687             ret = get_errno(getresuid(&ruid, &euid, &suid));
11688             if (!is_error(ret)) {
11689                 if (put_user_u32(ruid, arg1)
11690                     || put_user_u32(euid, arg2)
11691                     || put_user_u32(suid, arg3))
11692                     return -TARGET_EFAULT;
11693             }
11694         }
11695         return ret;
11696 #endif
11697 #ifdef TARGET_NR_setresgid32
11698     case TARGET_NR_setresgid32:
11699         return get_errno(sys_setresgid(arg1, arg2, arg3));
11700 #endif
11701 #ifdef TARGET_NR_getresgid32
11702     case TARGET_NR_getresgid32:
11703         {
11704             gid_t rgid, egid, sgid;
11705             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11706             if (!is_error(ret)) {
11707                 if (put_user_u32(rgid, arg1)
11708                     || put_user_u32(egid, arg2)
11709                     || put_user_u32(sgid, arg3))
11710                     return -TARGET_EFAULT;
11711             }
11712         }
11713         return ret;
11714 #endif
11715 #ifdef TARGET_NR_chown32
11716     case TARGET_NR_chown32:
11717         if (!(p = lock_user_string(arg1)))
11718             return -TARGET_EFAULT;
11719         ret = get_errno(chown(p, arg2, arg3));
11720         unlock_user(p, arg1, 0);
11721         return ret;
11722 #endif
11723 #ifdef TARGET_NR_setuid32
11724     case TARGET_NR_setuid32:
11725         return get_errno(sys_setuid(arg1));
11726 #endif
11727 #ifdef TARGET_NR_setgid32
11728     case TARGET_NR_setgid32:
11729         return get_errno(sys_setgid(arg1));
11730 #endif
11731 #ifdef TARGET_NR_setfsuid32
11732     case TARGET_NR_setfsuid32:
11733         return get_errno(setfsuid(arg1));
11734 #endif
11735 #ifdef TARGET_NR_setfsgid32
11736     case TARGET_NR_setfsgid32:
11737         return get_errno(setfsgid(arg1));
11738 #endif
11739 #ifdef TARGET_NR_mincore
11740     case TARGET_NR_mincore:
11741         {
11742             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11743             if (!a) {
11744                 return -TARGET_ENOMEM;
11745             }
11746             p = lock_user_string(arg3);
11747             if (!p) {
11748                 ret = -TARGET_EFAULT;
11749             } else {
11750                 ret = get_errno(mincore(a, arg2, p));
11751                 unlock_user(p, arg3, ret);
11752             }
11753             unlock_user(a, arg1, 0);
11754         }
11755         return ret;
11756 #endif
11757 #ifdef TARGET_NR_arm_fadvise64_64
11758     case TARGET_NR_arm_fadvise64_64:
11759         /* arm_fadvise64_64 looks like fadvise64_64 but
11760          * with different argument order: fd, advice, offset, len
11761          * rather than the usual fd, offset, len, advice.
11762          * Note that offset and len are both 64-bit so appear as
11763          * pairs of 32-bit registers.
11764          */
11765         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11766                             target_offset64(arg5, arg6), arg2);
11767         return -host_to_target_errno(ret);
11768 #endif
11769 
11770 #if TARGET_ABI_BITS == 32
11771 
11772 #ifdef TARGET_NR_fadvise64_64
11773     case TARGET_NR_fadvise64_64:
11774 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11775         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11776         ret = arg2;
11777         arg2 = arg3;
11778         arg3 = arg4;
11779         arg4 = arg5;
11780         arg5 = arg6;
11781         arg6 = ret;
11782 #else
11783         /* 6 args: fd, offset (high, low), len (high, low), advice */
11784         if (regpairs_aligned(cpu_env, num)) {
11785             /* offset is in (3,4), len in (5,6) and advice in 7 */
11786             arg2 = arg3;
11787             arg3 = arg4;
11788             arg4 = arg5;
11789             arg5 = arg6;
11790             arg6 = arg7;
11791         }
11792 #endif
11793         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11794                             target_offset64(arg4, arg5), arg6);
11795         return -host_to_target_errno(ret);
11796 #endif
11797 
11798 #ifdef TARGET_NR_fadvise64
11799     case TARGET_NR_fadvise64:
11800         /* 5 args: fd, offset (high, low), len, advice */
11801         if (regpairs_aligned(cpu_env, num)) {
11802             /* offset is in (3,4), len in 5 and advice in 6 */
11803             arg2 = arg3;
11804             arg3 = arg4;
11805             arg4 = arg5;
11806             arg5 = arg6;
11807         }
11808         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11809         return -host_to_target_errno(ret);
11810 #endif
11811 
11812 #else /* not a 32-bit ABI */
11813 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11814 #ifdef TARGET_NR_fadvise64_64
11815     case TARGET_NR_fadvise64_64:
11816 #endif
11817 #ifdef TARGET_NR_fadvise64
11818     case TARGET_NR_fadvise64:
11819 #endif
11820 #ifdef TARGET_S390X
11821         switch (arg4) {
11822         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11823         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11824         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11825         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11826         default: break;
11827         }
11828 #endif
11829         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11830 #endif
11831 #endif /* end of 64-bit ABI fadvise handling */
11832 
11833 #ifdef TARGET_NR_madvise
11834     case TARGET_NR_madvise:
11835         /* A straight passthrough may not be safe because qemu sometimes
11836            turns private file-backed mappings into anonymous mappings.
11837            This will break MADV_DONTNEED.
11838            This is a hint, so ignoring and returning success is ok.  */
11839         return 0;
11840 #endif
11841 #ifdef TARGET_NR_fcntl64
11842     case TARGET_NR_fcntl64:
11843     {
11844         int cmd;
11845         struct flock64 fl;
11846         from_flock64_fn *copyfrom = copy_from_user_flock64;
11847         to_flock64_fn *copyto = copy_to_user_flock64;
11848 
11849 #ifdef TARGET_ARM
11850         if (!((CPUARMState *)cpu_env)->eabi) {
11851             copyfrom = copy_from_user_oabi_flock64;
11852             copyto = copy_to_user_oabi_flock64;
11853         }
11854 #endif
11855 
11856         cmd = target_to_host_fcntl_cmd(arg2);
11857         if (cmd == -TARGET_EINVAL) {
11858             return cmd;
11859         }
11860 
11861         switch(arg2) {
11862         case TARGET_F_GETLK64:
11863             ret = copyfrom(&fl, arg3);
11864             if (ret) {
11865                 break;
11866             }
11867             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11868             if (ret == 0) {
11869                 ret = copyto(arg3, &fl);
11870             }
11871 	    break;
11872 
11873         case TARGET_F_SETLK64:
11874         case TARGET_F_SETLKW64:
11875             ret = copyfrom(&fl, arg3);
11876             if (ret) {
11877                 break;
11878             }
11879             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11880 	    break;
11881         default:
11882             ret = do_fcntl(arg1, arg2, arg3);
11883             break;
11884         }
11885         return ret;
11886     }
11887 #endif
11888 #ifdef TARGET_NR_cacheflush
11889     case TARGET_NR_cacheflush:
11890         /* self-modifying code is handled automatically, so nothing needed */
11891         return 0;
11892 #endif
11893 #ifdef TARGET_NR_getpagesize
11894     case TARGET_NR_getpagesize:
11895         return TARGET_PAGE_SIZE;
11896 #endif
11897     case TARGET_NR_gettid:
11898         return get_errno(sys_gettid());
11899 #ifdef TARGET_NR_readahead
11900     case TARGET_NR_readahead:
11901 #if TARGET_ABI_BITS == 32
11902         if (regpairs_aligned(cpu_env, num)) {
11903             arg2 = arg3;
11904             arg3 = arg4;
11905             arg4 = arg5;
11906         }
11907         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11908 #else
11909         ret = get_errno(readahead(arg1, arg2, arg3));
11910 #endif
11911         return ret;
11912 #endif
11913 #ifdef CONFIG_ATTR
11914 #ifdef TARGET_NR_setxattr
11915     case TARGET_NR_listxattr:
11916     case TARGET_NR_llistxattr:
11917     {
11918         void *p, *b = 0;
11919         if (arg2) {
11920             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11921             if (!b) {
11922                 return -TARGET_EFAULT;
11923             }
11924         }
11925         p = lock_user_string(arg1);
11926         if (p) {
11927             if (num == TARGET_NR_listxattr) {
11928                 ret = get_errno(listxattr(p, b, arg3));
11929             } else {
11930                 ret = get_errno(llistxattr(p, b, arg3));
11931             }
11932         } else {
11933             ret = -TARGET_EFAULT;
11934         }
11935         unlock_user(p, arg1, 0);
11936         unlock_user(b, arg2, arg3);
11937         return ret;
11938     }
11939     case TARGET_NR_flistxattr:
11940     {
11941         void *b = 0;
11942         if (arg2) {
11943             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11944             if (!b) {
11945                 return -TARGET_EFAULT;
11946             }
11947         }
11948         ret = get_errno(flistxattr(arg1, b, arg3));
11949         unlock_user(b, arg2, arg3);
11950         return ret;
11951     }
11952     case TARGET_NR_setxattr:
11953     case TARGET_NR_lsetxattr:
11954         {
11955             void *p, *n, *v = 0;
11956             if (arg3) {
11957                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11958                 if (!v) {
11959                     return -TARGET_EFAULT;
11960                 }
11961             }
11962             p = lock_user_string(arg1);
11963             n = lock_user_string(arg2);
11964             if (p && n) {
11965                 if (num == TARGET_NR_setxattr) {
11966                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11967                 } else {
11968                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11969                 }
11970             } else {
11971                 ret = -TARGET_EFAULT;
11972             }
11973             unlock_user(p, arg1, 0);
11974             unlock_user(n, arg2, 0);
11975             unlock_user(v, arg3, 0);
11976         }
11977         return ret;
11978     case TARGET_NR_fsetxattr:
11979         {
11980             void *n, *v = 0;
11981             if (arg3) {
11982                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11983                 if (!v) {
11984                     return -TARGET_EFAULT;
11985                 }
11986             }
11987             n = lock_user_string(arg2);
11988             if (n) {
11989                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11990             } else {
11991                 ret = -TARGET_EFAULT;
11992             }
11993             unlock_user(n, arg2, 0);
11994             unlock_user(v, arg3, 0);
11995         }
11996         return ret;
11997     case TARGET_NR_getxattr:
11998     case TARGET_NR_lgetxattr:
11999         {
12000             void *p, *n, *v = 0;
12001             if (arg3) {
12002                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12003                 if (!v) {
12004                     return -TARGET_EFAULT;
12005                 }
12006             }
12007             p = lock_user_string(arg1);
12008             n = lock_user_string(arg2);
12009             if (p && n) {
12010                 if (num == TARGET_NR_getxattr) {
12011                     ret = get_errno(getxattr(p, n, v, arg4));
12012                 } else {
12013                     ret = get_errno(lgetxattr(p, n, v, arg4));
12014                 }
12015             } else {
12016                 ret = -TARGET_EFAULT;
12017             }
12018             unlock_user(p, arg1, 0);
12019             unlock_user(n, arg2, 0);
12020             unlock_user(v, arg3, arg4);
12021         }
12022         return ret;
12023     case TARGET_NR_fgetxattr:
12024         {
12025             void *n, *v = 0;
12026             if (arg3) {
12027                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12028                 if (!v) {
12029                     return -TARGET_EFAULT;
12030                 }
12031             }
12032             n = lock_user_string(arg2);
12033             if (n) {
12034                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12035             } else {
12036                 ret = -TARGET_EFAULT;
12037             }
12038             unlock_user(n, arg2, 0);
12039             unlock_user(v, arg3, arg4);
12040         }
12041         return ret;
12042     case TARGET_NR_removexattr:
12043     case TARGET_NR_lremovexattr:
12044         {
12045             void *p, *n;
12046             p = lock_user_string(arg1);
12047             n = lock_user_string(arg2);
12048             if (p && n) {
12049                 if (num == TARGET_NR_removexattr) {
12050                     ret = get_errno(removexattr(p, n));
12051                 } else {
12052                     ret = get_errno(lremovexattr(p, n));
12053                 }
12054             } else {
12055                 ret = -TARGET_EFAULT;
12056             }
12057             unlock_user(p, arg1, 0);
12058             unlock_user(n, arg2, 0);
12059         }
12060         return ret;
12061     case TARGET_NR_fremovexattr:
12062         {
12063             void *n;
12064             n = lock_user_string(arg2);
12065             if (n) {
12066                 ret = get_errno(fremovexattr(arg1, n));
12067             } else {
12068                 ret = -TARGET_EFAULT;
12069             }
12070             unlock_user(n, arg2, 0);
12071         }
12072         return ret;
12073 #endif
12074 #endif /* CONFIG_ATTR */
12075 #ifdef TARGET_NR_set_thread_area
12076     case TARGET_NR_set_thread_area:
12077 #if defined(TARGET_MIPS)
12078       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12079       return 0;
12080 #elif defined(TARGET_CRIS)
12081       if (arg1 & 0xff)
12082           ret = -TARGET_EINVAL;
12083       else {
12084           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12085           ret = 0;
12086       }
12087       return ret;
12088 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12089       return do_set_thread_area(cpu_env, arg1);
12090 #elif defined(TARGET_M68K)
12091       {
12092           TaskState *ts = cpu->opaque;
12093           ts->tp_value = arg1;
12094           return 0;
12095       }
12096 #else
12097       return -TARGET_ENOSYS;
12098 #endif
12099 #endif
12100 #ifdef TARGET_NR_get_thread_area
12101     case TARGET_NR_get_thread_area:
12102 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12103         return do_get_thread_area(cpu_env, arg1);
12104 #elif defined(TARGET_M68K)
12105         {
12106             TaskState *ts = cpu->opaque;
12107             return ts->tp_value;
12108         }
12109 #else
12110         return -TARGET_ENOSYS;
12111 #endif
12112 #endif
12113 #ifdef TARGET_NR_getdomainname
12114     case TARGET_NR_getdomainname:
12115         return -TARGET_ENOSYS;
12116 #endif
12117 
12118 #ifdef TARGET_NR_clock_settime
12119     case TARGET_NR_clock_settime:
12120     {
12121         struct timespec ts;
12122 
12123         ret = target_to_host_timespec(&ts, arg2);
12124         if (!is_error(ret)) {
12125             ret = get_errno(clock_settime(arg1, &ts));
12126         }
12127         return ret;
12128     }
12129 #endif
12130 #ifdef TARGET_NR_clock_settime64
12131     case TARGET_NR_clock_settime64:
12132     {
12133         struct timespec ts;
12134 
12135         ret = target_to_host_timespec64(&ts, arg2);
12136         if (!is_error(ret)) {
12137             ret = get_errno(clock_settime(arg1, &ts));
12138         }
12139         return ret;
12140     }
12141 #endif
12142 #ifdef TARGET_NR_clock_gettime
12143     case TARGET_NR_clock_gettime:
12144     {
12145         struct timespec ts;
12146         ret = get_errno(clock_gettime(arg1, &ts));
12147         if (!is_error(ret)) {
12148             ret = host_to_target_timespec(arg2, &ts);
12149         }
12150         return ret;
12151     }
12152 #endif
12153 #ifdef TARGET_NR_clock_gettime64
12154     case TARGET_NR_clock_gettime64:
12155     {
12156         struct timespec ts;
12157         ret = get_errno(clock_gettime(arg1, &ts));
12158         if (!is_error(ret)) {
12159             ret = host_to_target_timespec64(arg2, &ts);
12160         }
12161         return ret;
12162     }
12163 #endif
12164 #ifdef TARGET_NR_clock_getres
12165     case TARGET_NR_clock_getres:
12166     {
12167         struct timespec ts;
12168         ret = get_errno(clock_getres(arg1, &ts));
12169         if (!is_error(ret)) {
12170             host_to_target_timespec(arg2, &ts);
12171         }
12172         return ret;
12173     }
12174 #endif
12175 #ifdef TARGET_NR_clock_getres_time64
12176     case TARGET_NR_clock_getres_time64:
12177     {
12178         struct timespec ts;
12179         ret = get_errno(clock_getres(arg1, &ts));
12180         if (!is_error(ret)) {
12181             host_to_target_timespec64(arg2, &ts);
12182         }
12183         return ret;
12184     }
12185 #endif
12186 #ifdef TARGET_NR_clock_nanosleep
12187     case TARGET_NR_clock_nanosleep:
12188     {
12189         struct timespec ts;
12190         if (target_to_host_timespec(&ts, arg3)) {
12191             return -TARGET_EFAULT;
12192         }
12193         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12194                                              &ts, arg4 ? &ts : NULL));
12195         /*
12196          * if the call is interrupted by a signal handler, it fails
12197          * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12198          * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12199          */
12200         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12201             host_to_target_timespec(arg4, &ts)) {
12202               return -TARGET_EFAULT;
12203         }
12204 
12205         return ret;
12206     }
12207 #endif
12208 #ifdef TARGET_NR_clock_nanosleep_time64
12209     case TARGET_NR_clock_nanosleep_time64:
12210     {
12211         struct timespec ts;
12212 
12213         if (target_to_host_timespec64(&ts, arg3)) {
12214             return -TARGET_EFAULT;
12215         }
12216 
12217         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12218                                              &ts, arg4 ? &ts : NULL));
12219 
12220         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12221             host_to_target_timespec64(arg4, &ts)) {
12222             return -TARGET_EFAULT;
12223         }
12224         return ret;
12225     }
12226 #endif
12227 
12228 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12229     case TARGET_NR_set_tid_address:
12230         return get_errno(set_tid_address((int *)g2h(arg1)));
12231 #endif
12232 
12233     case TARGET_NR_tkill:
12234         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12235 
12236     case TARGET_NR_tgkill:
12237         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12238                          target_to_host_signal(arg3)));
12239 
12240 #ifdef TARGET_NR_set_robust_list
12241     case TARGET_NR_set_robust_list:
12242     case TARGET_NR_get_robust_list:
12243         /* The ABI for supporting robust futexes has userspace pass
12244          * the kernel a pointer to a linked list which is updated by
12245          * userspace after the syscall; the list is walked by the kernel
12246          * when the thread exits. Since the linked list in QEMU guest
12247          * memory isn't a valid linked list for the host and we have
12248          * no way to reliably intercept the thread-death event, we can't
12249          * support these. Silently return ENOSYS so that guest userspace
12250          * falls back to a non-robust futex implementation (which should
12251          * be OK except in the corner case of the guest crashing while
12252          * holding a mutex that is shared with another process via
12253          * shared memory).
12254          */
12255         return -TARGET_ENOSYS;
12256 #endif
12257 
12258 #if defined(TARGET_NR_utimensat)
12259     case TARGET_NR_utimensat:
12260         {
12261             struct timespec *tsp, ts[2];
12262             if (!arg3) {
12263                 tsp = NULL;
12264             } else {
12265                 if (target_to_host_timespec(ts, arg3)) {
12266                     return -TARGET_EFAULT;
12267                 }
12268                 if (target_to_host_timespec(ts + 1, arg3 +
12269                                             sizeof(struct target_timespec))) {
12270                     return -TARGET_EFAULT;
12271                 }
12272                 tsp = ts;
12273             }
12274             if (!arg2)
12275                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12276             else {
12277                 if (!(p = lock_user_string(arg2))) {
12278                     return -TARGET_EFAULT;
12279                 }
12280                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12281                 unlock_user(p, arg2, 0);
12282             }
12283         }
12284         return ret;
12285 #endif
12286 #ifdef TARGET_NR_utimensat_time64
12287     case TARGET_NR_utimensat_time64:
12288         {
12289             struct timespec *tsp, ts[2];
12290             if (!arg3) {
12291                 tsp = NULL;
12292             } else {
12293                 if (target_to_host_timespec64(ts, arg3)) {
12294                     return -TARGET_EFAULT;
12295                 }
12296                 if (target_to_host_timespec64(ts + 1, arg3 +
12297                                      sizeof(struct target__kernel_timespec))) {
12298                     return -TARGET_EFAULT;
12299                 }
12300                 tsp = ts;
12301             }
12302             if (!arg2)
12303                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12304             else {
12305                 p = lock_user_string(arg2);
12306                 if (!p) {
12307                     return -TARGET_EFAULT;
12308                 }
12309                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12310                 unlock_user(p, arg2, 0);
12311             }
12312         }
12313         return ret;
12314 #endif
12315 #ifdef TARGET_NR_futex
12316     case TARGET_NR_futex:
12317         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12318 #endif
12319 #ifdef TARGET_NR_futex_time64
12320     case TARGET_NR_futex_time64:
12321         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
12322 #endif
12323 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12324     case TARGET_NR_inotify_init:
12325         ret = get_errno(sys_inotify_init());
12326         if (ret >= 0) {
12327             fd_trans_register(ret, &target_inotify_trans);
12328         }
12329         return ret;
12330 #endif
12331 #ifdef CONFIG_INOTIFY1
12332 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12333     case TARGET_NR_inotify_init1:
12334         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12335                                           fcntl_flags_tbl)));
12336         if (ret >= 0) {
12337             fd_trans_register(ret, &target_inotify_trans);
12338         }
12339         return ret;
12340 #endif
12341 #endif
12342 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12343     case TARGET_NR_inotify_add_watch:
12344         p = lock_user_string(arg2);
12345         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12346         unlock_user(p, arg2, 0);
12347         return ret;
12348 #endif
12349 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12350     case TARGET_NR_inotify_rm_watch:
12351         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12352 #endif
12353 
12354 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12355     case TARGET_NR_mq_open:
12356         {
12357             struct mq_attr posix_mq_attr;
12358             struct mq_attr *pposix_mq_attr;
12359             int host_flags;
12360 
12361             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12362             pposix_mq_attr = NULL;
12363             if (arg4) {
12364                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12365                     return -TARGET_EFAULT;
12366                 }
12367                 pposix_mq_attr = &posix_mq_attr;
12368             }
12369             p = lock_user_string(arg1 - 1);
12370             if (!p) {
12371                 return -TARGET_EFAULT;
12372             }
12373             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12374             unlock_user (p, arg1, 0);
12375         }
12376         return ret;
12377 
12378     case TARGET_NR_mq_unlink:
12379         p = lock_user_string(arg1 - 1);
12380         if (!p) {
12381             return -TARGET_EFAULT;
12382         }
12383         ret = get_errno(mq_unlink(p));
12384         unlock_user (p, arg1, 0);
12385         return ret;
12386 
12387 #ifdef TARGET_NR_mq_timedsend
12388     case TARGET_NR_mq_timedsend:
12389         {
12390             struct timespec ts;
12391 
12392             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12393             if (arg5 != 0) {
12394                 if (target_to_host_timespec(&ts, arg5)) {
12395                     return -TARGET_EFAULT;
12396                 }
12397                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12398                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12399                     return -TARGET_EFAULT;
12400                 }
12401             } else {
12402                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12403             }
12404             unlock_user (p, arg2, arg3);
12405         }
12406         return ret;
12407 #endif
12408 #ifdef TARGET_NR_mq_timedsend_time64
12409     case TARGET_NR_mq_timedsend_time64:
12410         {
12411             struct timespec ts;
12412 
12413             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12414             if (arg5 != 0) {
12415                 if (target_to_host_timespec64(&ts, arg5)) {
12416                     return -TARGET_EFAULT;
12417                 }
12418                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12419                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12420                     return -TARGET_EFAULT;
12421                 }
12422             } else {
12423                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12424             }
12425             unlock_user(p, arg2, arg3);
12426         }
12427         return ret;
12428 #endif
12429 
12430 #ifdef TARGET_NR_mq_timedreceive
12431     case TARGET_NR_mq_timedreceive:
12432         {
12433             struct timespec ts;
12434             unsigned int prio;
12435 
12436             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12437             if (arg5 != 0) {
12438                 if (target_to_host_timespec(&ts, arg5)) {
12439                     return -TARGET_EFAULT;
12440                 }
12441                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12442                                                      &prio, &ts));
12443                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12444                     return -TARGET_EFAULT;
12445                 }
12446             } else {
12447                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12448                                                      &prio, NULL));
12449             }
12450             unlock_user (p, arg2, arg3);
12451             if (arg4 != 0)
12452                 put_user_u32(prio, arg4);
12453         }
12454         return ret;
12455 #endif
12456 #ifdef TARGET_NR_mq_timedreceive_time64
12457     case TARGET_NR_mq_timedreceive_time64:
12458         {
12459             struct timespec ts;
12460             unsigned int prio;
12461 
12462             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12463             if (arg5 != 0) {
12464                 if (target_to_host_timespec64(&ts, arg5)) {
12465                     return -TARGET_EFAULT;
12466                 }
12467                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12468                                                      &prio, &ts));
12469                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12470                     return -TARGET_EFAULT;
12471                 }
12472             } else {
12473                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12474                                                      &prio, NULL));
12475             }
12476             unlock_user(p, arg2, arg3);
12477             if (arg4 != 0) {
12478                 put_user_u32(prio, arg4);
12479             }
12480         }
12481         return ret;
12482 #endif
12483 
12484     /* Not implemented for now... */
12485 /*     case TARGET_NR_mq_notify: */
12486 /*         break; */
12487 
12488     case TARGET_NR_mq_getsetattr:
12489         {
12490             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12491             ret = 0;
12492             if (arg2 != 0) {
12493                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12494                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12495                                            &posix_mq_attr_out));
12496             } else if (arg3 != 0) {
12497                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12498             }
12499             if (ret == 0 && arg3 != 0) {
12500                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12501             }
12502         }
12503         return ret;
12504 #endif
12505 
12506 #ifdef CONFIG_SPLICE
12507 #ifdef TARGET_NR_tee
12508     case TARGET_NR_tee:
12509         {
12510             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12511         }
12512         return ret;
12513 #endif
12514 #ifdef TARGET_NR_splice
12515     case TARGET_NR_splice:
12516         {
12517             loff_t loff_in, loff_out;
12518             loff_t *ploff_in = NULL, *ploff_out = NULL;
12519             if (arg2) {
12520                 if (get_user_u64(loff_in, arg2)) {
12521                     return -TARGET_EFAULT;
12522                 }
12523                 ploff_in = &loff_in;
12524             }
12525             if (arg4) {
12526                 if (get_user_u64(loff_out, arg4)) {
12527                     return -TARGET_EFAULT;
12528                 }
12529                 ploff_out = &loff_out;
12530             }
12531             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12532             if (arg2) {
12533                 if (put_user_u64(loff_in, arg2)) {
12534                     return -TARGET_EFAULT;
12535                 }
12536             }
12537             if (arg4) {
12538                 if (put_user_u64(loff_out, arg4)) {
12539                     return -TARGET_EFAULT;
12540                 }
12541             }
12542         }
12543         return ret;
12544 #endif
12545 #ifdef TARGET_NR_vmsplice
12546 	case TARGET_NR_vmsplice:
12547         {
12548             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12549             if (vec != NULL) {
12550                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12551                 unlock_iovec(vec, arg2, arg3, 0);
12552             } else {
12553                 ret = -host_to_target_errno(errno);
12554             }
12555         }
12556         return ret;
12557 #endif
12558 #endif /* CONFIG_SPLICE */
12559 #ifdef CONFIG_EVENTFD
12560 #if defined(TARGET_NR_eventfd)
12561     case TARGET_NR_eventfd:
12562         ret = get_errno(eventfd(arg1, 0));
12563         if (ret >= 0) {
12564             fd_trans_register(ret, &target_eventfd_trans);
12565         }
12566         return ret;
12567 #endif
12568 #if defined(TARGET_NR_eventfd2)
12569     case TARGET_NR_eventfd2:
12570     {
12571         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12572         if (arg2 & TARGET_O_NONBLOCK) {
12573             host_flags |= O_NONBLOCK;
12574         }
12575         if (arg2 & TARGET_O_CLOEXEC) {
12576             host_flags |= O_CLOEXEC;
12577         }
12578         ret = get_errno(eventfd(arg1, host_flags));
12579         if (ret >= 0) {
12580             fd_trans_register(ret, &target_eventfd_trans);
12581         }
12582         return ret;
12583     }
12584 #endif
12585 #endif /* CONFIG_EVENTFD  */
12586 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12587     case TARGET_NR_fallocate:
12588 #if TARGET_ABI_BITS == 32
12589         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12590                                   target_offset64(arg5, arg6)));
12591 #else
12592         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12593 #endif
12594         return ret;
12595 #endif
12596 #if defined(CONFIG_SYNC_FILE_RANGE)
12597 #if defined(TARGET_NR_sync_file_range)
12598     case TARGET_NR_sync_file_range:
12599 #if TARGET_ABI_BITS == 32
12600 #if defined(TARGET_MIPS)
12601         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12602                                         target_offset64(arg5, arg6), arg7));
12603 #else
12604         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12605                                         target_offset64(arg4, arg5), arg6));
12606 #endif /* !TARGET_MIPS */
12607 #else
12608         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12609 #endif
12610         return ret;
12611 #endif
12612 #if defined(TARGET_NR_sync_file_range2) || \
12613     defined(TARGET_NR_arm_sync_file_range)
12614 #if defined(TARGET_NR_sync_file_range2)
12615     case TARGET_NR_sync_file_range2:
12616 #endif
12617 #if defined(TARGET_NR_arm_sync_file_range)
12618     case TARGET_NR_arm_sync_file_range:
12619 #endif
12620         /* This is like sync_file_range but the arguments are reordered */
12621 #if TARGET_ABI_BITS == 32
12622         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12623                                         target_offset64(arg5, arg6), arg2));
12624 #else
12625         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12626 #endif
12627         return ret;
12628 #endif
12629 #endif
12630 #if defined(TARGET_NR_signalfd4)
12631     case TARGET_NR_signalfd4:
12632         return do_signalfd4(arg1, arg2, arg4);
12633 #endif
12634 #if defined(TARGET_NR_signalfd)
12635     case TARGET_NR_signalfd:
12636         return do_signalfd4(arg1, arg2, 0);
12637 #endif
12638 #if defined(CONFIG_EPOLL)
12639 #if defined(TARGET_NR_epoll_create)
12640     case TARGET_NR_epoll_create:
12641         return get_errno(epoll_create(arg1));
12642 #endif
12643 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12644     case TARGET_NR_epoll_create1:
12645         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12646 #endif
12647 #if defined(TARGET_NR_epoll_ctl)
12648     case TARGET_NR_epoll_ctl:
12649     {
12650         struct epoll_event ep;
12651         struct epoll_event *epp = 0;
12652         if (arg4) {
12653             if (arg2 != EPOLL_CTL_DEL) {
12654                 struct target_epoll_event *target_ep;
12655                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12656                     return -TARGET_EFAULT;
12657                 }
12658                 ep.events = tswap32(target_ep->events);
12659                 /*
12660                  * The epoll_data_t union is just opaque data to the kernel,
12661                  * so we transfer all 64 bits across and need not worry what
12662                  * actual data type it is.
12663                  */
12664                 ep.data.u64 = tswap64(target_ep->data.u64);
12665                 unlock_user_struct(target_ep, arg4, 0);
12666             }
12667             /*
12668              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12669              * non-null pointer, even though this argument is ignored.
12670              *
12671              */
12672             epp = &ep;
12673         }
12674         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12675     }
12676 #endif
12677 
12678 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12679 #if defined(TARGET_NR_epoll_wait)
12680     case TARGET_NR_epoll_wait:
12681 #endif
12682 #if defined(TARGET_NR_epoll_pwait)
12683     case TARGET_NR_epoll_pwait:
12684 #endif
12685     {
12686         struct target_epoll_event *target_ep;
12687         struct epoll_event *ep;
12688         int epfd = arg1;
12689         int maxevents = arg3;
12690         int timeout = arg4;
12691 
12692         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12693             return -TARGET_EINVAL;
12694         }
12695 
12696         target_ep = lock_user(VERIFY_WRITE, arg2,
12697                               maxevents * sizeof(struct target_epoll_event), 1);
12698         if (!target_ep) {
12699             return -TARGET_EFAULT;
12700         }
12701 
12702         ep = g_try_new(struct epoll_event, maxevents);
12703         if (!ep) {
12704             unlock_user(target_ep, arg2, 0);
12705             return -TARGET_ENOMEM;
12706         }
12707 
12708         switch (num) {
12709 #if defined(TARGET_NR_epoll_pwait)
12710         case TARGET_NR_epoll_pwait:
12711         {
12712             target_sigset_t *target_set;
12713             sigset_t _set, *set = &_set;
12714 
12715             if (arg5) {
12716                 if (arg6 != sizeof(target_sigset_t)) {
12717                     ret = -TARGET_EINVAL;
12718                     break;
12719                 }
12720 
12721                 target_set = lock_user(VERIFY_READ, arg5,
12722                                        sizeof(target_sigset_t), 1);
12723                 if (!target_set) {
12724                     ret = -TARGET_EFAULT;
12725                     break;
12726                 }
12727                 target_to_host_sigset(set, target_set);
12728                 unlock_user(target_set, arg5, 0);
12729             } else {
12730                 set = NULL;
12731             }
12732 
12733             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12734                                              set, SIGSET_T_SIZE));
12735             break;
12736         }
12737 #endif
12738 #if defined(TARGET_NR_epoll_wait)
12739         case TARGET_NR_epoll_wait:
12740             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12741                                              NULL, 0));
12742             break;
12743 #endif
12744         default:
12745             ret = -TARGET_ENOSYS;
12746         }
12747         if (!is_error(ret)) {
12748             int i;
12749             for (i = 0; i < ret; i++) {
12750                 target_ep[i].events = tswap32(ep[i].events);
12751                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12752             }
12753             unlock_user(target_ep, arg2,
12754                         ret * sizeof(struct target_epoll_event));
12755         } else {
12756             unlock_user(target_ep, arg2, 0);
12757         }
12758         g_free(ep);
12759         return ret;
12760     }
12761 #endif
12762 #endif
12763 #ifdef TARGET_NR_prlimit64
12764     case TARGET_NR_prlimit64:
12765     {
12766         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12767         struct target_rlimit64 *target_rnew, *target_rold;
12768         struct host_rlimit64 rnew, rold, *rnewp = 0;
12769         int resource = target_to_host_resource(arg2);
12770 
12771         if (arg3 && (resource != RLIMIT_AS &&
12772                      resource != RLIMIT_DATA &&
12773                      resource != RLIMIT_STACK)) {
12774             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12775                 return -TARGET_EFAULT;
12776             }
12777             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12778             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12779             unlock_user_struct(target_rnew, arg3, 0);
12780             rnewp = &rnew;
12781         }
12782 
12783         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12784         if (!is_error(ret) && arg4) {
12785             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12786                 return -TARGET_EFAULT;
12787             }
12788             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12789             target_rold->rlim_max = tswap64(rold.rlim_max);
12790             unlock_user_struct(target_rold, arg4, 1);
12791         }
12792         return ret;
12793     }
12794 #endif
12795 #ifdef TARGET_NR_gethostname
12796     case TARGET_NR_gethostname:
12797     {
12798         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12799         if (name) {
12800             ret = get_errno(gethostname(name, arg2));
12801             unlock_user(name, arg1, arg2);
12802         } else {
12803             ret = -TARGET_EFAULT;
12804         }
12805         return ret;
12806     }
12807 #endif
12808 #ifdef TARGET_NR_atomic_cmpxchg_32
12809     case TARGET_NR_atomic_cmpxchg_32:
12810     {
12811         /* should use start_exclusive from main.c */
12812         abi_ulong mem_value;
12813         if (get_user_u32(mem_value, arg6)) {
12814             target_siginfo_t info;
12815             info.si_signo = SIGSEGV;
12816             info.si_errno = 0;
12817             info.si_code = TARGET_SEGV_MAPERR;
12818             info._sifields._sigfault._addr = arg6;
12819             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12820                          QEMU_SI_FAULT, &info);
12821             ret = 0xdeadbeef;
12822 
12823         }
12824         if (mem_value == arg2)
12825             put_user_u32(arg1, arg6);
12826         return mem_value;
12827     }
12828 #endif
12829 #ifdef TARGET_NR_atomic_barrier
12830     case TARGET_NR_atomic_barrier:
12831         /* Like the kernel implementation and the
12832            qemu arm barrier, no-op this? */
12833         return 0;
12834 #endif
12835 
12836 #ifdef TARGET_NR_timer_create
12837     case TARGET_NR_timer_create:
12838     {
12839         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12840 
12841         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12842 
12843         int clkid = arg1;
12844         int timer_index = next_free_host_timer();
12845 
12846         if (timer_index < 0) {
12847             ret = -TARGET_EAGAIN;
12848         } else {
12849             timer_t *phtimer = g_posix_timers  + timer_index;
12850 
12851             if (arg2) {
12852                 phost_sevp = &host_sevp;
12853                 ret = target_to_host_sigevent(phost_sevp, arg2);
12854                 if (ret != 0) {
12855                     return ret;
12856                 }
12857             }
12858 
12859             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12860             if (ret) {
12861                 phtimer = NULL;
12862             } else {
12863                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12864                     return -TARGET_EFAULT;
12865                 }
12866             }
12867         }
12868         return ret;
12869     }
12870 #endif
12871 
12872 #ifdef TARGET_NR_timer_settime
12873     case TARGET_NR_timer_settime:
12874     {
12875         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12876          * struct itimerspec * old_value */
12877         target_timer_t timerid = get_timer_id(arg1);
12878 
12879         if (timerid < 0) {
12880             ret = timerid;
12881         } else if (arg3 == 0) {
12882             ret = -TARGET_EINVAL;
12883         } else {
12884             timer_t htimer = g_posix_timers[timerid];
12885             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12886 
12887             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12888                 return -TARGET_EFAULT;
12889             }
12890             ret = get_errno(
12891                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12892             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12893                 return -TARGET_EFAULT;
12894             }
12895         }
12896         return ret;
12897     }
12898 #endif
12899 
12900 #ifdef TARGET_NR_timer_settime64
12901     case TARGET_NR_timer_settime64:
12902     {
12903         target_timer_t timerid = get_timer_id(arg1);
12904 
12905         if (timerid < 0) {
12906             ret = timerid;
12907         } else if (arg3 == 0) {
12908             ret = -TARGET_EINVAL;
12909         } else {
12910             timer_t htimer = g_posix_timers[timerid];
12911             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12912 
12913             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12914                 return -TARGET_EFAULT;
12915             }
12916             ret = get_errno(
12917                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12918             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12919                 return -TARGET_EFAULT;
12920             }
12921         }
12922         return ret;
12923     }
12924 #endif
12925 
12926 #ifdef TARGET_NR_timer_gettime
12927     case TARGET_NR_timer_gettime:
12928     {
12929         /* args: timer_t timerid, struct itimerspec *curr_value */
12930         target_timer_t timerid = get_timer_id(arg1);
12931 
12932         if (timerid < 0) {
12933             ret = timerid;
12934         } else if (!arg2) {
12935             ret = -TARGET_EFAULT;
12936         } else {
12937             timer_t htimer = g_posix_timers[timerid];
12938             struct itimerspec hspec;
12939             ret = get_errno(timer_gettime(htimer, &hspec));
12940 
12941             if (host_to_target_itimerspec(arg2, &hspec)) {
12942                 ret = -TARGET_EFAULT;
12943             }
12944         }
12945         return ret;
12946     }
12947 #endif
12948 
12949 #ifdef TARGET_NR_timer_gettime64
12950     case TARGET_NR_timer_gettime64:
12951     {
12952         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12953         target_timer_t timerid = get_timer_id(arg1);
12954 
12955         if (timerid < 0) {
12956             ret = timerid;
12957         } else if (!arg2) {
12958             ret = -TARGET_EFAULT;
12959         } else {
12960             timer_t htimer = g_posix_timers[timerid];
12961             struct itimerspec hspec;
12962             ret = get_errno(timer_gettime(htimer, &hspec));
12963 
12964             if (host_to_target_itimerspec64(arg2, &hspec)) {
12965                 ret = -TARGET_EFAULT;
12966             }
12967         }
12968         return ret;
12969     }
12970 #endif
12971 
12972 #ifdef TARGET_NR_timer_getoverrun
12973     case TARGET_NR_timer_getoverrun:
12974     {
12975         /* args: timer_t timerid */
12976         target_timer_t timerid = get_timer_id(arg1);
12977 
12978         if (timerid < 0) {
12979             ret = timerid;
12980         } else {
12981             timer_t htimer = g_posix_timers[timerid];
12982             ret = get_errno(timer_getoverrun(htimer));
12983         }
12984         return ret;
12985     }
12986 #endif
12987 
12988 #ifdef TARGET_NR_timer_delete
12989     case TARGET_NR_timer_delete:
12990     {
12991         /* args: timer_t timerid */
12992         target_timer_t timerid = get_timer_id(arg1);
12993 
12994         if (timerid < 0) {
12995             ret = timerid;
12996         } else {
12997             timer_t htimer = g_posix_timers[timerid];
12998             ret = get_errno(timer_delete(htimer));
12999             g_posix_timers[timerid] = 0;
13000         }
13001         return ret;
13002     }
13003 #endif
13004 
13005 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13006     case TARGET_NR_timerfd_create:
13007         return get_errno(timerfd_create(arg1,
13008                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13009 #endif
13010 
13011 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13012     case TARGET_NR_timerfd_gettime:
13013         {
13014             struct itimerspec its_curr;
13015 
13016             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13017 
13018             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13019                 return -TARGET_EFAULT;
13020             }
13021         }
13022         return ret;
13023 #endif
13024 
13025 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13026     case TARGET_NR_timerfd_gettime64:
13027         {
13028             struct itimerspec its_curr;
13029 
13030             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13031 
13032             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13033                 return -TARGET_EFAULT;
13034             }
13035         }
13036         return ret;
13037 #endif
13038 
13039 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13040     case TARGET_NR_timerfd_settime:
13041         {
13042             struct itimerspec its_new, its_old, *p_new;
13043 
13044             if (arg3) {
13045                 if (target_to_host_itimerspec(&its_new, arg3)) {
13046                     return -TARGET_EFAULT;
13047                 }
13048                 p_new = &its_new;
13049             } else {
13050                 p_new = NULL;
13051             }
13052 
13053             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13054 
13055             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13056                 return -TARGET_EFAULT;
13057             }
13058         }
13059         return ret;
13060 #endif
13061 
13062 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13063     case TARGET_NR_timerfd_settime64:
13064         {
13065             struct itimerspec its_new, its_old, *p_new;
13066 
13067             if (arg3) {
13068                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13069                     return -TARGET_EFAULT;
13070                 }
13071                 p_new = &its_new;
13072             } else {
13073                 p_new = NULL;
13074             }
13075 
13076             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13077 
13078             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13079                 return -TARGET_EFAULT;
13080             }
13081         }
13082         return ret;
13083 #endif
13084 
13085 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13086     case TARGET_NR_ioprio_get:
13087         return get_errno(ioprio_get(arg1, arg2));
13088 #endif
13089 
13090 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13091     case TARGET_NR_ioprio_set:
13092         return get_errno(ioprio_set(arg1, arg2, arg3));
13093 #endif
13094 
13095 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13096     case TARGET_NR_setns:
13097         return get_errno(setns(arg1, arg2));
13098 #endif
13099 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13100     case TARGET_NR_unshare:
13101         return get_errno(unshare(arg1));
13102 #endif
13103 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13104     case TARGET_NR_kcmp:
13105         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13106 #endif
13107 #ifdef TARGET_NR_swapcontext
13108     case TARGET_NR_swapcontext:
13109         /* PowerPC specific.  */
13110         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13111 #endif
13112 #ifdef TARGET_NR_memfd_create
13113     case TARGET_NR_memfd_create:
13114         p = lock_user_string(arg1);
13115         if (!p) {
13116             return -TARGET_EFAULT;
13117         }
13118         ret = get_errno(memfd_create(p, arg2));
13119         fd_trans_unregister(ret);
13120         unlock_user(p, arg1, 0);
13121         return ret;
13122 #endif
13123 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13124     case TARGET_NR_membarrier:
13125         return get_errno(membarrier(arg1, arg2));
13126 #endif
13127 
13128 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13129     case TARGET_NR_copy_file_range:
13130         {
13131             loff_t inoff, outoff;
13132             loff_t *pinoff = NULL, *poutoff = NULL;
13133 
13134             if (arg2) {
13135                 if (get_user_u64(inoff, arg2)) {
13136                     return -TARGET_EFAULT;
13137                 }
13138                 pinoff = &inoff;
13139             }
13140             if (arg4) {
13141                 if (get_user_u64(outoff, arg4)) {
13142                     return -TARGET_EFAULT;
13143                 }
13144                 poutoff = &outoff;
13145             }
13146             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13147                                                  arg5, arg6));
13148             if (!is_error(ret) && ret > 0) {
13149                 if (arg2) {
13150                     if (put_user_u64(inoff, arg2)) {
13151                         return -TARGET_EFAULT;
13152                     }
13153                 }
13154                 if (arg4) {
13155                     if (put_user_u64(outoff, arg4)) {
13156                         return -TARGET_EFAULT;
13157                     }
13158                 }
13159             }
13160         }
13161         return ret;
13162 #endif
13163 
13164     default:
13165         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13166         return -TARGET_ENOSYS;
13167     }
13168     return ret;
13169 }
13170 
13171 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13172                     abi_long arg2, abi_long arg3, abi_long arg4,
13173                     abi_long arg5, abi_long arg6, abi_long arg7,
13174                     abi_long arg8)
13175 {
13176     CPUState *cpu = env_cpu(cpu_env);
13177     abi_long ret;
13178 
13179 #ifdef DEBUG_ERESTARTSYS
13180     /* Debug-only code for exercising the syscall-restart code paths
13181      * in the per-architecture cpu main loops: restart every syscall
13182      * the guest makes once before letting it through.
13183      */
13184     {
13185         static bool flag;
13186         flag = !flag;
13187         if (flag) {
13188             return -TARGET_ERESTARTSYS;
13189         }
13190     }
13191 #endif
13192 
13193     record_syscall_start(cpu, num, arg1,
13194                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13195 
13196     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13197         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13198     }
13199 
13200     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13201                       arg5, arg6, arg7, arg8);
13202 
13203     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13204         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13205                           arg3, arg4, arg5, arg6);
13206     }
13207 
13208     record_syscall_return(cpu, num, ret);
13209     return ret;
13210 }
13211