xref: /openbmc/qemu/linux-user/syscall.c (revision 52a96afa)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #include "linux_loop.h"
116 #include "uname.h"
117 
118 #include "qemu.h"
119 #include "qemu/guest-random.h"
120 #include "user/syscall-trace.h"
121 #include "qapi/error.h"
122 #include "fd-trans.h"
123 #include "tcg/tcg.h"
124 
125 #ifndef CLONE_IO
126 #define CLONE_IO                0x80000000      /* Clone io context */
127 #endif
128 
129 /* We can't directly call the host clone syscall, because this will
130  * badly confuse libc (breaking mutexes, for example). So we must
131  * divide clone flags into:
132  *  * flag combinations that look like pthread_create()
133  *  * flag combinations that look like fork()
134  *  * flags we can implement within QEMU itself
135  *  * flags we can't support and will return an error for
136  */
137 /* For thread creation, all these flags must be present; for
138  * fork, none must be present.
139  */
140 #define CLONE_THREAD_FLAGS                              \
141     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
142      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
143 
144 /* These flags are ignored:
145  * CLONE_DETACHED is now ignored by the kernel;
146  * CLONE_IO is just an optimisation hint to the I/O scheduler
147  */
148 #define CLONE_IGNORED_FLAGS                     \
149     (CLONE_DETACHED | CLONE_IO)
150 
151 /* Flags for fork which we can implement within QEMU itself */
152 #define CLONE_OPTIONAL_FORK_FLAGS               \
153     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
154      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
155 
156 /* Flags for thread creation which we can implement within QEMU itself */
157 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
158     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
159      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
160 
161 #define CLONE_INVALID_FORK_FLAGS                                        \
162     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
163 
164 #define CLONE_INVALID_THREAD_FLAGS                                      \
165     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
166        CLONE_IGNORED_FLAGS))
167 
168 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
169  * have almost all been allocated. We cannot support any of
170  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
171  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
172  * The checks against the invalid thread masks above will catch these.
173  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
174  */
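/*
 * Editor's illustration (not part of the original file): a sketch of how the
 * masks above are typically consulted.  The real checks live in do_fork()
 * later in this file; the exact shape shown here is an assumption, included
 * only to clarify the mask definitions.
 *
 *     if (flags & CLONE_VM) {
 *         // looks like pthread_create(): all CLONE_THREAD_FLAGS must be set
 *         if ((flags & CLONE_INVALID_THREAD_FLAGS) ||
 *             (flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) {
 *             return -TARGET_EINVAL;
 *         }
 *     } else {
 *         // looks like fork(): only CSIGNAL plus the optional/ignored flags
 *         if (flags & CLONE_INVALID_FORK_FLAGS) {
 *             return -TARGET_EINVAL;
 *         }
 *     }
 */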
175 
176 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
177  * once. This exercises the codepaths for restart.
178  */
179 //#define DEBUG_ERESTARTSYS
180 
181 //#include <linux/msdos_fs.h>
182 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
183 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
184 
185 #undef _syscall0
186 #undef _syscall1
187 #undef _syscall2
188 #undef _syscall3
189 #undef _syscall4
190 #undef _syscall5
191 #undef _syscall6
192 
193 #define _syscall0(type,name)		\
194 static type name (void)			\
195 {					\
196 	return syscall(__NR_##name);	\
197 }
198 
199 #define _syscall1(type,name,type1,arg1)		\
200 static type name (type1 arg1)			\
201 {						\
202 	return syscall(__NR_##name, arg1);	\
203 }
204 
205 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
206 static type name (type1 arg1,type2 arg2)		\
207 {							\
208 	return syscall(__NR_##name, arg1, arg2);	\
209 }
210 
211 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
212 static type name (type1 arg1,type2 arg2,type3 arg3)		\
213 {								\
214 	return syscall(__NR_##name, arg1, arg2, arg3);		\
215 }
216 
217 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
218 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
219 {										\
220 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
221 }
222 
223 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
224 		  type5,arg5)							\
225 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
226 {										\
227 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
228 }
229 
230 
231 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
232 		  type5,arg5,type6,arg6)					\
233 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
234                   type6 arg6)							\
235 {										\
236 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
237 }
238 
239 
240 #define __NR_sys_uname __NR_uname
241 #define __NR_sys_getcwd1 __NR_getcwd
242 #define __NR_sys_getdents __NR_getdents
243 #define __NR_sys_getdents64 __NR_getdents64
244 #define __NR_sys_getpriority __NR_getpriority
245 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
246 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
247 #define __NR_sys_syslog __NR_syslog
248 #define __NR_sys_futex __NR_futex
249 #define __NR_sys_inotify_init __NR_inotify_init
250 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
251 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
252 #define __NR_sys_statx __NR_statx
253 
254 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
255 #define __NR__llseek __NR_lseek
256 #endif
257 
258 /* Newer kernel ports have llseek() instead of _llseek() */
259 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
260 #define TARGET_NR__llseek TARGET_NR_llseek
261 #endif
262 
263 #define __NR_sys_gettid __NR_gettid
264 _syscall0(int, sys_gettid)
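/*
 * For reference, the _syscall0() use above expands to roughly:
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_gettid);
 *     }
 *
 * i.e. a thin wrapper that invokes the raw syscall directly (historically
 * glibc did not provide a gettid() wrapper).
 */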
265 
266 /* For the 64-bit guest on 32-bit host case we must emulate
267  * getdents using getdents64, because otherwise the host
268  * might hand us back more dirent records than we can fit
269  * into the guest buffer after structure format conversion.
270  * Otherwise we implement the guest getdents using the host getdents,
270  * if the host has it.
271  */
272 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
273 #define EMULATE_GETDENTS_WITH_GETDENTS
274 #endif
275 
276 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
277 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
278 #endif
279 #if (defined(TARGET_NR_getdents) && \
280       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
281     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
282 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
283 #endif
284 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
285 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
286           loff_t *, res, uint, wh);
287 #endif
288 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
289 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
290           siginfo_t *, uinfo)
291 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
292 #ifdef __NR_exit_group
293 _syscall1(int,exit_group,int,error_code)
294 #endif
295 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
296 _syscall1(int,set_tid_address,int *,tidptr)
297 #endif
298 #if (defined(TARGET_NR_futex) || defined(TARGET_NR_exit)) && defined(__NR_futex)
299 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
300           const struct timespec *,timeout,int *,uaddr2,int,val3)
301 #endif
302 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
303 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
304           unsigned long *, user_mask_ptr);
305 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
306 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
307           unsigned long *, user_mask_ptr);
308 #define __NR_sys_getcpu __NR_getcpu
309 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
310 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
311           void *, arg);
312 _syscall2(int, capget, struct __user_cap_header_struct *, header,
313           struct __user_cap_data_struct *, data);
314 _syscall2(int, capset, struct __user_cap_header_struct *, header,
315           struct __user_cap_data_struct *, data);
316 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
317 _syscall2(int, ioprio_get, int, which, int, who)
318 #endif
319 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
320 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
321 #endif
322 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
323 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
324 #endif
325 
326 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
327 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
328           unsigned long, idx1, unsigned long, idx2)
329 #endif
330 
331 /*
332  * It is assumed that struct statx is architecture independent.
333  */
334 #if defined(TARGET_NR_statx) && defined(__NR_statx)
335 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
336           unsigned int, mask, struct target_statx *, statxbuf)
337 #endif
338 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
339 _syscall2(int, membarrier, int, cmd, int, flags)
340 #endif
341 
342 static bitmask_transtbl fcntl_flags_tbl[] = {
343   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
344   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
345   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
346   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
347   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
348   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
349   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
350   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
351   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
352   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
353   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
354   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
355   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
356 #if defined(O_DIRECT)
357   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
358 #endif
359 #if defined(O_NOATIME)
360   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
361 #endif
362 #if defined(O_CLOEXEC)
363   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
364 #endif
365 #if defined(O_PATH)
366   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
367 #endif
368 #if defined(O_TMPFILE)
369   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
370 #endif
371   /* Don't terminate the list prematurely on 64-bit host+guest.  */
372 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
373   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
374 #endif
375   { 0, 0, 0, 0 }
376 };
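/*
 * Example use of the table above (editor's sketch, not from the original
 * file): each bitmask_transtbl row maps (target_mask, target_bits) to
 * (host_mask, host_bits), so a guest open-flags word can be rewritten into
 * the host encoding even when the numeric values differ per architecture:
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *
 * target_to_host_bitmask() is assumed here to be the translation helper
 * declared in the linux-user headers.
 */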
377 
378 static int sys_getcwd1(char *buf, size_t size)
379 {
380   if (getcwd(buf, size) == NULL) {
381       /* getcwd() sets errno */
382       return (-1);
383   }
384   return strlen(buf)+1;
385 }
386 
387 #ifdef TARGET_NR_utimensat
388 #if defined(__NR_utimensat)
389 #define __NR_sys_utimensat __NR_utimensat
390 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
391           const struct timespec *,tsp,int,flags)
392 #else
393 static int sys_utimensat(int dirfd, const char *pathname,
394                          const struct timespec times[2], int flags)
395 {
396     errno = ENOSYS;
397     return -1;
398 }
399 #endif
400 #endif /* TARGET_NR_utimensat */
401 
402 #ifdef TARGET_NR_renameat2
403 #if defined(__NR_renameat2)
404 #define __NR_sys_renameat2 __NR_renameat2
405 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
406           const char *, new, unsigned int, flags)
407 #else
408 static int sys_renameat2(int oldfd, const char *old,
409                          int newfd, const char *new, int flags)
410 {
411     if (flags == 0) {
412         return renameat(oldfd, old, newfd, new);
413     }
414     errno = ENOSYS;
415     return -1;
416 }
417 #endif
418 #endif /* TARGET_NR_renameat2 */
419 
420 #ifdef CONFIG_INOTIFY
421 #include <sys/inotify.h>
422 
423 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
424 static int sys_inotify_init(void)
425 {
426   return (inotify_init());
427 }
428 #endif
429 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
430 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
431 {
432   return (inotify_add_watch(fd, pathname, mask));
433 }
434 #endif
435 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
436 static int sys_inotify_rm_watch(int fd, int32_t wd)
437 {
438   return (inotify_rm_watch(fd, wd));
439 }
440 #endif
441 #ifdef CONFIG_INOTIFY1
442 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
443 static int sys_inotify_init1(int flags)
444 {
445   return (inotify_init1(flags));
446 }
447 #endif
448 #endif
449 #else
450 /* Userspace can usually survive runtime without inotify */
451 #undef TARGET_NR_inotify_init
452 #undef TARGET_NR_inotify_init1
453 #undef TARGET_NR_inotify_add_watch
454 #undef TARGET_NR_inotify_rm_watch
455 #endif /* CONFIG_INOTIFY  */
456 
457 #if defined(TARGET_NR_prlimit64)
458 #ifndef __NR_prlimit64
459 # define __NR_prlimit64 -1
460 #endif
461 #define __NR_sys_prlimit64 __NR_prlimit64
462 /* The glibc rlimit structure may not be the one used by the underlying syscall */
463 struct host_rlimit64 {
464     uint64_t rlim_cur;
465     uint64_t rlim_max;
466 };
467 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
468           const struct host_rlimit64 *, new_limit,
469           struct host_rlimit64 *, old_limit)
470 #endif
471 
472 
473 #if defined(TARGET_NR_timer_create)
474 /* Maximum of 32 active POSIX timers allowed at any one time. */
475 static timer_t g_posix_timers[32] = { 0, } ;
476 
477 static inline int next_free_host_timer(void)
478 {
479     int k ;
480     /* FIXME: Does finding the next free slot require a lock? */
481     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
482         if (g_posix_timers[k] == 0) {
483             g_posix_timers[k] = (timer_t) 1;
484             return k;
485         }
486     }
487     return -1;
488 }
489 #endif
490 
491 /* ARM EABI and MIPS expect 64-bit types to be aligned even on pairs of registers */
492 #ifdef TARGET_ARM
493 static inline int regpairs_aligned(void *cpu_env, int num)
494 {
495     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
496 }
497 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
498 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
499 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
500 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even pairs
501  * of registers, which translates to the same behaviour as ARM/MIPS because we start
502  * with r3 as arg1 */
503 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
504 #elif defined(TARGET_SH4)
505 /* SH4 doesn't align register pairs, except for p{read,write}64 */
506 static inline int regpairs_aligned(void *cpu_env, int num)
507 {
508     switch (num) {
509     case TARGET_NR_pread64:
510     case TARGET_NR_pwrite64:
511         return 1;
512 
513     default:
514         return 0;
515     }
516 }
517 #elif defined(TARGET_XTENSA)
518 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
519 #else
520 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
521 #endif
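/*
 * Editor's note (an assumption about the callers, for clarity): when
 * regpairs_aligned() returns 1, syscall handlers skip the odd padding
 * register, e.g. a 32-bit ARM EABI guest passes the 64-bit offset of
 * pread64 in (arg5, arg6) rather than (arg4, arg5), and the handler
 * reassembles it with something like target_offset64(low, high).
 */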
522 
523 #define ERRNO_TABLE_SIZE 1200
524 
525 /* target_to_host_errno_table[] is initialized from
526  * host_to_target_errno_table[] in syscall_init(). */
527 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
528 };
529 
530 /*
531  * This list is the union of errno values overridden in asm-<arch>/errno.h
532  * minus the errnos that are not actually generic to all archs.
533  */
534 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
535     [EAGAIN]		= TARGET_EAGAIN,
536     [EIDRM]		= TARGET_EIDRM,
537     [ECHRNG]		= TARGET_ECHRNG,
538     [EL2NSYNC]		= TARGET_EL2NSYNC,
539     [EL3HLT]		= TARGET_EL3HLT,
540     [EL3RST]		= TARGET_EL3RST,
541     [ELNRNG]		= TARGET_ELNRNG,
542     [EUNATCH]		= TARGET_EUNATCH,
543     [ENOCSI]		= TARGET_ENOCSI,
544     [EL2HLT]		= TARGET_EL2HLT,
545     [EDEADLK]		= TARGET_EDEADLK,
546     [ENOLCK]		= TARGET_ENOLCK,
547     [EBADE]		= TARGET_EBADE,
548     [EBADR]		= TARGET_EBADR,
549     [EXFULL]		= TARGET_EXFULL,
550     [ENOANO]		= TARGET_ENOANO,
551     [EBADRQC]		= TARGET_EBADRQC,
552     [EBADSLT]		= TARGET_EBADSLT,
553     [EBFONT]		= TARGET_EBFONT,
554     [ENOSTR]		= TARGET_ENOSTR,
555     [ENODATA]		= TARGET_ENODATA,
556     [ETIME]		= TARGET_ETIME,
557     [ENOSR]		= TARGET_ENOSR,
558     [ENONET]		= TARGET_ENONET,
559     [ENOPKG]		= TARGET_ENOPKG,
560     [EREMOTE]		= TARGET_EREMOTE,
561     [ENOLINK]		= TARGET_ENOLINK,
562     [EADV]		= TARGET_EADV,
563     [ESRMNT]		= TARGET_ESRMNT,
564     [ECOMM]		= TARGET_ECOMM,
565     [EPROTO]		= TARGET_EPROTO,
566     [EDOTDOT]		= TARGET_EDOTDOT,
567     [EMULTIHOP]		= TARGET_EMULTIHOP,
568     [EBADMSG]		= TARGET_EBADMSG,
569     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
570     [EOVERFLOW]		= TARGET_EOVERFLOW,
571     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
572     [EBADFD]		= TARGET_EBADFD,
573     [EREMCHG]		= TARGET_EREMCHG,
574     [ELIBACC]		= TARGET_ELIBACC,
575     [ELIBBAD]		= TARGET_ELIBBAD,
576     [ELIBSCN]		= TARGET_ELIBSCN,
577     [ELIBMAX]		= TARGET_ELIBMAX,
578     [ELIBEXEC]		= TARGET_ELIBEXEC,
579     [EILSEQ]		= TARGET_EILSEQ,
580     [ENOSYS]		= TARGET_ENOSYS,
581     [ELOOP]		= TARGET_ELOOP,
582     [ERESTART]		= TARGET_ERESTART,
583     [ESTRPIPE]		= TARGET_ESTRPIPE,
584     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
585     [EUSERS]		= TARGET_EUSERS,
586     [ENOTSOCK]		= TARGET_ENOTSOCK,
587     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
588     [EMSGSIZE]		= TARGET_EMSGSIZE,
589     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
590     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
591     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
592     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
593     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
594     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
595     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
596     [EADDRINUSE]	= TARGET_EADDRINUSE,
597     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
598     [ENETDOWN]		= TARGET_ENETDOWN,
599     [ENETUNREACH]	= TARGET_ENETUNREACH,
600     [ENETRESET]		= TARGET_ENETRESET,
601     [ECONNABORTED]	= TARGET_ECONNABORTED,
602     [ECONNRESET]	= TARGET_ECONNRESET,
603     [ENOBUFS]		= TARGET_ENOBUFS,
604     [EISCONN]		= TARGET_EISCONN,
605     [ENOTCONN]		= TARGET_ENOTCONN,
606     [EUCLEAN]		= TARGET_EUCLEAN,
607     [ENOTNAM]		= TARGET_ENOTNAM,
608     [ENAVAIL]		= TARGET_ENAVAIL,
609     [EISNAM]		= TARGET_EISNAM,
610     [EREMOTEIO]		= TARGET_EREMOTEIO,
611     [EDQUOT]            = TARGET_EDQUOT,
612     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
613     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
614     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
615     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
616     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
617     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
618     [EALREADY]		= TARGET_EALREADY,
619     [EINPROGRESS]	= TARGET_EINPROGRESS,
620     [ESTALE]		= TARGET_ESTALE,
621     [ECANCELED]		= TARGET_ECANCELED,
622     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
623     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
624 #ifdef ENOKEY
625     [ENOKEY]		= TARGET_ENOKEY,
626 #endif
627 #ifdef EKEYEXPIRED
628     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
629 #endif
630 #ifdef EKEYREVOKED
631     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
632 #endif
633 #ifdef EKEYREJECTED
634     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
635 #endif
636 #ifdef EOWNERDEAD
637     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
638 #endif
639 #ifdef ENOTRECOVERABLE
640     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
641 #endif
642 #ifdef ENOMSG
643     [ENOMSG]            = TARGET_ENOMSG,
644 #endif
645 #ifdef ERFKILL
646     [ERFKILL]           = TARGET_ERFKILL,
647 #endif
648 #ifdef EHWPOISON
649     [EHWPOISON]         = TARGET_EHWPOISON,
650 #endif
651 };
652 
653 static inline int host_to_target_errno(int err)
654 {
655     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
656         host_to_target_errno_table[err]) {
657         return host_to_target_errno_table[err];
658     }
659     return err;
660 }
661 
662 static inline int target_to_host_errno(int err)
663 {
664     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
665         target_to_host_errno_table[err]) {
666         return target_to_host_errno_table[err];
667     }
668     return err;
669 }
670 
671 static inline abi_long get_errno(abi_long ret)
672 {
673     if (ret == -1)
674         return -host_to_target_errno(errno);
675     else
676         return ret;
677 }
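/*
 * Typical usage (illustration only): get_errno() turns the host's -1/errno
 * convention into a negative *target* errno, so a handler can write e.g.
 *
 *     abi_long ret = get_errno(open(pathname, host_flags));
 *     // ret is now either a valid fd or e.g. -TARGET_ENOENT
 */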
678 
679 const char *target_strerror(int err)
680 {
681     if (err == TARGET_ERESTARTSYS) {
682         return "To be restarted";
683     }
684     if (err == TARGET_QEMU_ESIGRETURN) {
685         return "Successful exit from sigreturn";
686     }
687 
688     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
689         return NULL;
690     }
691     return strerror(target_to_host_errno(err));
692 }
693 
694 #define safe_syscall0(type, name) \
695 static type safe_##name(void) \
696 { \
697     return safe_syscall(__NR_##name); \
698 }
699 
700 #define safe_syscall1(type, name, type1, arg1) \
701 static type safe_##name(type1 arg1) \
702 { \
703     return safe_syscall(__NR_##name, arg1); \
704 }
705 
706 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
707 static type safe_##name(type1 arg1, type2 arg2) \
708 { \
709     return safe_syscall(__NR_##name, arg1, arg2); \
710 }
711 
712 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
713 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
714 { \
715     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
716 }
717 
718 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
719     type4, arg4) \
720 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
721 { \
722     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
723 }
724 
725 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
726     type4, arg4, type5, arg5) \
727 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
728     type5 arg5) \
729 { \
730     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
731 }
732 
733 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
734     type4, arg4, type5, arg5, type6, arg6) \
735 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
736     type5 arg5, type6 arg6) \
737 { \
738     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
739 }
740 
741 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
742 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
743 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
744               int, flags, mode_t, mode)
745 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
746 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
747               struct rusage *, rusage)
748 #endif
749 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
750               int, options, struct rusage *, rusage)
751 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
752 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
753     defined(TARGET_NR_pselect6)
754 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
755               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
756 #endif
757 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
758 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
759               struct timespec *, tsp, const sigset_t *, sigmask,
760               size_t, sigsetsize)
761 #endif
762 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
763               int, maxevents, int, timeout, const sigset_t *, sigmask,
764               size_t, sigsetsize)
765 #ifdef TARGET_NR_futex
766 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
767               const struct timespec *,timeout,int *,uaddr2,int,val3)
768 #endif
769 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
770 safe_syscall2(int, kill, pid_t, pid, int, sig)
771 safe_syscall2(int, tkill, int, tid, int, sig)
772 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
773 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
774 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
775 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
776               unsigned long, pos_l, unsigned long, pos_h)
777 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
778               unsigned long, pos_l, unsigned long, pos_h)
779 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
780               socklen_t, addrlen)
781 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
782               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
783 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
784               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
785 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
786 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
787 safe_syscall2(int, flock, int, fd, int, operation)
788 #ifdef TARGET_NR_rt_sigtimedwait
789 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
790               const struct timespec *, uts, size_t, sigsetsize)
791 #endif
792 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
793               int, flags)
794 #if defined(TARGET_NR_nanosleep)
795 safe_syscall2(int, nanosleep, const struct timespec *, req,
796               struct timespec *, rem)
797 #endif
798 #ifdef TARGET_NR_clock_nanosleep
799 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
800               const struct timespec *, req, struct timespec *, rem)
801 #endif
802 #ifdef __NR_ipc
803 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
804               void *, ptr, long, fifth)
805 #endif
806 #ifdef __NR_msgsnd
807 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
808               int, flags)
809 #endif
810 #ifdef __NR_msgrcv
811 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
812               long, msgtype, int, flags)
813 #endif
814 #ifdef __NR_semtimedop
815 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
816               unsigned, nsops, const struct timespec *, timeout)
817 #endif
818 #ifdef TARGET_NR_mq_timedsend
819 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
820               size_t, len, unsigned, prio, const struct timespec *, timeout)
821 #endif
822 #ifdef TARGET_NR_mq_timedreceive
823 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
824               size_t, len, unsigned *, prio, const struct timespec *, timeout)
825 #endif
826 /* We do ioctl like this rather than via safe_syscall3 to preserve the
827  * "third argument might be integer or pointer or not present" behaviour of
828  * the libc function.
829  */
830 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
831 /* Similarly for fcntl. Note that callers must always:
832  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
833  *  use the flock64 struct rather than unsuffixed flock
834  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
835  */
836 #ifdef __NR_fcntl64
837 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
838 #else
839 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
840 #endif
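/*
 * Example (illustration only), following the rule stated above: callers use
 * the 64-bit constants and struct flock64 so that 32-bit and 64-bit hosts
 * behave identically, e.g.
 *
 *     struct flock64 fl64;
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 */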
841 
842 static inline int host_to_target_sock_type(int host_type)
843 {
844     int target_type;
845 
846     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
847     case SOCK_DGRAM:
848         target_type = TARGET_SOCK_DGRAM;
849         break;
850     case SOCK_STREAM:
851         target_type = TARGET_SOCK_STREAM;
852         break;
853     default:
854         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
855         break;
856     }
857 
858 #if defined(SOCK_CLOEXEC)
859     if (host_type & SOCK_CLOEXEC) {
860         target_type |= TARGET_SOCK_CLOEXEC;
861     }
862 #endif
863 
864 #if defined(SOCK_NONBLOCK)
865     if (host_type & SOCK_NONBLOCK) {
866         target_type |= TARGET_SOCK_NONBLOCK;
867     }
868 #endif
869 
870     return target_type;
871 }
872 
873 static abi_ulong target_brk;
874 static abi_ulong target_original_brk;
875 static abi_ulong brk_page;
876 
877 void target_set_brk(abi_ulong new_brk)
878 {
879     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
880     brk_page = HOST_PAGE_ALIGN(target_brk);
881 }
882 
883 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
884 #define DEBUGF_BRK(message, args...)
885 
886 /* do_brk() must return target values and target errnos. */
887 abi_long do_brk(abi_ulong new_brk)
888 {
889     abi_long mapped_addr;
890     abi_ulong new_alloc_size;
891 
892     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
893 
894     if (!new_brk) {
895         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
896         return target_brk;
897     }
898     if (new_brk < target_original_brk) {
899         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
900                    target_brk);
901         return target_brk;
902     }
903 
904     /* If the new brk is less than the highest page reserved to the
905      * target heap allocation, set it and we're almost done...  */
906     if (new_brk <= brk_page) {
907         /* Heap contents are initialized to zero, as for anonymous
908          * mapped pages.  */
909         if (new_brk > target_brk) {
910             memset(g2h(target_brk), 0, new_brk - target_brk);
911         }
912 	target_brk = new_brk;
913         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
914 	return target_brk;
915     }
916 
917     /* We need to allocate more memory after the brk... Note that
918      * we don't use MAP_FIXED because that will map over the top of
919      * any existing mapping (like the one with the host libc or qemu
920      * itself); instead we treat "mapped but at wrong address" as
921      * a failure and unmap again.
922      */
923     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
924     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
925                                         PROT_READ|PROT_WRITE,
926                                         MAP_ANON|MAP_PRIVATE, 0, 0));
927 
928     if (mapped_addr == brk_page) {
929         /* Heap contents are initialized to zero, as for anonymous
930          * mapped pages.  Technically the new pages are already
931          * initialized to zero since they *are* anonymous mapped
932          * pages, however we have to take care with the contents that
933          * come from the remaining part of the previous page: it may
934          * contain garbage data due to previous heap usage (grown
935          * then shrunk).  */
936         memset(g2h(target_brk), 0, brk_page - target_brk);
937 
938         target_brk = new_brk;
939         brk_page = HOST_PAGE_ALIGN(target_brk);
940         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
941             target_brk);
942         return target_brk;
943     } else if (mapped_addr != -1) {
944         /* Mapped but at wrong address, meaning there wasn't actually
945          * enough space for this brk.
946          */
947         target_munmap(mapped_addr, new_alloc_size);
948         mapped_addr = -1;
949         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
950     }
951     else {
952         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
953     }
954 
955 #if defined(TARGET_ALPHA)
956     /* We (partially) emulate OSF/1 on Alpha, which requires we
957        return a proper errno, not an unchanged brk value.  */
958     return -TARGET_ENOMEM;
959 #endif
960     /* For everything else, return the previous break. */
961     return target_brk;
962 }
963 
964 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
965     defined(TARGET_NR_pselect6)
966 static inline abi_long copy_from_user_fdset(fd_set *fds,
967                                             abi_ulong target_fds_addr,
968                                             int n)
969 {
970     int i, nw, j, k;
971     abi_ulong b, *target_fds;
972 
973     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
974     if (!(target_fds = lock_user(VERIFY_READ,
975                                  target_fds_addr,
976                                  sizeof(abi_ulong) * nw,
977                                  1)))
978         return -TARGET_EFAULT;
979 
980     FD_ZERO(fds);
981     k = 0;
982     for (i = 0; i < nw; i++) {
983         /* grab the abi_ulong */
984         __get_user(b, &target_fds[i]);
985         for (j = 0; j < TARGET_ABI_BITS; j++) {
986             /* check the bit inside the abi_ulong */
987             if ((b >> j) & 1)
988                 FD_SET(k, fds);
989             k++;
990         }
991     }
992 
993     unlock_user(target_fds, target_fds_addr, 0);
994 
995     return 0;
996 }
997 
998 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
999                                                  abi_ulong target_fds_addr,
1000                                                  int n)
1001 {
1002     if (target_fds_addr) {
1003         if (copy_from_user_fdset(fds, target_fds_addr, n))
1004             return -TARGET_EFAULT;
1005         *fds_ptr = fds;
1006     } else {
1007         *fds_ptr = NULL;
1008     }
1009     return 0;
1010 }
1011 
1012 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1013                                           const fd_set *fds,
1014                                           int n)
1015 {
1016     int i, nw, j, k;
1017     abi_long v;
1018     abi_ulong *target_fds;
1019 
1020     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1021     if (!(target_fds = lock_user(VERIFY_WRITE,
1022                                  target_fds_addr,
1023                                  sizeof(abi_ulong) * nw,
1024                                  0)))
1025         return -TARGET_EFAULT;
1026 
1027     k = 0;
1028     for (i = 0; i < nw; i++) {
1029         v = 0;
1030         for (j = 0; j < TARGET_ABI_BITS; j++) {
1031             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1032             k++;
1033         }
1034         __put_user(v, &target_fds[i]);
1035     }
1036 
1037     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1038 
1039     return 0;
1040 }
1041 #endif
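/*
 * Worked example (editor's illustration): with TARGET_ABI_BITS == 32, guest
 * fd 35 lives in target word 1, bit 3.  copy_from_user_fdset() reads word 1,
 * tests bit 3 and calls FD_SET(35, fds); copy_to_user_fdset() does the
 * reverse, repacking the FD_ISSET() results into abi_ulong words in the
 * same order.
 */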
1042 
1043 #if defined(__alpha__)
1044 #define HOST_HZ 1024
1045 #else
1046 #define HOST_HZ 100
1047 #endif
1048 
1049 static inline abi_long host_to_target_clock_t(long ticks)
1050 {
1051 #if HOST_HZ == TARGET_HZ
1052     return ticks;
1053 #else
1054     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1055 #endif
1056 }
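/*
 * Worked example (editor's illustration): with HOST_HZ == 100 and a target
 * where TARGET_HZ is 1024 (e.g. Alpha), 250 host ticks are reported to the
 * guest as 250 * 1024 / 100 = 2560 ticks.
 */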
1057 
1058 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1059                                              const struct rusage *rusage)
1060 {
1061     struct target_rusage *target_rusage;
1062 
1063     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1064         return -TARGET_EFAULT;
1065     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1066     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1067     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1068     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1069     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1070     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1071     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1072     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1073     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1074     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1075     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1076     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1077     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1078     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1079     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1080     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1081     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1082     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1083     unlock_user_struct(target_rusage, target_addr, 1);
1084 
1085     return 0;
1086 }
1087 
1088 #ifdef TARGET_NR_setrlimit
1089 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1090 {
1091     abi_ulong target_rlim_swap;
1092     rlim_t result;
1093 
1094     target_rlim_swap = tswapal(target_rlim);
1095     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1096         return RLIM_INFINITY;
1097 
1098     result = target_rlim_swap;
1099     if (target_rlim_swap != (rlim_t)result)
1100         return RLIM_INFINITY;
1101 
1102     return result;
1103 }
1104 #endif
1105 
1106 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1107 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1108 {
1109     abi_ulong target_rlim_swap;
1110     abi_ulong result;
1111 
1112     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1113         target_rlim_swap = TARGET_RLIM_INFINITY;
1114     else
1115         target_rlim_swap = rlim;
1116     result = tswapal(target_rlim_swap);
1117 
1118     return result;
1119 }
1120 #endif
1121 
1122 static inline int target_to_host_resource(int code)
1123 {
1124     switch (code) {
1125     case TARGET_RLIMIT_AS:
1126         return RLIMIT_AS;
1127     case TARGET_RLIMIT_CORE:
1128         return RLIMIT_CORE;
1129     case TARGET_RLIMIT_CPU:
1130         return RLIMIT_CPU;
1131     case TARGET_RLIMIT_DATA:
1132         return RLIMIT_DATA;
1133     case TARGET_RLIMIT_FSIZE:
1134         return RLIMIT_FSIZE;
1135     case TARGET_RLIMIT_LOCKS:
1136         return RLIMIT_LOCKS;
1137     case TARGET_RLIMIT_MEMLOCK:
1138         return RLIMIT_MEMLOCK;
1139     case TARGET_RLIMIT_MSGQUEUE:
1140         return RLIMIT_MSGQUEUE;
1141     case TARGET_RLIMIT_NICE:
1142         return RLIMIT_NICE;
1143     case TARGET_RLIMIT_NOFILE:
1144         return RLIMIT_NOFILE;
1145     case TARGET_RLIMIT_NPROC:
1146         return RLIMIT_NPROC;
1147     case TARGET_RLIMIT_RSS:
1148         return RLIMIT_RSS;
1149     case TARGET_RLIMIT_RTPRIO:
1150         return RLIMIT_RTPRIO;
1151     case TARGET_RLIMIT_SIGPENDING:
1152         return RLIMIT_SIGPENDING;
1153     case TARGET_RLIMIT_STACK:
1154         return RLIMIT_STACK;
1155     default:
1156         return code;
1157     }
1158 }
1159 
1160 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1161                                               abi_ulong target_tv_addr)
1162 {
1163     struct target_timeval *target_tv;
1164 
1165     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1166         return -TARGET_EFAULT;
1167     }
1168 
1169     __get_user(tv->tv_sec, &target_tv->tv_sec);
1170     __get_user(tv->tv_usec, &target_tv->tv_usec);
1171 
1172     unlock_user_struct(target_tv, target_tv_addr, 0);
1173 
1174     return 0;
1175 }
1176 
1177 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1178                                             const struct timeval *tv)
1179 {
1180     struct target_timeval *target_tv;
1181 
1182     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1183         return -TARGET_EFAULT;
1184     }
1185 
1186     __put_user(tv->tv_sec, &target_tv->tv_sec);
1187     __put_user(tv->tv_usec, &target_tv->tv_usec);
1188 
1189     unlock_user_struct(target_tv, target_tv_addr, 1);
1190 
1191     return 0;
1192 }
1193 
1194 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1195                                              const struct timeval *tv)
1196 {
1197     struct target__kernel_sock_timeval *target_tv;
1198 
1199     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1200         return -TARGET_EFAULT;
1201     }
1202 
1203     __put_user(tv->tv_sec, &target_tv->tv_sec);
1204     __put_user(tv->tv_usec, &target_tv->tv_usec);
1205 
1206     unlock_user_struct(target_tv, target_tv_addr, 1);
1207 
1208     return 0;
1209 }
1210 
1211 #if defined(TARGET_NR_futex) || \
1212     defined(TARGET_NR_rt_sigtimedwait) || \
1213     defined(TARGET_NR_pselect6) || \
1214     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1215     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1216     defined(TARGET_NR_mq_timedreceive)
1217 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1218                                                abi_ulong target_addr)
1219 {
1220     struct target_timespec *target_ts;
1221 
1222     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1223         return -TARGET_EFAULT;
1224     }
1225     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1226     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1227     unlock_user_struct(target_ts, target_addr, 0);
1228     return 0;
1229 }
1230 #endif
1231 
1232 #if defined(TARGET_NR_clock_settime64)
1233 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1234                                                  abi_ulong target_addr)
1235 {
1236     struct target__kernel_timespec *target_ts;
1237 
1238     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1239         return -TARGET_EFAULT;
1240     }
1241     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1242     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1243     unlock_user_struct(target_ts, target_addr, 0);
1244     return 0;
1245 }
1246 #endif
1247 
1248 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1249                                                struct timespec *host_ts)
1250 {
1251     struct target_timespec *target_ts;
1252 
1253     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1254         return -TARGET_EFAULT;
1255     }
1256     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1257     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1258     unlock_user_struct(target_ts, target_addr, 1);
1259     return 0;
1260 }
1261 
1262 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1263                                                  struct timespec *host_ts)
1264 {
1265     struct target__kernel_timespec *target_ts;
1266 
1267     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1268         return -TARGET_EFAULT;
1269     }
1270     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1271     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1272     unlock_user_struct(target_ts, target_addr, 1);
1273     return 0;
1274 }
1275 
1276 #if defined(TARGET_NR_settimeofday)
1277 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1278                                                abi_ulong target_tz_addr)
1279 {
1280     struct target_timezone *target_tz;
1281 
1282     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1283         return -TARGET_EFAULT;
1284     }
1285 
1286     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1287     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1288 
1289     unlock_user_struct(target_tz, target_tz_addr, 0);
1290 
1291     return 0;
1292 }
1293 #endif
1294 
1295 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1296 #include <mqueue.h>
1297 
1298 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1299                                               abi_ulong target_mq_attr_addr)
1300 {
1301     struct target_mq_attr *target_mq_attr;
1302 
1303     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1304                           target_mq_attr_addr, 1))
1305         return -TARGET_EFAULT;
1306 
1307     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1308     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1309     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1310     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1311 
1312     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1313 
1314     return 0;
1315 }
1316 
1317 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1318                                             const struct mq_attr *attr)
1319 {
1320     struct target_mq_attr *target_mq_attr;
1321 
1322     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1323                           target_mq_attr_addr, 0))
1324         return -TARGET_EFAULT;
1325 
1326     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1327     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1328     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1329     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1330 
1331     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1332 
1333     return 0;
1334 }
1335 #endif
1336 
1337 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1338 /* do_select() must return target values and target errnos. */
1339 static abi_long do_select(int n,
1340                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1341                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1342 {
1343     fd_set rfds, wfds, efds;
1344     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1345     struct timeval tv;
1346     struct timespec ts, *ts_ptr;
1347     abi_long ret;
1348 
1349     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1350     if (ret) {
1351         return ret;
1352     }
1353     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1354     if (ret) {
1355         return ret;
1356     }
1357     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1358     if (ret) {
1359         return ret;
1360     }
1361 
1362     if (target_tv_addr) {
1363         if (copy_from_user_timeval(&tv, target_tv_addr))
1364             return -TARGET_EFAULT;
1365         ts.tv_sec = tv.tv_sec;
1366         ts.tv_nsec = tv.tv_usec * 1000;
1367         ts_ptr = &ts;
1368     } else {
1369         ts_ptr = NULL;
1370     }
1371 
1372     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1373                                   ts_ptr, NULL));
1374 
1375     if (!is_error(ret)) {
1376         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1377             return -TARGET_EFAULT;
1378         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1379             return -TARGET_EFAULT;
1380         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1381             return -TARGET_EFAULT;
1382 
1383         if (target_tv_addr) {
1384             tv.tv_sec = ts.tv_sec;
1385             tv.tv_usec = ts.tv_nsec / 1000;
1386             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1387                 return -TARGET_EFAULT;
1388             }
1389         }
1390     }
1391 
1392     return ret;
1393 }
1394 
1395 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1396 static abi_long do_old_select(abi_ulong arg1)
1397 {
1398     struct target_sel_arg_struct *sel;
1399     abi_ulong inp, outp, exp, tvp;
1400     long nsel;
1401 
1402     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1403         return -TARGET_EFAULT;
1404     }
1405 
1406     nsel = tswapal(sel->n);
1407     inp = tswapal(sel->inp);
1408     outp = tswapal(sel->outp);
1409     exp = tswapal(sel->exp);
1410     tvp = tswapal(sel->tvp);
1411 
1412     unlock_user_struct(sel, arg1, 0);
1413 
1414     return do_select(nsel, inp, outp, exp, tvp);
1415 }
1416 #endif
1417 #endif
1418 
1419 static abi_long do_pipe2(int host_pipe[], int flags)
1420 {
1421 #ifdef CONFIG_PIPE2
1422     return pipe2(host_pipe, flags);
1423 #else
1424     return -ENOSYS;
1425 #endif
1426 }
1427 
1428 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1429                         int flags, int is_pipe2)
1430 {
1431     int host_pipe[2];
1432     abi_long ret;
1433     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1434 
1435     if (is_error(ret))
1436         return get_errno(ret);
1437 
1438     /* Several targets have special calling conventions for the original
1439        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1440     if (!is_pipe2) {
1441 #if defined(TARGET_ALPHA)
1442         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1443         return host_pipe[0];
1444 #elif defined(TARGET_MIPS)
1445         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1446         return host_pipe[0];
1447 #elif defined(TARGET_SH4)
1448         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1449         return host_pipe[0];
1450 #elif defined(TARGET_SPARC)
1451         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1452         return host_pipe[0];
1453 #endif
1454     }
1455 
1456     if (put_user_s32(host_pipe[0], pipedes)
1457         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1458         return -TARGET_EFAULT;
1459     return get_errno(ret);
1460 }
1461 
1462 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1463                                               abi_ulong target_addr,
1464                                               socklen_t len)
1465 {
1466     struct target_ip_mreqn *target_smreqn;
1467 
1468     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1469     if (!target_smreqn)
1470         return -TARGET_EFAULT;
1471     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1472     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1473     if (len == sizeof(struct target_ip_mreqn))
1474         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1475     unlock_user(target_smreqn, target_addr, 0);
1476 
1477     return 0;
1478 }
1479 
1480 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1481                                                abi_ulong target_addr,
1482                                                socklen_t len)
1483 {
1484     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1485     sa_family_t sa_family;
1486     struct target_sockaddr *target_saddr;
1487 
1488     if (fd_trans_target_to_host_addr(fd)) {
1489         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1490     }
1491 
1492     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1493     if (!target_saddr)
1494         return -TARGET_EFAULT;
1495 
1496     sa_family = tswap16(target_saddr->sa_family);
1497 
1498     /* Oops. The caller might send an incomplete sun_path; sun_path
1499      * must be terminated by \0 (see the manual page), but
1500      * unfortunately it is quite common to specify sockaddr_un
1501      * length as "strlen(x->sun_path)" while it should be
1502      * "strlen(...) + 1". We'll fix that here if needed.
1503      * The Linux kernel applies a similar fix-up.
1504      */
1505 
1506     if (sa_family == AF_UNIX) {
1507         if (len < unix_maxlen && len > 0) {
1508             char *cp = (char*)target_saddr;
1509 
1510             if ( cp[len-1] && !cp[len] )
1511                 len++;
1512         }
1513         if (len > unix_maxlen)
1514             len = unix_maxlen;
1515     }
1516 
1517     memcpy(addr, target_saddr, len);
1518     addr->sa_family = sa_family;
1519     if (sa_family == AF_NETLINK) {
1520         struct sockaddr_nl *nladdr;
1521 
1522         nladdr = (struct sockaddr_nl *)addr;
1523         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1524         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1525     } else if (sa_family == AF_PACKET) {
1526 	struct target_sockaddr_ll *lladdr;
1527 
1528 	lladdr = (struct target_sockaddr_ll *)addr;
1529 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1530 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1531     }
1532     unlock_user(target_saddr, target_addr, 0);
1533 
1534     return 0;
1535 }
1536 
1537 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1538                                                struct sockaddr *addr,
1539                                                socklen_t len)
1540 {
1541     struct target_sockaddr *target_saddr;
1542 
1543     if (len == 0) {
1544         return 0;
1545     }
1546     assert(addr);
1547 
1548     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1549     if (!target_saddr)
1550         return -TARGET_EFAULT;
1551     memcpy(target_saddr, addr, len);
1552     if (len >= offsetof(struct target_sockaddr, sa_family) +
1553         sizeof(target_saddr->sa_family)) {
1554         target_saddr->sa_family = tswap16(addr->sa_family);
1555     }
1556     if (addr->sa_family == AF_NETLINK &&
1557         len >= sizeof(struct target_sockaddr_nl)) {
1558         struct target_sockaddr_nl *target_nl =
1559                (struct target_sockaddr_nl *)target_saddr;
1560         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1561         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1562     } else if (addr->sa_family == AF_PACKET) {
1563         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1564         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1565         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1566     } else if (addr->sa_family == AF_INET6 &&
1567                len >= sizeof(struct target_sockaddr_in6)) {
1568         struct target_sockaddr_in6 *target_in6 =
1569                (struct target_sockaddr_in6 *)target_saddr;
1570         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1571     }
1572     unlock_user(target_saddr, target_addr, len);
1573 
1574     return 0;
1575 }
1576 
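/* Convert the ancillary data (control messages) of a guest msghdr into the
 * host msghdr that will be passed to sendmsg().  SCM_RIGHTS file
 * descriptors and SCM_CREDENTIALS are converted field by field; any other
 * payload is copied through unchanged with a LOG_UNIMP warning.
 */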
1577 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1578                                            struct target_msghdr *target_msgh)
1579 {
1580     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1581     abi_long msg_controllen;
1582     abi_ulong target_cmsg_addr;
1583     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1584     socklen_t space = 0;
1585 
1586     msg_controllen = tswapal(target_msgh->msg_controllen);
1587     if (msg_controllen < sizeof (struct target_cmsghdr))
1588         goto the_end;
1589     target_cmsg_addr = tswapal(target_msgh->msg_control);
1590     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1591     target_cmsg_start = target_cmsg;
1592     if (!target_cmsg)
1593         return -TARGET_EFAULT;
1594 
1595     while (cmsg && target_cmsg) {
1596         void *data = CMSG_DATA(cmsg);
1597         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1598 
1599         int len = tswapal(target_cmsg->cmsg_len)
1600             - sizeof(struct target_cmsghdr);
1601 
1602         space += CMSG_SPACE(len);
1603         if (space > msgh->msg_controllen) {
1604             space -= CMSG_SPACE(len);
1605             /* This is a QEMU bug, since we allocated the payload
1606              * area ourselves (unlike overflow in host-to-target
1607              * conversion, which is just the guest giving us a buffer
1608              * that's too small). It can't happen for the payload types
1609              * we currently support; if it becomes an issue in future
1610              * we would need to improve our allocation strategy to
1611              * something more intelligent than "twice the size of the
1612              * target buffer we're reading from".
1613              */
1614             qemu_log_mask(LOG_UNIMP,
1615                           ("Unsupported ancillary data %d/%d: "
1616                            "unhandled msg size\n"),
1617                           tswap32(target_cmsg->cmsg_level),
1618                           tswap32(target_cmsg->cmsg_type));
1619             break;
1620         }
1621 
1622         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1623             cmsg->cmsg_level = SOL_SOCKET;
1624         } else {
1625             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1626         }
1627         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1628         cmsg->cmsg_len = CMSG_LEN(len);
1629 
1630         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1631             int *fd = (int *)data;
1632             int *target_fd = (int *)target_data;
1633             int i, numfds = len / sizeof(int);
1634 
1635             for (i = 0; i < numfds; i++) {
1636                 __get_user(fd[i], target_fd + i);
1637             }
1638         } else if (cmsg->cmsg_level == SOL_SOCKET
1639                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1640             struct ucred *cred = (struct ucred *)data;
1641             struct target_ucred *target_cred =
1642                 (struct target_ucred *)target_data;
1643 
1644             __get_user(cred->pid, &target_cred->pid);
1645             __get_user(cred->uid, &target_cred->uid);
1646             __get_user(cred->gid, &target_cred->gid);
1647         } else {
1648             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1649                           cmsg->cmsg_level, cmsg->cmsg_type);
1650             memcpy(data, target_data, len);
1651         }
1652 
1653         cmsg = CMSG_NXTHDR(msgh, cmsg);
1654         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1655                                          target_cmsg_start);
1656     }
1657     unlock_user(target_cmsg, target_cmsg_addr, 0);
1658  the_end:
1659     msgh->msg_controllen = space;
1660     return 0;
1661 }
1662 
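/* Convert the ancillary data returned by recvmsg() back into the guest's
 * control buffer, truncating (and setting MSG_CTRUNC) when the guest
 * buffer is too small.  Payloads whose target layout differs from the host
 * layout (e.g. SO_TIMESTAMP's struct timeval) are converted explicitly;
 * unknown payloads are byte-copied.
 */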
1663 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1664                                            struct msghdr *msgh)
1665 {
1666     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1667     abi_long msg_controllen;
1668     abi_ulong target_cmsg_addr;
1669     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1670     socklen_t space = 0;
1671 
1672     msg_controllen = tswapal(target_msgh->msg_controllen);
1673     if (msg_controllen < sizeof (struct target_cmsghdr))
1674         goto the_end;
1675     target_cmsg_addr = tswapal(target_msgh->msg_control);
1676     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1677     target_cmsg_start = target_cmsg;
1678     if (!target_cmsg)
1679         return -TARGET_EFAULT;
1680 
1681     while (cmsg && target_cmsg) {
1682         void *data = CMSG_DATA(cmsg);
1683         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1684 
1685         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1686         int tgt_len, tgt_space;
1687 
1688         /* We never copy a half-header but may copy half-data;
1689          * this is Linux's behaviour in put_cmsg(). Note that
1690          * truncation here is a guest problem (which we report
1691          * to the guest via the CTRUNC bit), unlike truncation
1692          * in target_to_host_cmsg, which is a QEMU bug.
1693          */
1694         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1695             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1696             break;
1697         }
1698 
1699         if (cmsg->cmsg_level == SOL_SOCKET) {
1700             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1701         } else {
1702             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1703         }
1704         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1705 
1706         /* Payload types which need a different size of payload on
1707          * the target must adjust tgt_len here.
1708          */
1709         tgt_len = len;
1710         switch (cmsg->cmsg_level) {
1711         case SOL_SOCKET:
1712             switch (cmsg->cmsg_type) {
1713             case SO_TIMESTAMP:
1714                 tgt_len = sizeof(struct target_timeval);
1715                 break;
1716             default:
1717                 break;
1718             }
1719             break;
1720         default:
1721             break;
1722         }
1723 
1724         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1725             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1726             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1727         }
1728 
1729         /* We must now copy-and-convert len bytes of payload
1730          * into tgt_len bytes of destination space. Bear in mind
1731          * that in both source and destination we may be dealing
1732          * with a truncated value!
1733          */
1734         switch (cmsg->cmsg_level) {
1735         case SOL_SOCKET:
1736             switch (cmsg->cmsg_type) {
1737             case SCM_RIGHTS:
1738             {
1739                 int *fd = (int *)data;
1740                 int *target_fd = (int *)target_data;
1741                 int i, numfds = tgt_len / sizeof(int);
1742 
1743                 for (i = 0; i < numfds; i++) {
1744                     __put_user(fd[i], target_fd + i);
1745                 }
1746                 break;
1747             }
1748             case SO_TIMESTAMP:
1749             {
1750                 struct timeval *tv = (struct timeval *)data;
1751                 struct target_timeval *target_tv =
1752                     (struct target_timeval *)target_data;
1753 
1754                 if (len != sizeof(struct timeval) ||
1755                     tgt_len != sizeof(struct target_timeval)) {
1756                     goto unimplemented;
1757                 }
1758 
1759                 /* copy struct timeval to target */
1760                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1761                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1762                 break;
1763             }
1764             case SCM_CREDENTIALS:
1765             {
1766                 struct ucred *cred = (struct ucred *)data;
1767                 struct target_ucred *target_cred =
1768                     (struct target_ucred *)target_data;
1769 
1770                 __put_user(cred->pid, &target_cred->pid);
1771                 __put_user(cred->uid, &target_cred->uid);
1772                 __put_user(cred->gid, &target_cred->gid);
1773                 break;
1774             }
1775             default:
1776                 goto unimplemented;
1777             }
1778             break;
1779 
1780         case SOL_IP:
1781             switch (cmsg->cmsg_type) {
1782             case IP_TTL:
1783             {
1784                 uint32_t *v = (uint32_t *)data;
1785                 uint32_t *t_int = (uint32_t *)target_data;
1786 
1787                 if (len != sizeof(uint32_t) ||
1788                     tgt_len != sizeof(uint32_t)) {
1789                     goto unimplemented;
1790                 }
1791                 __put_user(*v, t_int);
1792                 break;
1793             }
1794             case IP_RECVERR:
1795             {
1796                 struct errhdr_t {
1797                    struct sock_extended_err ee;
1798                    struct sockaddr_in offender;
1799                 };
1800                 struct errhdr_t *errh = (struct errhdr_t *)data;
1801                 struct errhdr_t *target_errh =
1802                     (struct errhdr_t *)target_data;
1803 
1804                 if (len != sizeof(struct errhdr_t) ||
1805                     tgt_len != sizeof(struct errhdr_t)) {
1806                     goto unimplemented;
1807                 }
1808                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1809                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1810                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1811                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1812                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1813                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1814                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1815                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1816                     (void *) &errh->offender, sizeof(errh->offender));
1817                 break;
1818             }
1819             default:
1820                 goto unimplemented;
1821             }
1822             break;
1823 
1824         case SOL_IPV6:
1825             switch (cmsg->cmsg_type) {
1826             case IPV6_HOPLIMIT:
1827             {
1828                 uint32_t *v = (uint32_t *)data;
1829                 uint32_t *t_int = (uint32_t *)target_data;
1830 
1831                 if (len != sizeof(uint32_t) ||
1832                     tgt_len != sizeof(uint32_t)) {
1833                     goto unimplemented;
1834                 }
1835                 __put_user(*v, t_int);
1836                 break;
1837             }
1838             case IPV6_RECVERR:
1839             {
1840                 struct errhdr6_t {
1841                    struct sock_extended_err ee;
1842                    struct sockaddr_in6 offender;
1843                 };
1844                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1845                 struct errhdr6_t *target_errh =
1846                     (struct errhdr6_t *)target_data;
1847 
1848                 if (len != sizeof(struct errhdr6_t) ||
1849                     tgt_len != sizeof(struct errhdr6_t)) {
1850                     goto unimplemented;
1851                 }
1852                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1853                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1854                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1855                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1856                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1857                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1858                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1859                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1860                     (void *) &errh->offender, sizeof(errh->offender));
1861                 break;
1862             }
1863             default:
1864                 goto unimplemented;
1865             }
1866             break;
1867 
1868         default:
1869         unimplemented:
1870             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1871                           cmsg->cmsg_level, cmsg->cmsg_type);
1872             memcpy(target_data, data, MIN(len, tgt_len));
1873             if (tgt_len > len) {
1874                 memset(target_data + len, 0, tgt_len - len);
1875             }
1876         }
1877 
1878         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1879         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1880         if (msg_controllen < tgt_space) {
1881             tgt_space = msg_controllen;
1882         }
1883         msg_controllen -= tgt_space;
1884         space += tgt_space;
1885         cmsg = CMSG_NXTHDR(msgh, cmsg);
1886         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1887                                          target_cmsg_start);
1888     }
1889     unlock_user(target_cmsg, target_cmsg_addr, space);
1890  the_end:
1891     target_msgh->msg_controllen = tswapal(space);
1892     return 0;
1893 }
1894 
1895 /* do_setsockopt() must return target values and target errnos. */
1896 static abi_long do_setsockopt(int sockfd, int level, int optname,
1897                               abi_ulong optval_addr, socklen_t optlen)
1898 {
1899     abi_long ret;
1900     int val;
1901     struct ip_mreqn *ip_mreq;
1902     struct ip_mreq_source *ip_mreq_source;
1903 
1904     switch(level) {
1905     case SOL_TCP:
1906         /* TCP options all take an 'int' value.  */
1907         if (optlen < sizeof(uint32_t))
1908             return -TARGET_EINVAL;
1909 
1910         if (get_user_u32(val, optval_addr))
1911             return -TARGET_EFAULT;
1912         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1913         break;
1914     case SOL_IP:
1915         switch(optname) {
1916         case IP_TOS:
1917         case IP_TTL:
1918         case IP_HDRINCL:
1919         case IP_ROUTER_ALERT:
1920         case IP_RECVOPTS:
1921         case IP_RETOPTS:
1922         case IP_PKTINFO:
1923         case IP_MTU_DISCOVER:
1924         case IP_RECVERR:
1925         case IP_RECVTTL:
1926         case IP_RECVTOS:
1927 #ifdef IP_FREEBIND
1928         case IP_FREEBIND:
1929 #endif
1930         case IP_MULTICAST_TTL:
1931         case IP_MULTICAST_LOOP:
1932             val = 0;
1933             if (optlen >= sizeof(uint32_t)) {
1934                 if (get_user_u32(val, optval_addr))
1935                     return -TARGET_EFAULT;
1936             } else if (optlen >= 1) {
1937                 if (get_user_u8(val, optval_addr))
1938                     return -TARGET_EFAULT;
1939             }
1940             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1941             break;
1942         case IP_ADD_MEMBERSHIP:
1943         case IP_DROP_MEMBERSHIP:
1944             if (optlen < sizeof (struct target_ip_mreq) ||
1945                 optlen > sizeof (struct target_ip_mreqn))
1946                 return -TARGET_EINVAL;
1947 
1948             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1949             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1950             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1951             break;
1952 
1953         case IP_BLOCK_SOURCE:
1954         case IP_UNBLOCK_SOURCE:
1955         case IP_ADD_SOURCE_MEMBERSHIP:
1956         case IP_DROP_SOURCE_MEMBERSHIP:
1957             if (optlen != sizeof (struct target_ip_mreq_source))
1958                 return -TARGET_EINVAL;
1959 
1960             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1961             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1962             unlock_user(ip_mreq_source, optval_addr, 0);
1963             break;
1964 
1965         default:
1966             goto unimplemented;
1967         }
1968         break;
1969     case SOL_IPV6:
1970         switch (optname) {
1971         case IPV6_MTU_DISCOVER:
1972         case IPV6_MTU:
1973         case IPV6_V6ONLY:
1974         case IPV6_RECVPKTINFO:
1975         case IPV6_UNICAST_HOPS:
1976         case IPV6_MULTICAST_HOPS:
1977         case IPV6_MULTICAST_LOOP:
1978         case IPV6_RECVERR:
1979         case IPV6_RECVHOPLIMIT:
1980         case IPV6_2292HOPLIMIT:
1981         case IPV6_CHECKSUM:
1982         case IPV6_ADDRFORM:
1983         case IPV6_2292PKTINFO:
1984         case IPV6_RECVTCLASS:
1985         case IPV6_RECVRTHDR:
1986         case IPV6_2292RTHDR:
1987         case IPV6_RECVHOPOPTS:
1988         case IPV6_2292HOPOPTS:
1989         case IPV6_RECVDSTOPTS:
1990         case IPV6_2292DSTOPTS:
1991         case IPV6_TCLASS:
1992 #ifdef IPV6_RECVPATHMTU
1993         case IPV6_RECVPATHMTU:
1994 #endif
1995 #ifdef IPV6_TRANSPARENT
1996         case IPV6_TRANSPARENT:
1997 #endif
1998 #ifdef IPV6_FREEBIND
1999         case IPV6_FREEBIND:
2000 #endif
2001 #ifdef IPV6_RECVORIGDSTADDR
2002         case IPV6_RECVORIGDSTADDR:
2003 #endif
2004             val = 0;
2005             if (optlen < sizeof(uint32_t)) {
2006                 return -TARGET_EINVAL;
2007             }
2008             if (get_user_u32(val, optval_addr)) {
2009                 return -TARGET_EFAULT;
2010             }
2011             ret = get_errno(setsockopt(sockfd, level, optname,
2012                                        &val, sizeof(val)));
2013             break;
2014         case IPV6_PKTINFO:
2015         {
2016             struct in6_pktinfo pki;
2017 
2018             if (optlen < sizeof(pki)) {
2019                 return -TARGET_EINVAL;
2020             }
2021 
2022             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2023                 return -TARGET_EFAULT;
2024             }
2025 
2026             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2027 
2028             ret = get_errno(setsockopt(sockfd, level, optname,
2029                                        &pki, sizeof(pki)));
2030             break;
2031         }
2032         case IPV6_ADD_MEMBERSHIP:
2033         case IPV6_DROP_MEMBERSHIP:
2034         {
2035             struct ipv6_mreq ipv6mreq;
2036 
2037             if (optlen < sizeof(ipv6mreq)) {
2038                 return -TARGET_EINVAL;
2039             }
2040 
2041             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2042                 return -TARGET_EFAULT;
2043             }
2044 
2045             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2046 
2047             ret = get_errno(setsockopt(sockfd, level, optname,
2048                                        &ipv6mreq, sizeof(ipv6mreq)));
2049             break;
2050         }
2051         default:
2052             goto unimplemented;
2053         }
2054         break;
2055     case SOL_ICMPV6:
2056         switch (optname) {
2057         case ICMPV6_FILTER:
2058         {
2059             struct icmp6_filter icmp6f;
2060 
2061             if (optlen > sizeof(icmp6f)) {
2062                 optlen = sizeof(icmp6f);
2063             }
2064 
2065             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2066                 return -TARGET_EFAULT;
2067             }
2068 
2069             for (val = 0; val < 8; val++) {
2070                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2071             }
2072 
2073             ret = get_errno(setsockopt(sockfd, level, optname,
2074                                        &icmp6f, optlen));
2075             break;
2076         }
2077         default:
2078             goto unimplemented;
2079         }
2080         break;
2081     case SOL_RAW:
2082         switch (optname) {
2083         case ICMP_FILTER:
2084         case IPV6_CHECKSUM:
2085             /* these options take a u32 value */
2086             if (optlen < sizeof(uint32_t)) {
2087                 return -TARGET_EINVAL;
2088             }
2089 
2090             if (get_user_u32(val, optval_addr)) {
2091                 return -TARGET_EFAULT;
2092             }
2093             ret = get_errno(setsockopt(sockfd, level, optname,
2094                                        &val, sizeof(val)));
2095             break;
2096 
2097         default:
2098             goto unimplemented;
2099         }
2100         break;
2101 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2102     case SOL_ALG:
2103         switch (optname) {
2104         case ALG_SET_KEY:
2105         {
2106             char *alg_key = g_malloc(optlen);
2107 
2108             if (!alg_key) {
2109                 return -TARGET_ENOMEM;
2110             }
2111             if (copy_from_user(alg_key, optval_addr, optlen)) {
2112                 g_free(alg_key);
2113                 return -TARGET_EFAULT;
2114             }
2115             ret = get_errno(setsockopt(sockfd, level, optname,
2116                                        alg_key, optlen));
2117             g_free(alg_key);
2118             break;
2119         }
2120         case ALG_SET_AEAD_AUTHSIZE:
2121         {
2122             ret = get_errno(setsockopt(sockfd, level, optname,
2123                                        NULL, optlen));
2124             break;
2125         }
2126         default:
2127             goto unimplemented;
2128         }
2129         break;
2130 #endif
2131     case TARGET_SOL_SOCKET:
2132         switch (optname) {
2133         case TARGET_SO_RCVTIMEO:
2134         {
2135                 struct timeval tv;
2136 
2137                 optname = SO_RCVTIMEO;
2138 
2139 set_timeout:
2140                 if (optlen != sizeof(struct target_timeval)) {
2141                     return -TARGET_EINVAL;
2142                 }
2143 
2144                 if (copy_from_user_timeval(&tv, optval_addr)) {
2145                     return -TARGET_EFAULT;
2146                 }
2147 
2148                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2149                                 &tv, sizeof(tv)));
2150                 return ret;
2151         }
2152         case TARGET_SO_SNDTIMEO:
2153                 optname = SO_SNDTIMEO;
2154                 goto set_timeout;
2155         case TARGET_SO_ATTACH_FILTER:
2156         {
2157                 struct target_sock_fprog *tfprog;
2158                 struct target_sock_filter *tfilter;
2159                 struct sock_fprog fprog;
2160                 struct sock_filter *filter;
2161                 int i;
2162 
2163                 if (optlen != sizeof(*tfprog)) {
2164                     return -TARGET_EINVAL;
2165                 }
2166                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2167                     return -TARGET_EFAULT;
2168                 }
2169                 if (!lock_user_struct(VERIFY_READ, tfilter,
2170                                       tswapal(tfprog->filter), 0)) {
2171                     unlock_user_struct(tfprog, optval_addr, 1);
2172                     return -TARGET_EFAULT;
2173                 }
2174 
2175                 fprog.len = tswap16(tfprog->len);
2176                 filter = g_try_new(struct sock_filter, fprog.len);
2177                 if (filter == NULL) {
2178                     unlock_user_struct(tfilter, tfprog->filter, 1);
2179                     unlock_user_struct(tfprog, optval_addr, 1);
2180                     return -TARGET_ENOMEM;
2181                 }
2182                 for (i = 0; i < fprog.len; i++) {
2183                     filter[i].code = tswap16(tfilter[i].code);
2184                     filter[i].jt = tfilter[i].jt;
2185                     filter[i].jf = tfilter[i].jf;
2186                     filter[i].k = tswap32(tfilter[i].k);
2187                 }
2188                 fprog.filter = filter;
2189 
2190                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2191                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2192                 g_free(filter);
2193 
2194                 unlock_user_struct(tfilter, tfprog->filter, 1);
2195                 unlock_user_struct(tfprog, optval_addr, 1);
2196                 return ret;
2197         }
2198         case TARGET_SO_BINDTODEVICE:
2199         {
2200                 char *dev_ifname, *addr_ifname;
2201 
2202                 if (optlen > IFNAMSIZ - 1) {
2203                     optlen = IFNAMSIZ - 1;
2204                 }
2205                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2206                 if (!dev_ifname) {
2207                     return -TARGET_EFAULT;
2208                 }
2209                 optname = SO_BINDTODEVICE;
2210                 addr_ifname = alloca(IFNAMSIZ);
2211                 memcpy(addr_ifname, dev_ifname, optlen);
2212                 addr_ifname[optlen] = 0;
2213                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2214                                            addr_ifname, optlen));
2215                 unlock_user(dev_ifname, optval_addr, 0);
2216                 return ret;
2217         }
2218         case TARGET_SO_LINGER:
2219         {
2220                 struct linger lg;
2221                 struct target_linger *tlg;
2222 
2223                 if (optlen != sizeof(struct target_linger)) {
2224                     return -TARGET_EINVAL;
2225                 }
2226                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2227                     return -TARGET_EFAULT;
2228                 }
2229                 __get_user(lg.l_onoff, &tlg->l_onoff);
2230                 __get_user(lg.l_linger, &tlg->l_linger);
2231                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2232                                 &lg, sizeof(lg)));
2233                 unlock_user_struct(tlg, optval_addr, 0);
2234                 return ret;
2235         }
2236             /* Options with 'int' argument.  */
2237         case TARGET_SO_DEBUG:
2238                 optname = SO_DEBUG;
2239                 break;
2240         case TARGET_SO_REUSEADDR:
2241                 optname = SO_REUSEADDR;
2242                 break;
2243 #ifdef SO_REUSEPORT
2244         case TARGET_SO_REUSEPORT:
2245                 optname = SO_REUSEPORT;
2246                 break;
2247 #endif
2248         case TARGET_SO_TYPE:
2249                 optname = SO_TYPE;
2250                 break;
2251         case TARGET_SO_ERROR:
2252                 optname = SO_ERROR;
2253                 break;
2254         case TARGET_SO_DONTROUTE:
2255                 optname = SO_DONTROUTE;
2256                 break;
2257         case TARGET_SO_BROADCAST:
2258                 optname = SO_BROADCAST;
2259                 break;
2260         case TARGET_SO_SNDBUF:
2261                 optname = SO_SNDBUF;
2262                 break;
2263         case TARGET_SO_SNDBUFFORCE:
2264                 optname = SO_SNDBUFFORCE;
2265                 break;
2266         case TARGET_SO_RCVBUF:
2267                 optname = SO_RCVBUF;
2268                 break;
2269         case TARGET_SO_RCVBUFFORCE:
2270                 optname = SO_RCVBUFFORCE;
2271                 break;
2272         case TARGET_SO_KEEPALIVE:
2273                 optname = SO_KEEPALIVE;
2274                 break;
2275         case TARGET_SO_OOBINLINE:
2276                 optname = SO_OOBINLINE;
2277                 break;
2278         case TARGET_SO_NO_CHECK:
2279                 optname = SO_NO_CHECK;
2280                 break;
2281         case TARGET_SO_PRIORITY:
2282                 optname = SO_PRIORITY;
2283                 break;
2284 #ifdef SO_BSDCOMPAT
2285         case TARGET_SO_BSDCOMPAT:
2286                 optname = SO_BSDCOMPAT;
2287                 break;
2288 #endif
2289         case TARGET_SO_PASSCRED:
2290                 optname = SO_PASSCRED;
2291                 break;
2292         case TARGET_SO_PASSSEC:
2293                 optname = SO_PASSSEC;
2294                 break;
2295         case TARGET_SO_TIMESTAMP:
2296                 optname = SO_TIMESTAMP;
2297                 break;
2298         case TARGET_SO_RCVLOWAT:
2299                 optname = SO_RCVLOWAT;
2300                 break;
2301         default:
2302             goto unimplemented;
2303         }
2304         if (optlen < sizeof(uint32_t))
2305             return -TARGET_EINVAL;
2306 
2307         if (get_user_u32(val, optval_addr))
2308             return -TARGET_EFAULT;
2309         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2310         break;
2311 #ifdef SOL_NETLINK
2312     case SOL_NETLINK:
2313         switch (optname) {
2314         case NETLINK_PKTINFO:
2315         case NETLINK_ADD_MEMBERSHIP:
2316         case NETLINK_DROP_MEMBERSHIP:
2317         case NETLINK_BROADCAST_ERROR:
2318         case NETLINK_NO_ENOBUFS:
2319 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2320         case NETLINK_LISTEN_ALL_NSID:
2321         case NETLINK_CAP_ACK:
2322 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2323 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2324         case NETLINK_EXT_ACK:
2325 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2326 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2327         case NETLINK_GET_STRICT_CHK:
2328 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2329             break;
2330         default:
2331             goto unimplemented;
2332         }
2333         val = 0;
2334         if (optlen < sizeof(uint32_t)) {
2335             return -TARGET_EINVAL;
2336         }
2337         if (get_user_u32(val, optval_addr)) {
2338             return -TARGET_EFAULT;
2339         }
2340         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2341                                    sizeof(val)));
2342         break;
2343 #endif /* SOL_NETLINK */
2344     default:
2345     unimplemented:
2346         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2347                       level, optname);
2348         ret = -TARGET_ENOPROTOOPT;
2349     }
2350     return ret;
2351 }
2352 
2353 /* do_getsockopt() must return target values and target errnos. */
2354 static abi_long do_getsockopt(int sockfd, int level, int optname,
2355                               abi_ulong optval_addr, abi_ulong optlen)
2356 {
2357     abi_long ret;
2358     int len, val;
2359     socklen_t lv;
2360 
2361     switch(level) {
2362     case TARGET_SOL_SOCKET:
2363         level = SOL_SOCKET;
2364         switch (optname) {
2365         /* These don't just return a single integer */
2366         case TARGET_SO_PEERNAME:
2367             goto unimplemented;
2368         case TARGET_SO_RCVTIMEO: {
2369             struct timeval tv;
2370             socklen_t tvlen;
2371 
2372             optname = SO_RCVTIMEO;
2373 
2374 get_timeout:
2375             if (get_user_u32(len, optlen)) {
2376                 return -TARGET_EFAULT;
2377             }
2378             if (len < 0) {
2379                 return -TARGET_EINVAL;
2380             }
2381 
2382             tvlen = sizeof(tv);
2383             ret = get_errno(getsockopt(sockfd, level, optname,
2384                                        &tv, &tvlen));
2385             if (ret < 0) {
2386                 return ret;
2387             }
2388             if (len > sizeof(struct target_timeval)) {
2389                 len = sizeof(struct target_timeval);
2390             }
2391             if (copy_to_user_timeval(optval_addr, &tv)) {
2392                 return -TARGET_EFAULT;
2393             }
2394             if (put_user_u32(len, optlen)) {
2395                 return -TARGET_EFAULT;
2396             }
2397             break;
2398         }
2399         case TARGET_SO_SNDTIMEO:
2400             optname = SO_SNDTIMEO;
2401             goto get_timeout;
2402         case TARGET_SO_PEERCRED: {
2403             struct ucred cr;
2404             socklen_t crlen;
2405             struct target_ucred *tcr;
2406 
2407             if (get_user_u32(len, optlen)) {
2408                 return -TARGET_EFAULT;
2409             }
2410             if (len < 0) {
2411                 return -TARGET_EINVAL;
2412             }
2413 
2414             crlen = sizeof(cr);
2415             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2416                                        &cr, &crlen));
2417             if (ret < 0) {
2418                 return ret;
2419             }
2420             if (len > crlen) {
2421                 len = crlen;
2422             }
2423             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2424                 return -TARGET_EFAULT;
2425             }
2426             __put_user(cr.pid, &tcr->pid);
2427             __put_user(cr.uid, &tcr->uid);
2428             __put_user(cr.gid, &tcr->gid);
2429             unlock_user_struct(tcr, optval_addr, 1);
2430             if (put_user_u32(len, optlen)) {
2431                 return -TARGET_EFAULT;
2432             }
2433             break;
2434         }
2435         case TARGET_SO_PEERSEC: {
2436             char *name;
2437 
2438             if (get_user_u32(len, optlen)) {
2439                 return -TARGET_EFAULT;
2440             }
2441             if (len < 0) {
2442                 return -TARGET_EINVAL;
2443             }
2444             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2445             if (!name) {
2446                 return -TARGET_EFAULT;
2447             }
2448             lv = len;
2449             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2450                                        name, &lv));
2451             if (put_user_u32(lv, optlen)) {
2452                 ret = -TARGET_EFAULT;
2453             }
2454             unlock_user(name, optval_addr, lv);
2455             break;
2456         }
2457         case TARGET_SO_LINGER:
2458         {
2459             struct linger lg;
2460             socklen_t lglen;
2461             struct target_linger *tlg;
2462 
2463             if (get_user_u32(len, optlen)) {
2464                 return -TARGET_EFAULT;
2465             }
2466             if (len < 0) {
2467                 return -TARGET_EINVAL;
2468             }
2469 
2470             lglen = sizeof(lg);
2471             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2472                                        &lg, &lglen));
2473             if (ret < 0) {
2474                 return ret;
2475             }
2476             if (len > lglen) {
2477                 len = lglen;
2478             }
2479             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2480                 return -TARGET_EFAULT;
2481             }
2482             __put_user(lg.l_onoff, &tlg->l_onoff);
2483             __put_user(lg.l_linger, &tlg->l_linger);
2484             unlock_user_struct(tlg, optval_addr, 1);
2485             if (put_user_u32(len, optlen)) {
2486                 return -TARGET_EFAULT;
2487             }
2488             break;
2489         }
2490         /* Options with 'int' argument.  */
2491         case TARGET_SO_DEBUG:
2492             optname = SO_DEBUG;
2493             goto int_case;
2494         case TARGET_SO_REUSEADDR:
2495             optname = SO_REUSEADDR;
2496             goto int_case;
2497 #ifdef SO_REUSEPORT
2498         case TARGET_SO_REUSEPORT:
2499             optname = SO_REUSEPORT;
2500             goto int_case;
2501 #endif
2502         case TARGET_SO_TYPE:
2503             optname = SO_TYPE;
2504             goto int_case;
2505         case TARGET_SO_ERROR:
2506             optname = SO_ERROR;
2507             goto int_case;
2508         case TARGET_SO_DONTROUTE:
2509             optname = SO_DONTROUTE;
2510             goto int_case;
2511         case TARGET_SO_BROADCAST:
2512             optname = SO_BROADCAST;
2513             goto int_case;
2514         case TARGET_SO_SNDBUF:
2515             optname = SO_SNDBUF;
2516             goto int_case;
2517         case TARGET_SO_RCVBUF:
2518             optname = SO_RCVBUF;
2519             goto int_case;
2520         case TARGET_SO_KEEPALIVE:
2521             optname = SO_KEEPALIVE;
2522             goto int_case;
2523         case TARGET_SO_OOBINLINE:
2524             optname = SO_OOBINLINE;
2525             goto int_case;
2526         case TARGET_SO_NO_CHECK:
2527             optname = SO_NO_CHECK;
2528             goto int_case;
2529         case TARGET_SO_PRIORITY:
2530             optname = SO_PRIORITY;
2531             goto int_case;
2532 #ifdef SO_BSDCOMPAT
2533         case TARGET_SO_BSDCOMPAT:
2534             optname = SO_BSDCOMPAT;
2535             goto int_case;
2536 #endif
2537         case TARGET_SO_PASSCRED:
2538             optname = SO_PASSCRED;
2539             goto int_case;
2540         case TARGET_SO_TIMESTAMP:
2541             optname = SO_TIMESTAMP;
2542             goto int_case;
2543         case TARGET_SO_RCVLOWAT:
2544             optname = SO_RCVLOWAT;
2545             goto int_case;
2546         case TARGET_SO_ACCEPTCONN:
2547             optname = SO_ACCEPTCONN;
2548             goto int_case;
2549         default:
2550             goto int_case;
2551         }
2552         break;
2553     case SOL_TCP:
2554         /* TCP options all take an 'int' value.  */
2555     int_case:
2556         if (get_user_u32(len, optlen))
2557             return -TARGET_EFAULT;
2558         if (len < 0)
2559             return -TARGET_EINVAL;
2560         lv = sizeof(lv);
2561         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2562         if (ret < 0)
2563             return ret;
2564         if (optname == SO_TYPE) {
2565             val = host_to_target_sock_type(val);
2566         }
2567         if (len > lv)
2568             len = lv;
2569         if (len == 4) {
2570             if (put_user_u32(val, optval_addr))
2571                 return -TARGET_EFAULT;
2572         } else {
2573             if (put_user_u8(val, optval_addr))
2574                 return -TARGET_EFAULT;
2575         }
2576         if (put_user_u32(len, optlen))
2577             return -TARGET_EFAULT;
2578         break;
2579     case SOL_IP:
2580         switch(optname) {
2581         case IP_TOS:
2582         case IP_TTL:
2583         case IP_HDRINCL:
2584         case IP_ROUTER_ALERT:
2585         case IP_RECVOPTS:
2586         case IP_RETOPTS:
2587         case IP_PKTINFO:
2588         case IP_MTU_DISCOVER:
2589         case IP_RECVERR:
2590         case IP_RECVTOS:
2591 #ifdef IP_FREEBIND
2592         case IP_FREEBIND:
2593 #endif
2594         case IP_MULTICAST_TTL:
2595         case IP_MULTICAST_LOOP:
2596             if (get_user_u32(len, optlen))
2597                 return -TARGET_EFAULT;
2598             if (len < 0)
2599                 return -TARGET_EINVAL;
2600             lv = sizeof(lv);
2601             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2602             if (ret < 0)
2603                 return ret;
2604             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2605                 len = 1;
2606                 if (put_user_u32(len, optlen)
2607                     || put_user_u8(val, optval_addr))
2608                     return -TARGET_EFAULT;
2609             } else {
2610                 if (len > sizeof(int))
2611                     len = sizeof(int);
2612                 if (put_user_u32(len, optlen)
2613                     || put_user_u32(val, optval_addr))
2614                     return -TARGET_EFAULT;
2615             }
2616             break;
2617         default:
2618             ret = -TARGET_ENOPROTOOPT;
2619             break;
2620         }
2621         break;
2622     case SOL_IPV6:
2623         switch (optname) {
2624         case IPV6_MTU_DISCOVER:
2625         case IPV6_MTU:
2626         case IPV6_V6ONLY:
2627         case IPV6_RECVPKTINFO:
2628         case IPV6_UNICAST_HOPS:
2629         case IPV6_MULTICAST_HOPS:
2630         case IPV6_MULTICAST_LOOP:
2631         case IPV6_RECVERR:
2632         case IPV6_RECVHOPLIMIT:
2633         case IPV6_2292HOPLIMIT:
2634         case IPV6_CHECKSUM:
2635         case IPV6_ADDRFORM:
2636         case IPV6_2292PKTINFO:
2637         case IPV6_RECVTCLASS:
2638         case IPV6_RECVRTHDR:
2639         case IPV6_2292RTHDR:
2640         case IPV6_RECVHOPOPTS:
2641         case IPV6_2292HOPOPTS:
2642         case IPV6_RECVDSTOPTS:
2643         case IPV6_2292DSTOPTS:
2644         case IPV6_TCLASS:
2645 #ifdef IPV6_RECVPATHMTU
2646         case IPV6_RECVPATHMTU:
2647 #endif
2648 #ifdef IPV6_TRANSPARENT
2649         case IPV6_TRANSPARENT:
2650 #endif
2651 #ifdef IPV6_FREEBIND
2652         case IPV6_FREEBIND:
2653 #endif
2654 #ifdef IPV6_RECVORIGDSTADDR
2655         case IPV6_RECVORIGDSTADDR:
2656 #endif
2657             if (get_user_u32(len, optlen))
2658                 return -TARGET_EFAULT;
2659             if (len < 0)
2660                 return -TARGET_EINVAL;
2661             lv = sizeof(lv);
2662             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2663             if (ret < 0)
2664                 return ret;
2665             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2666                 len = 1;
2667                 if (put_user_u32(len, optlen)
2668                     || put_user_u8(val, optval_addr))
2669                     return -TARGET_EFAULT;
2670             } else {
2671                 if (len > sizeof(int))
2672                     len = sizeof(int);
2673                 if (put_user_u32(len, optlen)
2674                     || put_user_u32(val, optval_addr))
2675                     return -TARGET_EFAULT;
2676             }
2677             break;
2678         default:
2679             ret = -TARGET_ENOPROTOOPT;
2680             break;
2681         }
2682         break;
2683 #ifdef SOL_NETLINK
2684     case SOL_NETLINK:
2685         switch (optname) {
2686         case NETLINK_PKTINFO:
2687         case NETLINK_BROADCAST_ERROR:
2688         case NETLINK_NO_ENOBUFS:
2689 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2690         case NETLINK_LISTEN_ALL_NSID:
2691         case NETLINK_CAP_ACK:
2692 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2693 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2694         case NETLINK_EXT_ACK:
2695 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2696 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2697         case NETLINK_GET_STRICT_CHK:
2698 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2699             if (get_user_u32(len, optlen)) {
2700                 return -TARGET_EFAULT;
2701             }
2702             if (len != sizeof(val)) {
2703                 return -TARGET_EINVAL;
2704             }
2705             lv = len;
2706             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2707             if (ret < 0) {
2708                 return ret;
2709             }
2710             if (put_user_u32(lv, optlen)
2711                 || put_user_u32(val, optval_addr)) {
2712                 return -TARGET_EFAULT;
2713             }
2714             break;
2715 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2716         case NETLINK_LIST_MEMBERSHIPS:
2717         {
2718             uint32_t *results;
2719             int i;
2720             if (get_user_u32(len, optlen)) {
2721                 return -TARGET_EFAULT;
2722             }
2723             if (len < 0) {
2724                 return -TARGET_EINVAL;
2725             }
2726             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2727             if (!results) {
2728                 return -TARGET_EFAULT;
2729             }
2730             lv = len;
2731             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2732             if (ret < 0) {
2733                 unlock_user(results, optval_addr, 0);
2734                 return ret;
2735             }
2736             /* swap host endianness to target endianness. */
2737             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2738                 results[i] = tswap32(results[i]);
2739             }
2740             if (put_user_u32(lv, optlen)) {
2741                 return -TARGET_EFAULT;
2742             }
2743             unlock_user(results, optval_addr, 0);
2744             break;
2745         }
2746 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2747         default:
2748             goto unimplemented;
2749         }
2750         break;
2751 #endif /* SOL_NETLINK */
2752     default:
2753     unimplemented:
2754         qemu_log_mask(LOG_UNIMP,
2755                       "getsockopt level=%d optname=%d not yet supported\n",
2756                       level, optname);
2757         ret = -TARGET_EOPNOTSUPP;
2758         break;
2759     }
2760     return ret;
2761 }
2762 
2763 /* Convert target low/high pair representing file offset into the host
2764  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2765  * as the kernel doesn't handle them either.
2766  */
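/* For example, with a 32-bit target on a 64-bit host, tlow = 0 and
 * thigh = 1 combine into off = 0x100000000, so *hlow = 0x100000000 and
 * *hhigh = 0.  The shifts are split in two so that a shift by the full
 * register width (e.g. on a 64-bit target) stays well-defined C.
 */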
2767 static void target_to_host_low_high(abi_ulong tlow,
2768                                     abi_ulong thigh,
2769                                     unsigned long *hlow,
2770                                     unsigned long *hhigh)
2771 {
2772     uint64_t off = tlow |
2773         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2774         TARGET_LONG_BITS / 2;
2775 
2776     *hlow = off;
2777     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2778 }
2779 
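/* Lock a guest iovec array into host memory and return a host iovec array
 * with the same number of entries.  On failure NULL is returned with errno
 * set (0 for an empty vector, EINVAL/EFAULT/ENOMEM otherwise); a non-NULL
 * result must later be released with unlock_iovec().
 */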
2780 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2781                                 abi_ulong count, int copy)
2782 {
2783     struct target_iovec *target_vec;
2784     struct iovec *vec;
2785     abi_ulong total_len, max_len;
2786     int i;
2787     int err = 0;
2788     bool bad_address = false;
2789 
2790     if (count == 0) {
2791         errno = 0;
2792         return NULL;
2793     }
2794     if (count > IOV_MAX) {
2795         errno = EINVAL;
2796         return NULL;
2797     }
2798 
2799     vec = g_try_new0(struct iovec, count);
2800     if (vec == NULL) {
2801         errno = ENOMEM;
2802         return NULL;
2803     }
2804 
2805     target_vec = lock_user(VERIFY_READ, target_addr,
2806                            count * sizeof(struct target_iovec), 1);
2807     if (target_vec == NULL) {
2808         err = EFAULT;
2809         goto fail2;
2810     }
2811 
2812     /* ??? If host page size > target page size, this will result in a
2813        value larger than what we can actually support.  */
2814     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2815     total_len = 0;
2816 
2817     for (i = 0; i < count; i++) {
2818         abi_ulong base = tswapal(target_vec[i].iov_base);
2819         abi_long len = tswapal(target_vec[i].iov_len);
2820 
2821         if (len < 0) {
2822             err = EINVAL;
2823             goto fail;
2824         } else if (len == 0) {
2825             /* Zero length pointer is ignored.  */
2826             vec[i].iov_base = 0;
2827         } else {
2828             vec[i].iov_base = lock_user(type, base, len, copy);
2829             /* If the first buffer pointer is bad, this is a fault.  But
2830              * subsequent bad buffers will result in a partial write; this
2831              * is realized by filling the vector with null pointers and
2832              * zero lengths. */
2833             if (!vec[i].iov_base) {
2834                 if (i == 0) {
2835                     err = EFAULT;
2836                     goto fail;
2837                 } else {
2838                     bad_address = true;
2839                 }
2840             }
2841             if (bad_address) {
2842                 len = 0;
2843             }
2844             if (len > max_len - total_len) {
2845                 len = max_len - total_len;
2846             }
2847         }
2848         vec[i].iov_len = len;
2849         total_len += len;
2850     }
2851 
2852     unlock_user(target_vec, target_addr, 0);
2853     return vec;
2854 
2855  fail:
2856     while (--i >= 0) {
2857         if (tswapal(target_vec[i].iov_len) > 0) {
2858             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2859         }
2860     }
2861     unlock_user(target_vec, target_addr, 0);
2862  fail2:
2863     g_free(vec);
2864     errno = err;
2865     return NULL;
2866 }
2867 
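/* Undo lock_iovec(): unlock every guest buffer (copying data back to the
 * guest when 'copy' is set) and free the host iovec array.
 */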
2868 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2869                          abi_ulong count, int copy)
2870 {
2871     struct target_iovec *target_vec;
2872     int i;
2873 
2874     target_vec = lock_user(VERIFY_READ, target_addr,
2875                            count * sizeof(struct target_iovec), 1);
2876     if (target_vec) {
2877         for (i = 0; i < count; i++) {
2878             abi_ulong base = tswapal(target_vec[i].iov_base);
2879             abi_long len = tswapal(target_vec[i].iov_len);
2880             if (len < 0) {
2881                 break;
2882             }
2883             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2884         }
2885         unlock_user(target_vec, target_addr, 0);
2886     }
2887 
2888     g_free(vec);
2889 }
2890 
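/* Translate the guest socket type and its SOCK_CLOEXEC/SOCK_NONBLOCK flags
 * into host values in place.  Returns 0 on success, or -TARGET_EINVAL if a
 * requested flag cannot be emulated on this host.
 */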
2891 static inline int target_to_host_sock_type(int *type)
2892 {
2893     int host_type = 0;
2894     int target_type = *type;
2895 
2896     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2897     case TARGET_SOCK_DGRAM:
2898         host_type = SOCK_DGRAM;
2899         break;
2900     case TARGET_SOCK_STREAM:
2901         host_type = SOCK_STREAM;
2902         break;
2903     default:
2904         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2905         break;
2906     }
2907     if (target_type & TARGET_SOCK_CLOEXEC) {
2908 #if defined(SOCK_CLOEXEC)
2909         host_type |= SOCK_CLOEXEC;
2910 #else
2911         return -TARGET_EINVAL;
2912 #endif
2913     }
2914     if (target_type & TARGET_SOCK_NONBLOCK) {
2915 #if defined(SOCK_NONBLOCK)
2916         host_type |= SOCK_NONBLOCK;
2917 #elif !defined(O_NONBLOCK)
2918         return -TARGET_EINVAL;
2919 #endif
2920     }
2921     *type = host_type;
2922     return 0;
2923 }
2924 
2925 /* Try to emulate socket type flags after socket creation.  */
2926 static int sock_flags_fixup(int fd, int target_type)
2927 {
2928 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2929     if (target_type & TARGET_SOCK_NONBLOCK) {
2930         int flags = fcntl(fd, F_GETFL);
2931         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2932             close(fd);
2933             return -TARGET_EINVAL;
2934         }
2935     }
2936 #endif
2937     return fd;
2938 }
2939 
2940 /* do_socket() must return target values and target errnos. */
2941 static abi_long do_socket(int domain, int type, int protocol)
2942 {
2943     int target_type = type;
2944     int ret;
2945 
2946     ret = target_to_host_sock_type(&type);
2947     if (ret) {
2948         return ret;
2949     }
2950 
2951     if (domain == PF_NETLINK && !(
2952 #ifdef CONFIG_RTNETLINK
2953          protocol == NETLINK_ROUTE ||
2954 #endif
2955          protocol == NETLINK_KOBJECT_UEVENT ||
2956          protocol == NETLINK_AUDIT)) {
2957         return -EPFNOSUPPORT;
2958     }
2959 
2960     if (domain == AF_PACKET ||
2961         (domain == AF_INET && type == SOCK_PACKET)) {
2962         protocol = tswap16(protocol);
2963     }
2964 
2965     ret = get_errno(socket(domain, type, protocol));
2966     if (ret >= 0) {
2967         ret = sock_flags_fixup(ret, target_type);
2968         if (type == SOCK_PACKET) {
2969             /* Manage an obsolete case :
2970              * if socket type is SOCK_PACKET, bind by name
2971              */
2972             fd_trans_register(ret, &target_packet_trans);
2973         } else if (domain == PF_NETLINK) {
2974             switch (protocol) {
2975 #ifdef CONFIG_RTNETLINK
2976             case NETLINK_ROUTE:
2977                 fd_trans_register(ret, &target_netlink_route_trans);
2978                 break;
2979 #endif
2980             case NETLINK_KOBJECT_UEVENT:
2981                 /* nothing to do: messages are strings */
2982                 break;
2983             case NETLINK_AUDIT:
2984                 fd_trans_register(ret, &target_netlink_audit_trans);
2985                 break;
2986             default:
2987                 g_assert_not_reached();
2988             }
2989         }
2990     }
2991     return ret;
2992 }
2993 
2994 /* do_bind() must return target values and target errnos. */
2995 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2996                         socklen_t addrlen)
2997 {
2998     void *addr;
2999     abi_long ret;
3000 
3001     if ((int)addrlen < 0) {
3002         return -TARGET_EINVAL;
3003     }
3004 
3005     addr = alloca(addrlen+1);
3006 
3007     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3008     if (ret)
3009         return ret;
3010 
3011     return get_errno(bind(sockfd, addr, addrlen));
3012 }
3013 
3014 /* do_connect() must return target values and target errnos. */
3015 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3016                            socklen_t addrlen)
3017 {
3018     void *addr;
3019     abi_long ret;
3020 
3021     if ((int)addrlen < 0) {
3022         return -TARGET_EINVAL;
3023     }
3024 
3025     addr = alloca(addrlen+1);
3026 
3027     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3028     if (ret)
3029         return ret;
3030 
3031     return get_errno(safe_connect(sockfd, addr, addrlen));
3032 }
3033 
3034 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3035 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3036                                       int flags, int send)
3037 {
3038     abi_long ret, len;
3039     struct msghdr msg;
3040     abi_ulong count;
3041     struct iovec *vec;
3042     abi_ulong target_vec;
3043 
3044     if (msgp->msg_name) {
3045         msg.msg_namelen = tswap32(msgp->msg_namelen);
3046         msg.msg_name = alloca(msg.msg_namelen+1);
3047         ret = target_to_host_sockaddr(fd, msg.msg_name,
3048                                       tswapal(msgp->msg_name),
3049                                       msg.msg_namelen);
3050         if (ret == -TARGET_EFAULT) {
3051             /* For connected sockets msg_name and msg_namelen must
3052              * be ignored, so returning EFAULT immediately is wrong.
3053              * Instead, pass a bad msg_name to the host kernel, and
3054              * let it decide whether to return EFAULT or not.
3055              */
3056             msg.msg_name = (void *)-1;
3057         } else if (ret) {
3058             goto out2;
3059         }
3060     } else {
3061         msg.msg_name = NULL;
3062         msg.msg_namelen = 0;
3063     }
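    /* Allocate twice the guest's control buffer: host cmsg headers and
     * alignment may be larger than the target's, and target_to_host_cmsg
     * relies on this over-allocation (see the comment there).
     */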
3064     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3065     msg.msg_control = alloca(msg.msg_controllen);
3066     memset(msg.msg_control, 0, msg.msg_controllen);
3067 
3068     msg.msg_flags = tswap32(msgp->msg_flags);
3069 
3070     count = tswapal(msgp->msg_iovlen);
3071     target_vec = tswapal(msgp->msg_iov);
3072 
3073     if (count > IOV_MAX) {
3074         /* sendmsg/recvmsg return a different errno for this condition than
3075          * readv/writev, so we must catch it here before lock_iovec() does.
3076          */
3077         ret = -TARGET_EMSGSIZE;
3078         goto out2;
3079     }
3080 
3081     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3082                      target_vec, count, send);
3083     if (vec == NULL) {
3084         ret = -host_to_target_errno(errno);
3085         goto out2;
3086     }
3087     msg.msg_iovlen = count;
3088     msg.msg_iov = vec;
3089 
3090     if (send) {
3091         if (fd_trans_target_to_host_data(fd)) {
3092             void *host_msg;
3093 
3094             host_msg = g_malloc(msg.msg_iov->iov_len);
3095             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3096             ret = fd_trans_target_to_host_data(fd)(host_msg,
3097                                                    msg.msg_iov->iov_len);
3098             if (ret >= 0) {
3099                 msg.msg_iov->iov_base = host_msg;
3100                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3101             }
3102             g_free(host_msg);
3103         } else {
3104             ret = target_to_host_cmsg(&msg, msgp);
3105             if (ret == 0) {
3106                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3107             }
3108         }
3109     } else {
3110         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3111         if (!is_error(ret)) {
3112             len = ret;
3113             if (fd_trans_host_to_target_data(fd)) {
3114                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3115                                                MIN(msg.msg_iov->iov_len, len));
3116             } else {
3117                 ret = host_to_target_cmsg(msgp, &msg);
3118             }
3119             if (!is_error(ret)) {
3120                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3121                 msgp->msg_flags = tswap32(msg.msg_flags);
3122                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3123                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3124                                     msg.msg_name, msg.msg_namelen);
3125                     if (ret) {
3126                         goto out;
3127                     }
3128                 }
3129 
3130                 ret = len;
3131             }
3132         }
3133     }
3134 
3135 out:
3136     unlock_iovec(vec, target_vec, count, !send);
3137 out2:
3138     return ret;
3139 }
3140 
3141 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3142                                int flags, int send)
3143 {
3144     abi_long ret;
3145     struct target_msghdr *msgp;
3146 
3147     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3148                           msgp,
3149                           target_msg,
3150                           send ? 1 : 0)) {
3151         return -TARGET_EFAULT;
3152     }
3153     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3154     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3155     return ret;
3156 }
3157 
3158 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3159  * so it might not have this *mmsg-specific flag either.
3160  */
3161 #ifndef MSG_WAITFORONE
3162 #define MSG_WAITFORONE 0x10000
3163 #endif
3164 
3165 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3166                                 unsigned int vlen, unsigned int flags,
3167                                 int send)
3168 {
3169     struct target_mmsghdr *mmsgp;
3170     abi_long ret = 0;
3171     int i;
3172 
3173     if (vlen > UIO_MAXIOV) {
3174         vlen = UIO_MAXIOV;
3175     }
3176 
3177     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3178     if (!mmsgp) {
3179         return -TARGET_EFAULT;
3180     }
3181 
3182     for (i = 0; i < vlen; i++) {
3183         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3184         if (is_error(ret)) {
3185             break;
3186         }
3187         mmsgp[i].msg_len = tswap32(ret);
3188         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3189         if (flags & MSG_WAITFORONE) {
3190             flags |= MSG_DONTWAIT;
3191         }
3192     }
3193 
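    /* Only copy back as many msg_len entries as were actually filled in. */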
3194     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3195 
3196     /* Return number of datagrams sent if we sent any at all;
3197      * otherwise return the error.
3198      */
3199     if (i) {
3200         return i;
3201     }
3202     return ret;
3203 }
3204 
3205 /* do_accept4() Must return target values and target errnos. */
3206 static abi_long do_accept4(int fd, abi_ulong target_addr,
3207                            abi_ulong target_addrlen_addr, int flags)
3208 {
3209     socklen_t addrlen, ret_addrlen;
3210     void *addr;
3211     abi_long ret;
3212     int host_flags;
3213 
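    /*
     * The accept4() flags SOCK_CLOEXEC and SOCK_NONBLOCK share their values
     * with O_CLOEXEC/O_NONBLOCK, which differ between targets, so the fcntl
     * flag table can be reused for the conversion.
     */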
3214     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3215 
3216     if (target_addr == 0) {
3217         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3218     }
3219 
3220     /* linux returns EINVAL if addrlen pointer is invalid */
3221     if (get_user_u32(addrlen, target_addrlen_addr))
3222         return -TARGET_EINVAL;
3223 
3224     if ((int)addrlen < 0) {
3225         return -TARGET_EINVAL;
3226     }
3227 
3228     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3229         return -TARGET_EINVAL;
3230 
3231     addr = alloca(addrlen);
3232 
3233     ret_addrlen = addrlen;
3234     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3235     if (!is_error(ret)) {
3236         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3237         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3238             ret = -TARGET_EFAULT;
3239         }
3240     }
3241     return ret;
3242 }
3243 
3244 /* do_getpeername() Must return target values and target errnos. */
3245 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3246                                abi_ulong target_addrlen_addr)
3247 {
3248     socklen_t addrlen, ret_addrlen;
3249     void *addr;
3250     abi_long ret;
3251 
3252     if (get_user_u32(addrlen, target_addrlen_addr))
3253         return -TARGET_EFAULT;
3254 
3255     if ((int)addrlen < 0) {
3256         return -TARGET_EINVAL;
3257     }
3258 
3259     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3260         return -TARGET_EFAULT;
3261 
3262     addr = alloca(addrlen);
3263 
3264     ret_addrlen = addrlen;
3265     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3266     if (!is_error(ret)) {
3267         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3268         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3269             ret = -TARGET_EFAULT;
3270         }
3271     }
3272     return ret;
3273 }
3274 
3275 /* do_getsockname() Must return target values and target errnos. */
3276 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3277                                abi_ulong target_addrlen_addr)
3278 {
3279     socklen_t addrlen, ret_addrlen;
3280     void *addr;
3281     abi_long ret;
3282 
3283     if (get_user_u32(addrlen, target_addrlen_addr))
3284         return -TARGET_EFAULT;
3285 
3286     if ((int)addrlen < 0) {
3287         return -TARGET_EINVAL;
3288     }
3289 
3290     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3291         return -TARGET_EFAULT;
3292 
3293     addr = alloca(addrlen);
3294 
3295     ret_addrlen = addrlen;
3296     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3297     if (!is_error(ret)) {
3298         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3299         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3300             ret = -TARGET_EFAULT;
3301         }
3302     }
3303     return ret;
3304 }
3305 
3306 /* do_socketpair() Must return target values and target errnos. */
3307 static abi_long do_socketpair(int domain, int type, int protocol,
3308                               abi_ulong target_tab_addr)
3309 {
3310     int tab[2];
3311     abi_long ret;
3312 
3313     target_to_host_sock_type(&type);
3314 
3315     ret = get_errno(socketpair(domain, type, protocol, tab));
3316     if (!is_error(ret)) {
3317         if (put_user_s32(tab[0], target_tab_addr)
3318             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3319             ret = -TARGET_EFAULT;
3320     }
3321     return ret;
3322 }
3323 
3324 /* do_sendto() Must return target values and target errnos. */
3325 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3326                           abi_ulong target_addr, socklen_t addrlen)
3327 {
3328     void *addr;
3329     void *host_msg;
3330     void *copy_msg = NULL;
3331     abi_long ret;
3332 
3333     if ((int)addrlen < 0) {
3334         return -TARGET_EINVAL;
3335     }
3336 
3337     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3338     if (!host_msg)
3339         return -TARGET_EFAULT;
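    /*
     * If a data translator is registered for this fd (e.g. a netlink
     * socket), translate a private copy of the payload so that the guest's
     * buffer is left untouched.
     */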
3340     if (fd_trans_target_to_host_data(fd)) {
3341         copy_msg = host_msg;
3342         host_msg = g_malloc(len);
3343         memcpy(host_msg, copy_msg, len);
3344         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3345         if (ret < 0) {
3346             goto fail;
3347         }
3348     }
3349     if (target_addr) {
3350         addr = alloca(addrlen+1);
3351         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3352         if (ret) {
3353             goto fail;
3354         }
3355         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3356     } else {
3357         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3358     }
3359 fail:
3360     if (copy_msg) {
3361         g_free(host_msg);
3362         host_msg = copy_msg;
3363     }
3364     unlock_user(host_msg, msg, 0);
3365     return ret;
3366 }
3367 
3368 /* do_recvfrom() Must return target values and target errnos. */
3369 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3370                             abi_ulong target_addr,
3371                             abi_ulong target_addrlen)
3372 {
3373     socklen_t addrlen, ret_addrlen;
3374     void *addr;
3375     void *host_msg;
3376     abi_long ret;
3377 
3378     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3379     if (!host_msg)
3380         return -TARGET_EFAULT;
3381     if (target_addr) {
3382         if (get_user_u32(addrlen, target_addrlen)) {
3383             ret = -TARGET_EFAULT;
3384             goto fail;
3385         }
3386         if ((int)addrlen < 0) {
3387             ret = -TARGET_EINVAL;
3388             goto fail;
3389         }
3390         addr = alloca(addrlen);
3391         ret_addrlen = addrlen;
3392         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3393                                       addr, &ret_addrlen));
3394     } else {
3395         addr = NULL; /* To keep compiler quiet.  */
3396         addrlen = 0; /* To keep compiler quiet.  */
3397         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3398     }
3399     if (!is_error(ret)) {
3400         if (fd_trans_host_to_target_data(fd)) {
3401             abi_long trans;
3402             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3403             if (is_error(trans)) {
3404                 ret = trans;
3405                 goto fail;
3406             }
3407         }
3408         if (target_addr) {
3409             host_to_target_sockaddr(target_addr, addr,
3410                                     MIN(addrlen, ret_addrlen));
3411             if (put_user_u32(ret_addrlen, target_addrlen)) {
3412                 ret = -TARGET_EFAULT;
3413                 goto fail;
3414             }
3415         }
3416         unlock_user(host_msg, msg, len);
3417     } else {
3418 fail:
3419         unlock_user(host_msg, msg, 0);
3420     }
3421     return ret;
3422 }
3423 
3424 #ifdef TARGET_NR_socketcall
3425 /* do_socketcall() must return target values and target errnos. */
3426 static abi_long do_socketcall(int num, abi_ulong vptr)
3427 {
3428     static const unsigned nargs[] = { /* number of arguments per operation */
3429         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3430         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3431         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3432         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3433         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3434         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3435         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3436         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3437         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3438         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3439         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3440         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3441         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3442         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3443         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3444         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3445         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3446         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3447         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3448         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3449     };
3450     abi_long a[6]; /* max 6 args */
3451     unsigned i;
3452 
3453     /* check the range of the first argument num */
3454     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3455     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3456         return -TARGET_EINVAL;
3457     }
3458     /* ensure we have space for args */
3459     if (nargs[num] > ARRAY_SIZE(a)) {
3460         return -TARGET_EINVAL;
3461     }
3462     /* collect the arguments in a[] according to nargs[] */
3463     for (i = 0; i < nargs[num]; ++i) {
3464         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3465             return -TARGET_EFAULT;
3466         }
3467     }
3468     /* now when we have the args, invoke the appropriate underlying function */
3469     switch (num) {
3470     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3471         return do_socket(a[0], a[1], a[2]);
3472     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3473         return do_bind(a[0], a[1], a[2]);
3474     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3475         return do_connect(a[0], a[1], a[2]);
3476     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3477         return get_errno(listen(a[0], a[1]));
3478     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3479         return do_accept4(a[0], a[1], a[2], 0);
3480     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3481         return do_getsockname(a[0], a[1], a[2]);
3482     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3483         return do_getpeername(a[0], a[1], a[2]);
3484     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3485         return do_socketpair(a[0], a[1], a[2], a[3]);
3486     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3487         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3488     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3489         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3490     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3491         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3492     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3493         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3494     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3495         return get_errno(shutdown(a[0], a[1]));
3496     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3497         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3498     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3499         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3500     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3501         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3502     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3503         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3504     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3505         return do_accept4(a[0], a[1], a[2], a[3]);
3506     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3507         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3508     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3509         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3510     default:
3511         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3512         return -TARGET_EINVAL;
3513     }
3514 }
3515 #endif
3516 
3517 #define N_SHM_REGIONS	32
3518 
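/*
 * Guest addresses and sizes of the currently attached SysV shared memory
 * segments, so that do_shmdt() can find a region and reset its page flags.
 */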
3519 static struct shm_region {
3520     abi_ulong start;
3521     abi_ulong size;
3522     bool in_use;
3523 } shm_regions[N_SHM_REGIONS];
3524 
3525 #ifndef TARGET_SEMID64_DS
3526 /* asm-generic version of this struct */
3527 struct target_semid64_ds
3528 {
3529   struct target_ipc_perm sem_perm;
3530   abi_ulong sem_otime;
3531 #if TARGET_ABI_BITS == 32
3532   abi_ulong __unused1;
3533 #endif
3534   abi_ulong sem_ctime;
3535 #if TARGET_ABI_BITS == 32
3536   abi_ulong __unused2;
3537 #endif
3538   abi_ulong sem_nsems;
3539   abi_ulong __unused3;
3540   abi_ulong __unused4;
3541 };
3542 #endif
3543 
3544 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3545                                                abi_ulong target_addr)
3546 {
3547     struct target_ipc_perm *target_ip;
3548     struct target_semid64_ds *target_sd;
3549 
3550     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3551         return -TARGET_EFAULT;
3552     target_ip = &(target_sd->sem_perm);
3553     host_ip->__key = tswap32(target_ip->__key);
3554     host_ip->uid = tswap32(target_ip->uid);
3555     host_ip->gid = tswap32(target_ip->gid);
3556     host_ip->cuid = tswap32(target_ip->cuid);
3557     host_ip->cgid = tswap32(target_ip->cgid);
3558 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3559     host_ip->mode = tswap32(target_ip->mode);
3560 #else
3561     host_ip->mode = tswap16(target_ip->mode);
3562 #endif
3563 #if defined(TARGET_PPC)
3564     host_ip->__seq = tswap32(target_ip->__seq);
3565 #else
3566     host_ip->__seq = tswap16(target_ip->__seq);
3567 #endif
3568     unlock_user_struct(target_sd, target_addr, 0);
3569     return 0;
3570 }
3571 
3572 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3573                                                struct ipc_perm *host_ip)
3574 {
3575     struct target_ipc_perm *target_ip;
3576     struct target_semid64_ds *target_sd;
3577 
3578     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3579         return -TARGET_EFAULT;
3580     target_ip = &(target_sd->sem_perm);
3581     target_ip->__key = tswap32(host_ip->__key);
3582     target_ip->uid = tswap32(host_ip->uid);
3583     target_ip->gid = tswap32(host_ip->gid);
3584     target_ip->cuid = tswap32(host_ip->cuid);
3585     target_ip->cgid = tswap32(host_ip->cgid);
3586 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3587     target_ip->mode = tswap32(host_ip->mode);
3588 #else
3589     target_ip->mode = tswap16(host_ip->mode);
3590 #endif
3591 #if defined(TARGET_PPC)
3592     target_ip->__seq = tswap32(host_ip->__seq);
3593 #else
3594     target_ip->__seq = tswap16(host_ip->__seq);
3595 #endif
3596     unlock_user_struct(target_sd, target_addr, 1);
3597     return 0;
3598 }
3599 
3600 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3601                                                abi_ulong target_addr)
3602 {
3603     struct target_semid64_ds *target_sd;
3604 
3605     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3606         return -TARGET_EFAULT;
3607     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3608         return -TARGET_EFAULT;
3609     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3610     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3611     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3612     unlock_user_struct(target_sd, target_addr, 0);
3613     return 0;
3614 }
3615 
3616 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3617                                                struct semid_ds *host_sd)
3618 {
3619     struct target_semid64_ds *target_sd;
3620 
3621     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3622         return -TARGET_EFAULT;
3623     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3624         return -TARGET_EFAULT;
3625     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3626     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3627     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3628     unlock_user_struct(target_sd, target_addr, 1);
3629     return 0;
3630 }
3631 
3632 struct target_seminfo {
3633     int semmap;
3634     int semmni;
3635     int semmns;
3636     int semmnu;
3637     int semmsl;
3638     int semopm;
3639     int semume;
3640     int semusz;
3641     int semvmx;
3642     int semaem;
3643 };
3644 
3645 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3646                                               struct seminfo *host_seminfo)
3647 {
3648     struct target_seminfo *target_seminfo;
3649     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3650         return -TARGET_EFAULT;
3651     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3652     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3653     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3654     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3655     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3656     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3657     __put_user(host_seminfo->semume, &target_seminfo->semume);
3658     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3659     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3660     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3661     unlock_user_struct(target_seminfo, target_addr, 1);
3662     return 0;
3663 }
3664 
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
3678 
3679 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3680                                                abi_ulong target_addr)
3681 {
3682     int nsems;
3683     unsigned short *array;
3684     union semun semun;
3685     struct semid_ds semid_ds;
3686     int i, ret;
3687 
3688     semun.buf = &semid_ds;
3689 
3690     ret = semctl(semid, 0, IPC_STAT, semun);
3691     if (ret == -1)
3692         return get_errno(ret);
3693 
3694     nsems = semid_ds.sem_nsems;
3695 
3696     *host_array = g_try_new(unsigned short, nsems);
3697     if (!*host_array) {
3698         return -TARGET_ENOMEM;
3699     }
3700     array = lock_user(VERIFY_READ, target_addr,
3701                       nsems*sizeof(unsigned short), 1);
3702     if (!array) {
3703         g_free(*host_array);
3704         return -TARGET_EFAULT;
3705     }
3706 
    for (i = 0; i < nsems; i++) {
3708         __get_user((*host_array)[i], &array[i]);
3709     }
3710     unlock_user(array, target_addr, 0);
3711 
3712     return 0;
3713 }
3714 
3715 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3716                                                unsigned short **host_array)
3717 {
3718     int nsems;
3719     unsigned short *array;
3720     union semun semun;
3721     struct semid_ds semid_ds;
3722     int i, ret;
3723 
3724     semun.buf = &semid_ds;
3725 
3726     ret = semctl(semid, 0, IPC_STAT, semun);
3727     if (ret == -1)
3728         return get_errno(ret);
3729 
3730     nsems = semid_ds.sem_nsems;
3731 
3732     array = lock_user(VERIFY_WRITE, target_addr,
3733                       nsems*sizeof(unsigned short), 0);
3734     if (!array)
3735         return -TARGET_EFAULT;
3736 
    for (i = 0; i < nsems; i++) {
3738         __put_user((*host_array)[i], &array[i]);
3739     }
3740     g_free(*host_array);
3741     unlock_user(array, target_addr, 1);
3742 
3743     return 0;
3744 }
3745 
3746 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3747                                  abi_ulong target_arg)
3748 {
3749     union target_semun target_su = { .buf = target_arg };
3750     union semun arg;
3751     struct semid_ds dsarg;
3752     unsigned short *array = NULL;
3753     struct seminfo seminfo;
3754     abi_long ret = -TARGET_EINVAL;
3755     abi_long err;
3756     cmd &= 0xff;
3757 
    switch (cmd) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }
3812 
3813     return ret;
3814 }
3815 
3816 struct target_sembuf {
3817     unsigned short sem_num;
3818     short sem_op;
3819     short sem_flg;
3820 };
3821 
3822 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3823                                              abi_ulong target_addr,
3824                                              unsigned nsops)
3825 {
3826     struct target_sembuf *target_sembuf;
3827     int i;
3828 
3829     target_sembuf = lock_user(VERIFY_READ, target_addr,
3830                               nsops*sizeof(struct target_sembuf), 1);
3831     if (!target_sembuf)
3832         return -TARGET_EFAULT;
3833 
    for (i = 0; i < nsops; i++) {
3835         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3836         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3837         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3838     }
3839 
3840     unlock_user(target_sembuf, target_addr, 0);
3841 
3842     return 0;
3843 }
3844 
3845 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3846 {
3847     struct sembuf sops[nsops];
3848     abi_long ret;
3849 
3850     if (target_to_host_sembuf(sops, ptr, nsops))
3851         return -TARGET_EFAULT;
3852 
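    /*
     * Prefer the dedicated semtimedop syscall and fall back to the ipc()
     * multiplexer on hosts that only provide the latter.
     */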
3853     ret = -TARGET_ENOSYS;
3854 #ifdef __NR_semtimedop
3855     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3856 #endif
3857 #ifdef __NR_ipc
3858     if (ret == -TARGET_ENOSYS) {
3859         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3860     }
3861 #endif
3862     return ret;
3863 }
3864 
3865 struct target_msqid_ds
3866 {
3867     struct target_ipc_perm msg_perm;
3868     abi_ulong msg_stime;
3869 #if TARGET_ABI_BITS == 32
3870     abi_ulong __unused1;
3871 #endif
3872     abi_ulong msg_rtime;
3873 #if TARGET_ABI_BITS == 32
3874     abi_ulong __unused2;
3875 #endif
3876     abi_ulong msg_ctime;
3877 #if TARGET_ABI_BITS == 32
3878     abi_ulong __unused3;
3879 #endif
3880     abi_ulong __msg_cbytes;
3881     abi_ulong msg_qnum;
3882     abi_ulong msg_qbytes;
3883     abi_ulong msg_lspid;
3884     abi_ulong msg_lrpid;
3885     abi_ulong __unused4;
3886     abi_ulong __unused5;
3887 };
3888 
3889 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3890                                                abi_ulong target_addr)
3891 {
3892     struct target_msqid_ds *target_md;
3893 
3894     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3895         return -TARGET_EFAULT;
3896     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3897         return -TARGET_EFAULT;
3898     host_md->msg_stime = tswapal(target_md->msg_stime);
3899     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3900     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3901     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3902     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3903     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3904     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3905     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3906     unlock_user_struct(target_md, target_addr, 0);
3907     return 0;
3908 }
3909 
3910 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3911                                                struct msqid_ds *host_md)
3912 {
3913     struct target_msqid_ds *target_md;
3914 
3915     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3916         return -TARGET_EFAULT;
3917     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3918         return -TARGET_EFAULT;
3919     target_md->msg_stime = tswapal(host_md->msg_stime);
3920     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3921     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3922     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3923     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3924     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3925     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3926     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3927     unlock_user_struct(target_md, target_addr, 1);
3928     return 0;
3929 }
3930 
3931 struct target_msginfo {
3932     int msgpool;
3933     int msgmap;
3934     int msgmax;
3935     int msgmnb;
3936     int msgmni;
3937     int msgssz;
3938     int msgtql;
3939     unsigned short int msgseg;
3940 };
3941 
3942 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3943                                               struct msginfo *host_msginfo)
3944 {
3945     struct target_msginfo *target_msginfo;
3946     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3947         return -TARGET_EFAULT;
3948     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3949     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3950     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3951     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3952     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3953     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3954     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3955     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3956     unlock_user_struct(target_msginfo, target_addr, 1);
3957     return 0;
3958 }
3959 
3960 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3961 {
3962     struct msqid_ds dsarg;
3963     struct msginfo msginfo;
3964     abi_long ret = -TARGET_EINVAL;
3965 
3966     cmd &= 0xff;
3967 
3968     switch (cmd) {
3969     case IPC_STAT:
3970     case IPC_SET:
3971     case MSG_STAT:
3972         if (target_to_host_msqid_ds(&dsarg,ptr))
3973             return -TARGET_EFAULT;
3974         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3975         if (host_to_target_msqid_ds(ptr,&dsarg))
3976             return -TARGET_EFAULT;
3977         break;
3978     case IPC_RMID:
3979         ret = get_errno(msgctl(msgid, cmd, NULL));
3980         break;
3981     case IPC_INFO:
3982     case MSG_INFO:
3983         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3984         if (host_to_target_msginfo(ptr, &msginfo))
3985             return -TARGET_EFAULT;
3986         break;
3987     }
3988 
3989     return ret;
3990 }
3991 
3992 struct target_msgbuf {
3993     abi_long mtype;
    char mtext[1];
3995 };
3996 
3997 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3998                                  ssize_t msgsz, int msgflg)
3999 {
4000     struct target_msgbuf *target_mb;
4001     struct msgbuf *host_mb;
4002     abi_long ret = 0;
4003 
4004     if (msgsz < 0) {
4005         return -TARGET_EINVAL;
4006     }
4007 
4008     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4009         return -TARGET_EFAULT;
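    /* Host buffer layout: a long mtype followed by msgsz bytes of mtext. */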
4010     host_mb = g_try_malloc(msgsz + sizeof(long));
4011     if (!host_mb) {
4012         unlock_user_struct(target_mb, msgp, 0);
4013         return -TARGET_ENOMEM;
4014     }
4015     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4016     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4017     ret = -TARGET_ENOSYS;
4018 #ifdef __NR_msgsnd
4019     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4020 #endif
4021 #ifdef __NR_ipc
4022     if (ret == -TARGET_ENOSYS) {
4023         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4024                                  host_mb, 0));
4025     }
4026 #endif
4027     g_free(host_mb);
4028     unlock_user_struct(target_mb, msgp, 0);
4029 
4030     return ret;
4031 }
4032 
4033 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4034                                  ssize_t msgsz, abi_long msgtyp,
4035                                  int msgflg)
4036 {
4037     struct target_msgbuf *target_mb;
4038     char *target_mtext;
4039     struct msgbuf *host_mb;
4040     abi_long ret = 0;
4041 
4042     if (msgsz < 0) {
4043         return -TARGET_EINVAL;
4044     }
4045 
4046     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4047         return -TARGET_EFAULT;
4048 
4049     host_mb = g_try_malloc(msgsz + sizeof(long));
4050     if (!host_mb) {
4051         ret = -TARGET_ENOMEM;
4052         goto end;
4053     }
4054     ret = -TARGET_ENOSYS;
4055 #ifdef __NR_msgrcv
4056     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4057 #endif
4058 #ifdef __NR_ipc
4059     if (ret == -TARGET_ENOSYS) {
4060         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4061                         msgflg, host_mb, msgtyp));
4062     }
4063 #endif
4064 
4065     if (ret > 0) {
4066         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4067         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4068         if (!target_mtext) {
4069             ret = -TARGET_EFAULT;
4070             goto end;
4071         }
4072         memcpy(target_mb->mtext, host_mb->mtext, ret);
4073         unlock_user(target_mtext, target_mtext_addr, ret);
4074     }
4075 
4076     target_mb->mtype = tswapal(host_mb->mtype);
4077 
4078 end:
4079     if (target_mb)
4080         unlock_user_struct(target_mb, msgp, 1);
4081     g_free(host_mb);
4082     return ret;
4083 }
4084 
4085 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4086                                                abi_ulong target_addr)
4087 {
4088     struct target_shmid_ds *target_sd;
4089 
4090     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4091         return -TARGET_EFAULT;
4092     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4093         return -TARGET_EFAULT;
4094     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4095     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4096     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4097     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4098     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4099     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4100     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4101     unlock_user_struct(target_sd, target_addr, 0);
4102     return 0;
4103 }
4104 
4105 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4106                                                struct shmid_ds *host_sd)
4107 {
4108     struct target_shmid_ds *target_sd;
4109 
4110     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4111         return -TARGET_EFAULT;
4112     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4113         return -TARGET_EFAULT;
4114     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4115     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4116     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4117     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4118     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4119     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4120     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4121     unlock_user_struct(target_sd, target_addr, 1);
4122     return 0;
4123 }
4124 
4125 struct  target_shminfo {
4126     abi_ulong shmmax;
4127     abi_ulong shmmin;
4128     abi_ulong shmmni;
4129     abi_ulong shmseg;
4130     abi_ulong shmall;
4131 };
4132 
4133 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4134                                               struct shminfo *host_shminfo)
4135 {
4136     struct target_shminfo *target_shminfo;
4137     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4138         return -TARGET_EFAULT;
4139     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4140     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4141     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4142     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4143     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4144     unlock_user_struct(target_shminfo, target_addr, 1);
4145     return 0;
4146 }
4147 
4148 struct target_shm_info {
4149     int used_ids;
4150     abi_ulong shm_tot;
4151     abi_ulong shm_rss;
4152     abi_ulong shm_swp;
4153     abi_ulong swap_attempts;
4154     abi_ulong swap_successes;
4155 };
4156 
4157 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4158                                                struct shm_info *host_shm_info)
4159 {
4160     struct target_shm_info *target_shm_info;
4161     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4162         return -TARGET_EFAULT;
4163     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4164     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4165     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4166     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4167     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4168     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4169     unlock_user_struct(target_shm_info, target_addr, 1);
4170     return 0;
4171 }
4172 
4173 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4174 {
4175     struct shmid_ds dsarg;
4176     struct shminfo shminfo;
4177     struct shm_info shm_info;
4178     abi_long ret = -TARGET_EINVAL;
4179 
4180     cmd &= 0xff;
4181 
    switch (cmd) {
4183     case IPC_STAT:
4184     case IPC_SET:
4185     case SHM_STAT:
4186         if (target_to_host_shmid_ds(&dsarg, buf))
4187             return -TARGET_EFAULT;
4188         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4189         if (host_to_target_shmid_ds(buf, &dsarg))
4190             return -TARGET_EFAULT;
4191         break;
4192     case IPC_INFO:
4193         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4194         if (host_to_target_shminfo(buf, &shminfo))
4195             return -TARGET_EFAULT;
4196         break;
4197     case SHM_INFO:
4198         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4199         if (host_to_target_shm_info(buf, &shm_info))
4200             return -TARGET_EFAULT;
4201         break;
4202     case IPC_RMID:
4203     case SHM_LOCK:
4204     case SHM_UNLOCK:
4205         ret = get_errno(shmctl(shmid, cmd, NULL));
4206         break;
4207     }
4208 
4209     return ret;
4210 }
4211 
4212 #ifndef TARGET_FORCE_SHMLBA
4213 /* For most architectures, SHMLBA is the same as the page size;
4214  * some architectures have larger values, in which case they should
4215  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4216  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4217  * and defining its own value for SHMLBA.
4218  *
4219  * The kernel also permits SHMLBA to be set by the architecture to a
4220  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4221  * this means that addresses are rounded to the large size if
4222  * SHM_RND is set but addresses not aligned to that size are not rejected
4223  * as long as they are at least page-aligned. Since the only architecture
4224  * which uses this is ia64 this code doesn't provide for that oddity.
4225  */
4226 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4227 {
4228     return TARGET_PAGE_SIZE;
4229 }
4230 #endif
4231 
4232 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4233                                  int shmid, abi_ulong shmaddr, int shmflg)
4234 {
4235     abi_long raddr;
4236     void *host_raddr;
4237     struct shmid_ds shm_info;
    int i, ret;
4239     abi_ulong shmlba;
4240 
4241     /* find out the length of the shared memory segment */
4242     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4243     if (is_error(ret)) {
4244         /* can't get length, bail out */
4245         return ret;
4246     }
4247 
4248     shmlba = target_shmlba(cpu_env);
4249 
4250     if (shmaddr & (shmlba - 1)) {
4251         if (shmflg & SHM_RND) {
4252             shmaddr &= ~(shmlba - 1);
4253         } else {
4254             return -TARGET_EINVAL;
4255         }
4256     }
4257     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4258         return -TARGET_EINVAL;
4259     }
4260 
4261     mmap_lock();
4262 
4263     if (shmaddr)
4264         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4265     else {
4266         abi_ulong mmap_start;
4267 
4268         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4269         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4270 
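        /*
         * With a reserved guest address space the range found above may
         * already be covered by an existing host mapping, which is
         * presumably why SHM_REMAP is passed to the shmat() below.
         */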
4271         if (mmap_start == -1) {
4272             errno = ENOMEM;
4273             host_raddr = (void *)-1;
4274         } else
4275             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4276     }
4277 
4278     if (host_raddr == (void *)-1) {
4279         mmap_unlock();
4280         return get_errno((long)host_raddr);
4281     }
    raddr = h2g((unsigned long)host_raddr);
4283 
4284     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4285                    PAGE_VALID | PAGE_READ |
4286                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4287 
4288     for (i = 0; i < N_SHM_REGIONS; i++) {
4289         if (!shm_regions[i].in_use) {
4290             shm_regions[i].in_use = true;
4291             shm_regions[i].start = raddr;
4292             shm_regions[i].size = shm_info.shm_segsz;
4293             break;
4294         }
4295     }
4296 
4297     mmap_unlock();
4298     return raddr;
4299 
4300 }
4301 
4302 static inline abi_long do_shmdt(abi_ulong shmaddr)
4303 {
4304     int i;
4305     abi_long rv;
4306 
4307     mmap_lock();
4308 
4309     for (i = 0; i < N_SHM_REGIONS; ++i) {
4310         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4311             shm_regions[i].in_use = false;
4312             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4313             break;
4314         }
4315     }
4316     rv = get_errno(shmdt(g2h(shmaddr)));
4317 
4318     mmap_unlock();
4319 
4320     return rv;
4321 }
4322 
4323 #ifdef TARGET_NR_ipc
4324 /* ??? This only works with linear mappings.  */
4325 /* do_ipc() must return target values and target errnos. */
4326 static abi_long do_ipc(CPUArchState *cpu_env,
4327                        unsigned int call, abi_long first,
4328                        abi_long second, abi_long third,
4329                        abi_long ptr, abi_long fifth)
4330 {
4331     int version;
4332     abi_long ret = 0;
4333 
4334     version = call >> 16;
4335     call &= 0xffff;
4336 
4337     switch (call) {
4338     case IPCOP_semop:
4339         ret = do_semop(first, ptr, second);
4340         break;
4341 
4342     case IPCOP_semget:
4343         ret = get_errno(semget(first, second, third));
4344         break;
4345 
4346     case IPCOP_semctl: {
4347         /* The semun argument to semctl is passed by value, so dereference the
4348          * ptr argument. */
4349         abi_ulong atptr;
4350         get_user_ual(atptr, ptr);
4351         ret = do_semctl(first, second, third, atptr);
4352         break;
4353     }
4354 
4355     case IPCOP_msgget:
4356         ret = get_errno(msgget(first, second));
4357         break;
4358 
4359     case IPCOP_msgsnd:
4360         ret = do_msgsnd(first, ptr, second, third);
4361         break;
4362 
4363     case IPCOP_msgctl:
4364         ret = do_msgctl(first, second, ptr);
4365         break;
4366 
4367     case IPCOP_msgrcv:
4368         switch (version) {
4369         case 0:
4370             {
4371                 struct target_ipc_kludge {
4372                     abi_long msgp;
4373                     abi_long msgtyp;
4374                 } *tmp;
4375 
4376                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4377                     ret = -TARGET_EFAULT;
4378                     break;
4379                 }
4380 
4381                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4382 
4383                 unlock_user_struct(tmp, ptr, 0);
4384                 break;
4385             }
4386         default:
4387             ret = do_msgrcv(first, ptr, second, fifth, third);
4388         }
4389         break;
4390 
4391     case IPCOP_shmat:
4392         switch (version) {
4393         default:
4394         {
4395             abi_ulong raddr;
4396             raddr = do_shmat(cpu_env, first, ptr, second);
4397             if (is_error(raddr))
4398                 return get_errno(raddr);
4399             if (put_user_ual(raddr, third))
4400                 return -TARGET_EFAULT;
4401             break;
4402         }
4403         case 1:
4404             ret = -TARGET_EINVAL;
4405             break;
4406         }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
4426     }
4427     return ret;
4428 }
4429 #endif
4430 
4431 /* kernel structure types definitions */
4432 
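/*
 * syscall_types.h is expanded twice: first (below) to build an enum of
 * STRUCT_* identifiers, then a second time to emit the argtype arrays
 * describing each structure's layout for the thunk conversion code.
 */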
4433 #define STRUCT(name, ...) STRUCT_ ## name,
4434 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4435 enum {
4436 #include "syscall_types.h"
4437 STRUCT_MAX
4438 };
4439 #undef STRUCT
4440 #undef STRUCT_SPECIAL
4441 
4442 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4443 #define STRUCT_SPECIAL(name)
4444 #include "syscall_types.h"
4445 #undef STRUCT
4446 #undef STRUCT_SPECIAL
4447 
4448 typedef struct IOCTLEntry IOCTLEntry;
4449 
4450 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4451                              int fd, int cmd, abi_long arg);
4452 
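/*
 * One entry per emulated ioctl: the target and host request numbers, a name
 * for logging, the data direction (IOC_R/IOC_W/IOC_RW), an optional custom
 * handler and a thunk description of the argument type.
 */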
4453 struct IOCTLEntry {
4454     int target_cmd;
4455     unsigned int host_cmd;
4456     const char *name;
4457     int access;
4458     do_ioctl_fn *do_ioctl;
4459     const argtype arg_type[5];
4460 };
4461 
4462 #define IOC_R 0x0001
4463 #define IOC_W 0x0002
4464 #define IOC_RW (IOC_R | IOC_W)
4465 
4466 #define MAX_STRUCT_SIZE 4096
4467 
4468 #ifdef CONFIG_FIEMAP
4469 /* So fiemap access checks don't overflow on 32 bit systems.
4470  * This is very slightly smaller than the limit imposed by
4471  * the underlying kernel.
4472  */
4473 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4474                             / sizeof(struct fiemap_extent))
4475 
4476 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4477                                        int fd, int cmd, abi_long arg)
4478 {
4479     /* The parameter for this ioctl is a struct fiemap followed
4480      * by an array of struct fiemap_extent whose size is set
4481      * in fiemap->fm_extent_count. The array is filled in by the
4482      * ioctl.
4483      */
4484     int target_size_in, target_size_out;
4485     struct fiemap *fm;
4486     const argtype *arg_type = ie->arg_type;
4487     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4488     void *argptr, *p;
4489     abi_long ret;
4490     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4491     uint32_t outbufsz;
4492     int free_fm = 0;
4493 
4494     assert(arg_type[0] == TYPE_PTR);
4495     assert(ie->access == IOC_RW);
4496     arg_type++;
4497     target_size_in = thunk_type_size(arg_type, 0);
4498     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4499     if (!argptr) {
4500         return -TARGET_EFAULT;
4501     }
4502     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4503     unlock_user(argptr, arg, 0);
4504     fm = (struct fiemap *)buf_temp;
4505     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4506         return -TARGET_EINVAL;
4507     }
4508 
4509     outbufsz = sizeof (*fm) +
4510         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4511 
4512     if (outbufsz > MAX_STRUCT_SIZE) {
4513         /* We can't fit all the extents into the fixed size buffer.
4514          * Allocate one that is large enough and use it instead.
4515          */
4516         fm = g_try_malloc(outbufsz);
4517         if (!fm) {
4518             return -TARGET_ENOMEM;
4519         }
4520         memcpy(fm, buf_temp, sizeof(struct fiemap));
4521         free_fm = 1;
4522     }
4523     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4524     if (!is_error(ret)) {
4525         target_size_out = target_size_in;
4526         /* An extent_count of 0 means we were only counting the extents
4527          * so there are no structs to copy
4528          */
4529         if (fm->fm_extent_count != 0) {
4530             target_size_out += fm->fm_mapped_extents * extent_size;
4531         }
4532         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4533         if (!argptr) {
4534             ret = -TARGET_EFAULT;
4535         } else {
4536             /* Convert the struct fiemap */
4537             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4538             if (fm->fm_extent_count != 0) {
4539                 p = argptr + target_size_in;
4540                 /* ...and then all the struct fiemap_extents */
4541                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4542                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4543                                   THUNK_TARGET);
4544                     p += extent_size;
4545                 }
4546             }
4547             unlock_user(argptr, arg, target_size_out);
4548         }
4549     }
4550     if (free_fm) {
4551         g_free(fm);
4552     }
4553     return ret;
4554 }
4555 #endif
4556 
4557 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4558                                 int fd, int cmd, abi_long arg)
4559 {
4560     const argtype *arg_type = ie->arg_type;
4561     int target_size;
4562     void *argptr;
4563     int ret;
4564     struct ifconf *host_ifconf;
4565     uint32_t outbufsz;
4566     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4567     int target_ifreq_size;
4568     int nb_ifreq;
4569     int free_buf = 0;
4570     int i;
4571     int target_ifc_len;
4572     abi_long target_ifc_buf;
4573     int host_ifc_len;
4574     char *host_ifc_buf;
4575 
4576     assert(arg_type[0] == TYPE_PTR);
4577     assert(ie->access == IOC_RW);
4578 
4579     arg_type++;
4580     target_size = thunk_type_size(arg_type, 0);
4581 
4582     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4583     if (!argptr)
4584         return -TARGET_EFAULT;
4585     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4586     unlock_user(argptr, arg, 0);
4587 
4588     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4589     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4590     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4591 
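    /*
     * struct ifreq can have different sizes on the target and the host, so
     * ifc_len has to be rescaled whenever it crosses that boundary.
     */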
4592     if (target_ifc_buf != 0) {
4593         target_ifc_len = host_ifconf->ifc_len;
4594         nb_ifreq = target_ifc_len / target_ifreq_size;
4595         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4596 
4597         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4598         if (outbufsz > MAX_STRUCT_SIZE) {
4599             /*
             * We can't fit all the ifreq entries into the fixed size buffer.
4601              * Allocate one that is large enough and use it instead.
4602              */
4603             host_ifconf = malloc(outbufsz);
4604             if (!host_ifconf) {
4605                 return -TARGET_ENOMEM;
4606             }
4607             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4608             free_buf = 1;
4609         }
4610         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4611 
4612         host_ifconf->ifc_len = host_ifc_len;
4613     } else {
        host_ifc_buf = NULL;
4615     }
4616     host_ifconf->ifc_buf = host_ifc_buf;
4617 
4618     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4619     if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */
4621 
4622         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4623         target_ifc_len = nb_ifreq * target_ifreq_size;
4624         host_ifconf->ifc_len = target_ifc_len;
4625 
        /* restore target ifc_buf */
4627 
4628         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4629 
        /* copy struct ifconf to target user */
4631 
4632         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4633         if (!argptr)
4634             return -TARGET_EFAULT;
4635         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4636         unlock_user(argptr, arg, target_size);
4637 
4638         if (target_ifc_buf != 0) {
4639             /* copy ifreq[] to target user */
4640             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4641             for (i = 0; i < nb_ifreq ; i++) {
4642                 thunk_convert(argptr + i * target_ifreq_size,
4643                               host_ifc_buf + i * sizeof(struct ifreq),
4644                               ifreq_arg_type, THUNK_TARGET);
4645             }
4646             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4647         }
4648     }
4649 
4650     if (free_buf) {
4651         free(host_ifconf);
4652     }
4653 
4654     return ret;
4655 }
4656 
4657 #if defined(CONFIG_USBFS)
4658 #if HOST_LONG_BITS > 64
4659 #error USBDEVFS thunks do not support >64 bit hosts yet.
4660 #endif
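/*
 * USB URB tracking: do_ioctl_usbdevfs_submiturb() wraps every guest URB in a
 * live_urb, pairing the guest's urb/buffer addresses with the converted host
 * usbdevfs_urb that is handed to the kernel.  USBDEVFS_REAPURB returns the
 * address of that embedded host_urb, so the wrapper is recovered with
 * offsetof(); USBDEVFS_DISCARDURB looks the wrapper up by the guest urb
 * address via the hash table below.
 */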
4661 struct live_urb {
4662     uint64_t target_urb_adr;
4663     uint64_t target_buf_adr;
4664     char *target_buf_ptr;
4665     struct usbdevfs_urb host_urb;
4666 };
4667 
4668 static GHashTable *usbdevfs_urb_hashtable(void)
4669 {
4670     static GHashTable *urb_hashtable;
4671 
4672     if (!urb_hashtable) {
4673         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4674     }
4675     return urb_hashtable;
4676 }
4677 
4678 static void urb_hashtable_insert(struct live_urb *urb)
4679 {
4680     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4681     g_hash_table_insert(urb_hashtable, urb, urb);
4682 }
4683 
4684 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4685 {
4686     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4687     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4688 }
4689 
4690 static void urb_hashtable_remove(struct live_urb *urb)
4691 {
4692     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4693     g_hash_table_remove(urb_hashtable, urb);
4694 }
4695 
4696 static abi_long
4697 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4698                           int fd, int cmd, abi_long arg)
4699 {
4700     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4701     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4702     struct live_urb *lurb;
4703     void *argptr;
4704     uint64_t hurb;
4705     int target_size;
4706     uintptr_t target_urb_adr;
4707     abi_long ret;
4708 
4709     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4710 
4711     memset(buf_temp, 0, sizeof(uint64_t));
4712     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4713     if (is_error(ret)) {
4714         return ret;
4715     }
4716 
4717     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4718     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4719     if (!lurb->target_urb_adr) {
4720         return -TARGET_EFAULT;
4721     }
4722     urb_hashtable_remove(lurb);
4723     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4724         lurb->host_urb.buffer_length);
4725     lurb->target_buf_ptr = NULL;
4726 
4727     /* restore the guest buffer pointer */
4728     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4729 
4730     /* update the guest urb struct */
4731     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4732     if (!argptr) {
4733         g_free(lurb);
4734         return -TARGET_EFAULT;
4735     }
4736     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4737     unlock_user(argptr, lurb->target_urb_adr, target_size);
4738 
4739     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4740     /* write back the urb handle */
4741     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4742     if (!argptr) {
4743         g_free(lurb);
4744         return -TARGET_EFAULT;
4745     }
4746 
4747     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4748     target_urb_adr = lurb->target_urb_adr;
4749     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4750     unlock_user(argptr, arg, target_size);
4751 
4752     g_free(lurb);
4753     return ret;
4754 }
4755 
4756 static abi_long
4757 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4758                              uint8_t *buf_temp __attribute__((unused)),
4759                              int fd, int cmd, abi_long arg)
4760 {
4761     struct live_urb *lurb;
4762 
4763     /* map target address back to host URB with metadata. */
4764     lurb = urb_hashtable_lookup(arg);
4765     if (!lurb) {
4766         return -TARGET_EFAULT;
4767     }
4768     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4769 }
4770 
4771 static abi_long
4772 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4773                             int fd, int cmd, abi_long arg)
4774 {
4775     const argtype *arg_type = ie->arg_type;
4776     int target_size;
4777     abi_long ret;
4778     void *argptr;
4779     int rw_dir;
4780     struct live_urb *lurb;
4781 
4782     /*
4783      * Each submitted URB needs to map to a unique ID for the
4784      * kernel, and that unique ID needs to be a pointer to
4785      * host memory.  Hence, we need to malloc for each URB.
4786      * Isochronous transfers have a variable-length struct.
4787      */
4788     arg_type++;
4789     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4790 
4791     /* construct host copy of urb and metadata */
4792     lurb = g_try_malloc0(sizeof(struct live_urb));
4793     if (!lurb) {
4794         return -TARGET_ENOMEM;
4795     }
4796 
4797     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4798     if (!argptr) {
4799         g_free(lurb);
4800         return -TARGET_EFAULT;
4801     }
4802     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4803     unlock_user(argptr, arg, 0);
4804 
4805     lurb->target_urb_adr = arg;
4806     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4807 
4808     /* buffer space used depends on endpoint type so lock the entire buffer */
4809     /* control type urbs should check the buffer contents for true direction */
4810     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4811     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4812         lurb->host_urb.buffer_length, 1);
4813     if (lurb->target_buf_ptr == NULL) {
4814         g_free(lurb);
4815         return -TARGET_EFAULT;
4816     }
4817 
4818     /* update buffer pointer in host copy */
4819     lurb->host_urb.buffer = lurb->target_buf_ptr;
4820 
4821     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4822     if (is_error(ret)) {
4823         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4824         g_free(lurb);
4825     } else {
4826         urb_hashtable_insert(lurb);
4827     }
4828 
4829     return ret;
4830 }
4831 #endif /* CONFIG_USBFS */
4832 
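/*
 * device-mapper ioctls pass a struct dm_ioctl header followed by a
 * command-specific, variable-sized payload; data_start/data_size in the
 * header describe where that payload lives.  The handler below converts the
 * header into a temporary buffer sized from data_size, converts the payload
 * in the direction each DM_* command requires, issues the host ioctl, and
 * then converts header and payload back for the guest.
 */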
4833 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4834                             int cmd, abi_long arg)
4835 {
4836     void *argptr;
4837     struct dm_ioctl *host_dm;
4838     abi_long guest_data;
4839     uint32_t guest_data_size;
4840     int target_size;
4841     const argtype *arg_type = ie->arg_type;
4842     abi_long ret;
4843     void *big_buf = NULL;
4844     char *host_data;
4845 
4846     arg_type++;
4847     target_size = thunk_type_size(arg_type, 0);
4848     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4849     if (!argptr) {
4850         ret = -TARGET_EFAULT;
4851         goto out;
4852     }
4853     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4854     unlock_user(argptr, arg, 0);
4855 
4856     /* buf_temp is too small, so fetch things into a bigger buffer */
4857     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4858     memcpy(big_buf, buf_temp, target_size);
4859     buf_temp = big_buf;
4860     host_dm = big_buf;
4861 
4862     guest_data = arg + host_dm->data_start;
4863     if ((guest_data - arg) < 0) {
4864         ret = -TARGET_EINVAL;
4865         goto out;
4866     }
4867     guest_data_size = host_dm->data_size - host_dm->data_start;
4868     host_data = (char*)host_dm + host_dm->data_start;
4869 
4870     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4871     if (!argptr) {
4872         ret = -TARGET_EFAULT;
4873         goto out;
4874     }
4875 
4876     switch (ie->host_cmd) {
4877     case DM_REMOVE_ALL:
4878     case DM_LIST_DEVICES:
4879     case DM_DEV_CREATE:
4880     case DM_DEV_REMOVE:
4881     case DM_DEV_SUSPEND:
4882     case DM_DEV_STATUS:
4883     case DM_DEV_WAIT:
4884     case DM_TABLE_STATUS:
4885     case DM_TABLE_CLEAR:
4886     case DM_TABLE_DEPS:
4887     case DM_LIST_VERSIONS:
4888         /* no input data */
4889         break;
4890     case DM_DEV_RENAME:
4891     case DM_DEV_SET_GEOMETRY:
4892         /* data contains only strings */
4893         memcpy(host_data, argptr, guest_data_size);
4894         break;
4895     case DM_TARGET_MSG:
4896         memcpy(host_data, argptr, guest_data_size);
4897         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4898         break;
4899     case DM_TABLE_LOAD:
4900     {
4901         void *gspec = argptr;
4902         void *cur_data = host_data;
4903         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4904         int spec_size = thunk_type_size(arg_type, 0);
4905         int i;
4906 
4907         for (i = 0; i < host_dm->target_count; i++) {
4908             struct dm_target_spec *spec = cur_data;
4909             uint32_t next;
4910             int slen;
4911 
4912             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4913             slen = strlen((char*)gspec + spec_size) + 1;
4914             next = spec->next;
4915             spec->next = sizeof(*spec) + slen;
4916             strcpy((char*)&spec[1], gspec + spec_size);
4917             gspec += next;
4918             cur_data += spec->next;
4919         }
4920         break;
4921     }
4922     default:
4923         ret = -TARGET_EINVAL;
4924         unlock_user(argptr, guest_data, 0);
4925         goto out;
4926     }
4927     unlock_user(argptr, guest_data, 0);
4928 
4929     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4930     if (!is_error(ret)) {
4931         guest_data = arg + host_dm->data_start;
4932         guest_data_size = host_dm->data_size - host_dm->data_start;
4933         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
4934         switch (ie->host_cmd) {
4935         case DM_REMOVE_ALL:
4936         case DM_DEV_CREATE:
4937         case DM_DEV_REMOVE:
4938         case DM_DEV_RENAME:
4939         case DM_DEV_SUSPEND:
4940         case DM_DEV_STATUS:
4941         case DM_TABLE_LOAD:
4942         case DM_TABLE_CLEAR:
4943         case DM_TARGET_MSG:
4944         case DM_DEV_SET_GEOMETRY:
4945             /* no return data */
4946             break;
4947         case DM_LIST_DEVICES:
4948         {
4949             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4950             uint32_t remaining_data = guest_data_size;
4951             void *cur_data = argptr;
4952             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4953             int nl_size = 12; /* can't use thunk_size due to alignment */
4954 
4955             while (1) {
4956                 uint32_t next = nl->next;
4957                 if (next) {
4958                     nl->next = nl_size + (strlen(nl->name) + 1);
4959                 }
4960                 if (remaining_data < nl->next) {
4961                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4962                     break;
4963                 }
4964                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4965                 strcpy(cur_data + nl_size, nl->name);
4966                 cur_data += nl->next;
4967                 remaining_data -= nl->next;
4968                 if (!next) {
4969                     break;
4970                 }
4971                 nl = (void*)nl + next;
4972             }
4973             break;
4974         }
4975         case DM_DEV_WAIT:
4976         case DM_TABLE_STATUS:
4977         {
4978             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4979             void *cur_data = argptr;
4980             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4981             int spec_size = thunk_type_size(arg_type, 0);
4982             int i;
4983 
4984             for (i = 0; i < host_dm->target_count; i++) {
4985                 uint32_t next = spec->next;
4986                 int slen = strlen((char*)&spec[1]) + 1;
4987                 spec->next = (cur_data - argptr) + spec_size + slen;
4988                 if (guest_data_size < spec->next) {
4989                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4990                     break;
4991                 }
4992                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4993                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4994                 cur_data = argptr + spec->next;
4995                 spec = (void*)host_dm + host_dm->data_start + next;
4996             }
4997             break;
4998         }
4999         case DM_TABLE_DEPS:
5000         {
5001             void *hdata = (void*)host_dm + host_dm->data_start;
5002             int count = *(uint32_t*)hdata;
5003             uint64_t *hdev = hdata + 8;
5004             uint64_t *gdev = argptr + 8;
5005             int i;
5006 
5007             *(uint32_t*)argptr = tswap32(count);
5008             for (i = 0; i < count; i++) {
5009                 *gdev = tswap64(*hdev);
5010                 gdev++;
5011                 hdev++;
5012             }
5013             break;
5014         }
5015         case DM_LIST_VERSIONS:
5016         {
5017             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5018             uint32_t remaining_data = guest_data_size;
5019             void *cur_data = argptr;
5020             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5021             int vers_size = thunk_type_size(arg_type, 0);
5022 
5023             while (1) {
5024                 uint32_t next = vers->next;
5025                 if (next) {
5026                     vers->next = vers_size + (strlen(vers->name) + 1);
5027                 }
5028                 if (remaining_data < vers->next) {
5029                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5030                     break;
5031                 }
5032                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5033                 strcpy(cur_data + vers_size, vers->name);
5034                 cur_data += vers->next;
5035                 remaining_data -= vers->next;
5036                 if (!next) {
5037                     break;
5038                 }
5039                 vers = (void*)vers + next;
5040             }
5041             break;
5042         }
5043         default:
5044             unlock_user(argptr, guest_data, 0);
5045             ret = -TARGET_EINVAL;
5046             goto out;
5047         }
5048         unlock_user(argptr, guest_data, guest_data_size);
5049 
5050         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5051         if (!argptr) {
5052             ret = -TARGET_EFAULT;
5053             goto out;
5054         }
5055         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5056         unlock_user(argptr, arg, target_size);
5057     }
5058 out:
5059     g_free(big_buf);
5060     return ret;
5061 }
5062 
5063 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5064                                int cmd, abi_long arg)
5065 {
5066     void *argptr;
5067     int target_size;
5068     const argtype *arg_type = ie->arg_type;
5069     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5070     abi_long ret;
5071 
5072     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5073     struct blkpg_partition host_part;
5074 
5075     /* Read and convert blkpg */
5076     arg_type++;
5077     target_size = thunk_type_size(arg_type, 0);
5078     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5079     if (!argptr) {
5080         ret = -TARGET_EFAULT;
5081         goto out;
5082     }
5083     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5084     unlock_user(argptr, arg, 0);
5085 
5086     switch (host_blkpg->op) {
5087     case BLKPG_ADD_PARTITION:
5088     case BLKPG_DEL_PARTITION:
5089         /* payload is struct blkpg_partition */
5090         break;
5091     default:
5092         /* Unknown opcode */
5093         ret = -TARGET_EINVAL;
5094         goto out;
5095     }
5096 
5097     /* Read and convert blkpg->data */
5098     arg = (abi_long)(uintptr_t)host_blkpg->data;
5099     target_size = thunk_type_size(part_arg_type, 0);
5100     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5101     if (!argptr) {
5102         ret = -TARGET_EFAULT;
5103         goto out;
5104     }
5105     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5106     unlock_user(argptr, arg, 0);
5107 
5108     /* Swizzle the data pointer to our local copy and call! */
5109     host_blkpg->data = &host_part;
5110     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5111 
5112 out:
5113     return ret;
5114 }
5115 
5116 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5117                                 int fd, int cmd, abi_long arg)
5118 {
5119     const argtype *arg_type = ie->arg_type;
5120     const StructEntry *se;
5121     const argtype *field_types;
5122     const int *dst_offsets, *src_offsets;
5123     int target_size;
5124     void *argptr;
5125     abi_ulong *target_rt_dev_ptr = NULL;
5126     unsigned long *host_rt_dev_ptr = NULL;
5127     abi_long ret;
5128     int i;
5129 
5130     assert(ie->access == IOC_W);
5131     assert(*arg_type == TYPE_PTR);
5132     arg_type++;
5133     assert(*arg_type == TYPE_STRUCT);
5134     target_size = thunk_type_size(arg_type, 0);
5135     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5136     if (!argptr) {
5137         return -TARGET_EFAULT;
5138     }
5139     arg_type++;
5140     assert(*arg_type == (int)STRUCT_rtentry);
5141     se = struct_entries + *arg_type++;
5142     assert(se->convert[0] == NULL);
5143     /* convert struct here to be able to catch rt_dev string */
5144     field_types = se->field_types;
5145     dst_offsets = se->field_offsets[THUNK_HOST];
5146     src_offsets = se->field_offsets[THUNK_TARGET];
5147     for (i = 0; i < se->nb_fields; i++) {
5148         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5149             assert(*field_types == TYPE_PTRVOID);
5150             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5151             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5152             if (*target_rt_dev_ptr != 0) {
5153                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5154                                                   tswapal(*target_rt_dev_ptr));
5155                 if (!*host_rt_dev_ptr) {
5156                     unlock_user(argptr, arg, 0);
5157                     return -TARGET_EFAULT;
5158                 }
5159             } else {
5160                 *host_rt_dev_ptr = 0;
5161             }
5162             field_types++;
5163             continue;
5164         }
5165         field_types = thunk_convert(buf_temp + dst_offsets[i],
5166                                     argptr + src_offsets[i],
5167                                     field_types, THUNK_HOST);
5168     }
5169     unlock_user(argptr, arg, 0);
5170 
5171     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5172 
5173     assert(host_rt_dev_ptr != NULL);
5174     assert(target_rt_dev_ptr != NULL);
5175     if (*host_rt_dev_ptr != 0) {
5176         unlock_user((void *)*host_rt_dev_ptr,
5177                     *target_rt_dev_ptr, 0);
5178     }
5179     return ret;
5180 }
5181 
5182 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5183                                      int fd, int cmd, abi_long arg)
5184 {
5185     int sig = target_to_host_signal(arg);
5186     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5187 }
5188 
5189 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
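/*
 * SIOCGSTAMP/SIOCGSTAMPNS exist in an "old" flavour (32-bit time_t on 32-bit
 * guests) and a 2038-safe 64-bit flavour.  In both cases we issue the host's
 * native ioctl and then convert the result into whichever layout the guest's
 * command selected.
 */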
5190                                     int fd, int cmd, abi_long arg)
5191 {
5192     struct timeval tv;
5193     abi_long ret;
5194 
5195     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5196     if (is_error(ret)) {
5197         return ret;
5198     }
5199 
5200     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5201         if (copy_to_user_timeval(arg, &tv)) {
5202             return -TARGET_EFAULT;
5203         }
5204     } else {
5205         if (copy_to_user_timeval64(arg, &tv)) {
5206             return -TARGET_EFAULT;
5207         }
5208     }
5209 
5210     return ret;
5211 }
5212 
5213 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5214                                       int fd, int cmd, abi_long arg)
5215 {
5216     struct timespec ts;
5217     abi_long ret;
5218 
5219     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5220     if (is_error(ret)) {
5221         return ret;
5222     }
5223 
5224     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5225         if (host_to_target_timespec(arg, &ts)) {
5226             return -TARGET_EFAULT;
5227         }
5228     } else {
5229         if (host_to_target_timespec64(arg, &ts)) {
5230             return -TARGET_EFAULT;
5231         }
5232     }
5233 
5234     return ret;
5235 }
5236 
5237 #ifdef TIOCGPTPEER
5238 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5239                                      int fd, int cmd, abi_long arg)
5240 {
5241     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5242     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5243 }
5244 #endif
5245 
5246 static IOCTLEntry ioctl_entries[] = {
5247 #define IOCTL(cmd, access, ...) \
5248     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5249 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5250     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5251 #define IOCTL_IGNORE(cmd) \
5252     { TARGET_ ## cmd, 0, #cmd },
5253 #include "ioctls.h"
5254     { 0, 0, },
5255 };
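/*
 * ioctl_entries[] is generated from ioctls.h by the IOCTL* macros above: each
 * row records the target command number, the host command to issue, a name
 * for logging, the access mode, an optional special-case handler and the
 * thunk description of the argument.  As an illustration (not necessarily a
 * verbatim line from ioctls.h), an entry of the form
 *     IOCTL(BLKROSET, IOC_W, MK_PTR(TYPE_INT))
 * describes a pointer-to-int argument that is converted from target to host
 * layout before the host BLKROSET is issued.  do_ioctl() below scans this
 * table linearly for the requested command.
 */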
5256 
5257 /* ??? Implement proper locking for ioctls.  */
5258 /* do_ioctl() must return target values and target errnos. */
5259 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5260 {
5261     const IOCTLEntry *ie;
5262     const argtype *arg_type;
5263     abi_long ret;
5264     uint8_t buf_temp[MAX_STRUCT_SIZE];
5265     int target_size;
5266     void *argptr;
5267 
5268     ie = ioctl_entries;
5269     for(;;) {
5270         if (ie->target_cmd == 0) {
5271             qemu_log_mask(
5272                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5273             return -TARGET_ENOSYS;
5274         }
5275         if (ie->target_cmd == cmd)
5276             break;
5277         ie++;
5278     }
5279     arg_type = ie->arg_type;
5280     if (ie->do_ioctl) {
5281         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5282     } else if (!ie->host_cmd) {
5283         /* Some architectures define BSD ioctls in their headers
5284            that are not implemented in Linux.  */
5285         return -TARGET_ENOSYS;
5286     }
5287 
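    /*
     * Generic path: for pointer arguments the guest structure is converted
     * into buf_temp using the thunk machinery.  IOC_W converts guest->host
     * before the ioctl, IOC_R converts host->guest afterwards, and IOC_RW
     * does both.
     */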
5288     switch(arg_type[0]) {
5289     case TYPE_NULL:
5290         /* no argument */
5291         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5292         break;
5293     case TYPE_PTRVOID:
5294     case TYPE_INT:
5295     case TYPE_LONG:
5296     case TYPE_ULONG:
5297         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5298         break;
5299     case TYPE_PTR:
5300         arg_type++;
5301         target_size = thunk_type_size(arg_type, 0);
5302         switch(ie->access) {
5303         case IOC_R:
5304             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5305             if (!is_error(ret)) {
5306                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5307                 if (!argptr)
5308                     return -TARGET_EFAULT;
5309                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5310                 unlock_user(argptr, arg, target_size);
5311             }
5312             break;
5313         case IOC_W:
5314             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5315             if (!argptr)
5316                 return -TARGET_EFAULT;
5317             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5318             unlock_user(argptr, arg, 0);
5319             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5320             break;
5321         default:
5322         case IOC_RW:
5323             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5324             if (!argptr)
5325                 return -TARGET_EFAULT;
5326             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5327             unlock_user(argptr, arg, 0);
5328             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5329             if (!is_error(ret)) {
5330                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5331                 if (!argptr)
5332                     return -TARGET_EFAULT;
5333                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5334                 unlock_user(argptr, arg, target_size);
5335             }
5336             break;
5337         }
5338         break;
5339     default:
5340         qemu_log_mask(LOG_UNIMP,
5341                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5342                       (long)cmd, arg_type[0]);
5343         ret = -TARGET_ENOSYS;
5344         break;
5345     }
5346     return ret;
5347 }
5348 
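/*
 * termios flag translation tables.  Each bitmask_transtbl row is
 * { target_mask, target_bits, host_mask, host_bits }: if the bits selected by
 * the mask on the source side match, the corresponding bits are set on the
 * destination side.  Multi-bit fields such as CBAUD or CSIZE therefore need
 * one row per possible value.
 */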
5349 static const bitmask_transtbl iflag_tbl[] = {
5350         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5351         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5352         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5353         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5354         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5355         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5356         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5357         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5358         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5359         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5360         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5361         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5362         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5363         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5364         { 0, 0, 0, 0 }
5365 };
5366 
5367 static const bitmask_transtbl oflag_tbl[] = {
5368 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5369 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5370 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5371 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5372 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5373 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5374 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5375 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5376 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5377 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5378 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5379 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5380 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5381 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5382 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5383 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5384 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5385 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5386 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5387 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5388 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5389 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5390 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5391 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5392 	{ 0, 0, 0, 0 }
5393 };
5394 
5395 static const bitmask_transtbl cflag_tbl[] = {
5396 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5397 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5398 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5399 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5400 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5401 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5402 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5403 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5404 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5405 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5406 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5407 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5408 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5409 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5410 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5411 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5412 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5413 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5414 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5415 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5416 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5417 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5418 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5419 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5420 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5421 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5422 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5423 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5424 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5425 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5426 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5427 	{ 0, 0, 0, 0 }
5428 };
5429 
5430 static const bitmask_transtbl lflag_tbl[] = {
5431 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5432 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5433 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5434 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5435 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5436 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5437 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5438 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5439 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5440 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5441 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5442 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5443 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5444 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5445 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5446 	{ 0, 0, 0, 0 }
5447 };
5448 
5449 static void target_to_host_termios (void *dst, const void *src)
5450 {
5451     struct host_termios *host = dst;
5452     const struct target_termios *target = src;
5453 
5454     host->c_iflag =
5455         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5456     host->c_oflag =
5457         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5458     host->c_cflag =
5459         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5460     host->c_lflag =
5461         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5462     host->c_line = target->c_line;
5463 
5464     memset(host->c_cc, 0, sizeof(host->c_cc));
5465     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5466     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5467     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5468     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5469     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5470     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5471     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5472     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5473     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5474     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5475     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5476     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5477     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5478     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5479     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5480     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5481     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5482 }
5483 
5484 static void host_to_target_termios (void *dst, const void *src)
5485 {
5486     struct target_termios *target = dst;
5487     const struct host_termios *host = src;
5488 
5489     target->c_iflag =
5490         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5491     target->c_oflag =
5492         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5493     target->c_cflag =
5494         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5495     target->c_lflag =
5496         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5497     target->c_line = host->c_line;
5498 
5499     memset(target->c_cc, 0, sizeof(target->c_cc));
5500     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5501     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5502     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5503     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5504     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5505     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5506     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5507     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5508     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5509     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5510     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5511     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5512     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5513     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5514     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5515     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5516     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5517 }
5518 
5519 static const StructEntry struct_termios_def = {
5520     .convert = { host_to_target_termios, target_to_host_termios },
5521     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5522     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5523 };
5524 
5525 static bitmask_transtbl mmap_flags_tbl[] = {
5526     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5527     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5528     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5529     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5530       MAP_ANONYMOUS, MAP_ANONYMOUS },
5531     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5532       MAP_GROWSDOWN, MAP_GROWSDOWN },
5533     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5534       MAP_DENYWRITE, MAP_DENYWRITE },
5535     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5536       MAP_EXECUTABLE, MAP_EXECUTABLE },
5537     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5538     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5539       MAP_NORESERVE, MAP_NORESERVE },
5540     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5541     /* MAP_STACK had been ignored by the kernel for quite some time.
5542        Recognize it for the target insofar as we do not want to pass
5543        it through to the host.  */
5544     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5545     { 0, 0, 0, 0 }
5546 };
5547 
5548 /*
5549  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5550  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5551  */
5552 #if defined(TARGET_I386)
5553 
5554 /* NOTE: there is really one LDT for all the threads */
5555 static uint8_t *ldt_table;
5556 
5557 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5558 {
5559     int size;
5560     void *p;
5561 
5562     if (!ldt_table)
5563         return 0;
5564     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5565     if (size > bytecount)
5566         size = bytecount;
5567     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5568     if (!p)
5569         return -TARGET_EFAULT;
5570     /* ??? Should this be byteswapped?  */
5571     memcpy(p, ldt_table, size);
5572     unlock_user(p, ptr, size);
5573     return size;
5574 }
5575 
5576 /* XXX: add locking support */
5577 static abi_long write_ldt(CPUX86State *env,
5578                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5579 {
5580     struct target_modify_ldt_ldt_s ldt_info;
5581     struct target_modify_ldt_ldt_s *target_ldt_info;
5582     int seg_32bit, contents, read_exec_only, limit_in_pages;
5583     int seg_not_present, useable, lm;
5584     uint32_t *lp, entry_1, entry_2;
5585 
5586     if (bytecount != sizeof(ldt_info))
5587         return -TARGET_EINVAL;
5588     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5589         return -TARGET_EFAULT;
5590     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5591     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5592     ldt_info.limit = tswap32(target_ldt_info->limit);
5593     ldt_info.flags = tswap32(target_ldt_info->flags);
5594     unlock_user_struct(target_ldt_info, ptr, 0);
5595 
5596     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5597         return -TARGET_EINVAL;
5598     seg_32bit = ldt_info.flags & 1;
5599     contents = (ldt_info.flags >> 1) & 3;
5600     read_exec_only = (ldt_info.flags >> 3) & 1;
5601     limit_in_pages = (ldt_info.flags >> 4) & 1;
5602     seg_not_present = (ldt_info.flags >> 5) & 1;
5603     useable = (ldt_info.flags >> 6) & 1;
5604 #ifdef TARGET_ABI32
5605     lm = 0;
5606 #else
5607     lm = (ldt_info.flags >> 7) & 1;
5608 #endif
5609     if (contents == 3) {
5610         if (oldmode)
5611             return -TARGET_EINVAL;
5612         if (seg_not_present == 0)
5613             return -TARGET_EINVAL;
5614     }
5615     /* allocate the LDT */
5616     if (!ldt_table) {
5617         env->ldt.base = target_mmap(0,
5618                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5619                                     PROT_READ|PROT_WRITE,
5620                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5621         if (env->ldt.base == -1)
5622             return -TARGET_ENOMEM;
5623         memset(g2h(env->ldt.base), 0,
5624                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5625         env->ldt.limit = 0xffff;
5626         ldt_table = g2h(env->ldt.base);
5627     }
5628 
5629     /* NOTE: same code as Linux kernel */
5630     /* Allow LDTs to be cleared by the user. */
5631     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5632         if (oldmode ||
5633             (contents == 0		&&
5634              read_exec_only == 1	&&
5635              seg_32bit == 0		&&
5636              limit_in_pages == 0	&&
5637              seg_not_present == 1	&&
5638              useable == 0 )) {
5639             entry_1 = 0;
5640             entry_2 = 0;
5641             goto install;
5642         }
5643     }
5644 
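    /*
     * Pack the descriptor the same way the kernel does: entry_1 holds
     * base[15:0] and limit[15:0]; entry_2 holds base[31:24], base[23:16],
     * limit[19:16], and the access/flag bits.  The constant 0x7000 sets the
     * S bit and DPL=3 for a user segment.
     */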
5645     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5646         (ldt_info.limit & 0x0ffff);
5647     entry_2 = (ldt_info.base_addr & 0xff000000) |
5648         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5649         (ldt_info.limit & 0xf0000) |
5650         ((read_exec_only ^ 1) << 9) |
5651         (contents << 10) |
5652         ((seg_not_present ^ 1) << 15) |
5653         (seg_32bit << 22) |
5654         (limit_in_pages << 23) |
5655         (lm << 21) |
5656         0x7000;
5657     if (!oldmode)
5658         entry_2 |= (useable << 20);
5659 
5660     /* Install the new entry ...  */
5661 install:
5662     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5663     lp[0] = tswap32(entry_1);
5664     lp[1] = tswap32(entry_2);
5665     return 0;
5666 }
5667 
5668 /* specific and weird i386 syscalls */
5669 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5670                               unsigned long bytecount)
5671 {
5672     abi_long ret;
5673 
5674     switch (func) {
5675     case 0:
5676         ret = read_ldt(ptr, bytecount);
5677         break;
5678     case 1:
5679         ret = write_ldt(env, ptr, bytecount, 1);
5680         break;
5681     case 0x11:
5682         ret = write_ldt(env, ptr, bytecount, 0);
5683         break;
5684     default:
5685         ret = -TARGET_ENOSYS;
5686         break;
5687     }
5688     return ret;
5689 }
5690 
5691 #if defined(TARGET_ABI32)
5692 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5693 {
5694     uint64_t *gdt_table = g2h(env->gdt.base);
5695     struct target_modify_ldt_ldt_s ldt_info;
5696     struct target_modify_ldt_ldt_s *target_ldt_info;
5697     int seg_32bit, contents, read_exec_only, limit_in_pages;
5698     int seg_not_present, useable, lm;
5699     uint32_t *lp, entry_1, entry_2;
5700     int i;
5701 
5702     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5703     if (!target_ldt_info)
5704         return -TARGET_EFAULT;
5705     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5706     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5707     ldt_info.limit = tswap32(target_ldt_info->limit);
5708     ldt_info.flags = tswap32(target_ldt_info->flags);
5709     if (ldt_info.entry_number == -1) {
5710         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5711             if (gdt_table[i] == 0) {
5712                 ldt_info.entry_number = i;
5713                 target_ldt_info->entry_number = tswap32(i);
5714                 break;
5715             }
5716         }
5717     }
5718     unlock_user_struct(target_ldt_info, ptr, 1);
5719 
5720     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5721         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5722            return -TARGET_EINVAL;
5723     seg_32bit = ldt_info.flags & 1;
5724     contents = (ldt_info.flags >> 1) & 3;
5725     read_exec_only = (ldt_info.flags >> 3) & 1;
5726     limit_in_pages = (ldt_info.flags >> 4) & 1;
5727     seg_not_present = (ldt_info.flags >> 5) & 1;
5728     useable = (ldt_info.flags >> 6) & 1;
5729 #ifdef TARGET_ABI32
5730     lm = 0;
5731 #else
5732     lm = (ldt_info.flags >> 7) & 1;
5733 #endif
5734 
5735     if (contents == 3) {
5736         if (seg_not_present == 0)
5737             return -TARGET_EINVAL;
5738     }
5739 
5740     /* NOTE: same code as Linux kernel */
5741     /* Allow LDTs to be cleared by the user. */
5742     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5743         if ((contents == 0             &&
5744              read_exec_only == 1       &&
5745              seg_32bit == 0            &&
5746              limit_in_pages == 0       &&
5747              seg_not_present == 1      &&
5748              useable == 0 )) {
5749             entry_1 = 0;
5750             entry_2 = 0;
5751             goto install;
5752         }
5753     }
5754 
5755     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5756         (ldt_info.limit & 0x0ffff);
5757     entry_2 = (ldt_info.base_addr & 0xff000000) |
5758         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5759         (ldt_info.limit & 0xf0000) |
5760         ((read_exec_only ^ 1) << 9) |
5761         (contents << 10) |
5762         ((seg_not_present ^ 1) << 15) |
5763         (seg_32bit << 22) |
5764         (limit_in_pages << 23) |
5765         (useable << 20) |
5766         (lm << 21) |
5767         0x7000;
5768 
5769     /* Install the new entry ...  */
5770 install:
5771     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5772     lp[0] = tswap32(entry_1);
5773     lp[1] = tswap32(entry_2);
5774     return 0;
5775 }
5776 
5777 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5778 {
5779     struct target_modify_ldt_ldt_s *target_ldt_info;
5780     uint64_t *gdt_table = g2h(env->gdt.base);
5781     uint32_t base_addr, limit, flags;
5782     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5783     int seg_not_present, useable, lm;
5784     uint32_t *lp, entry_1, entry_2;
5785 
5786     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5787     if (!target_ldt_info)
5788         return -TARGET_EFAULT;
5789     idx = tswap32(target_ldt_info->entry_number);
5790     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5791         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5792         unlock_user_struct(target_ldt_info, ptr, 1);
5793         return -TARGET_EINVAL;
5794     }
5795     lp = (uint32_t *)(gdt_table + idx);
5796     entry_1 = tswap32(lp[0]);
5797     entry_2 = tswap32(lp[1]);
5798 
5799     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5800     contents = (entry_2 >> 10) & 3;
5801     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5802     seg_32bit = (entry_2 >> 22) & 1;
5803     limit_in_pages = (entry_2 >> 23) & 1;
5804     useable = (entry_2 >> 20) & 1;
5805 #ifdef TARGET_ABI32
5806     lm = 0;
5807 #else
5808     lm = (entry_2 >> 21) & 1;
5809 #endif
5810     flags = (seg_32bit << 0) | (contents << 1) |
5811         (read_exec_only << 3) | (limit_in_pages << 4) |
5812         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5813     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5814     base_addr = (entry_1 >> 16) |
5815         (entry_2 & 0xff000000) |
5816         ((entry_2 & 0xff) << 16);
5817     target_ldt_info->base_addr = tswapal(base_addr);
5818     target_ldt_info->limit = tswap32(limit);
5819     target_ldt_info->flags = tswap32(flags);
5820     unlock_user_struct(target_ldt_info, ptr, 1);
5821     return 0;
5822 }
5823 
5824 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5825 {
5826     return -ENOSYS;
5827 }
5828 #else
5829 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5830 {
5831     abi_long ret = 0;
5832     abi_ulong val;
5833     int idx;
5834 
5835     switch(code) {
5836     case TARGET_ARCH_SET_GS:
5837     case TARGET_ARCH_SET_FS:
5838         if (code == TARGET_ARCH_SET_GS)
5839             idx = R_GS;
5840         else
5841             idx = R_FS;
5842         cpu_x86_load_seg(env, idx, 0);
5843         env->segs[idx].base = addr;
5844         break;
5845     case TARGET_ARCH_GET_GS:
5846     case TARGET_ARCH_GET_FS:
5847         if (code == TARGET_ARCH_GET_GS)
5848             idx = R_GS;
5849         else
5850             idx = R_FS;
5851         val = env->segs[idx].base;
5852         if (put_user(val, addr, abi_ulong))
5853             ret = -TARGET_EFAULT;
5854         break;
5855     default:
5856         ret = -TARGET_EINVAL;
5857         break;
5858     }
5859     return ret;
5860 }
5861 #endif /* defined(TARGET_ABI32) */
5862 
5863 #endif /* defined(TARGET_I386) */
5864 
5865 #define NEW_STACK_SIZE 0x40000
5866 
5867 
5868 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5869 typedef struct {
5870     CPUArchState *env;
5871     pthread_mutex_t mutex;
5872     pthread_cond_t cond;
5873     pthread_t thread;
5874     uint32_t tid;
5875     abi_ulong child_tidptr;
5876     abi_ulong parent_tidptr;
5877     sigset_t sigmask;
5878 } new_thread_info;
5879 
5880 static void *clone_func(void *arg)
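/*
 * Thread half of do_fork(): the parent fills in new_thread_info, blocks all
 * signals and spawns clone_func() on a fresh stack.  The child publishes its
 * tid, signals info->cond so the parent can return the tid, and then briefly
 * takes clone_lock so that it does not enter cpu_loop() before the parent has
 * finished setting up the new CPU/TLS state.
 */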
5881 {
5882     new_thread_info *info = arg;
5883     CPUArchState *env;
5884     CPUState *cpu;
5885     TaskState *ts;
5886 
5887     rcu_register_thread();
5888     tcg_register_thread();
5889     env = info->env;
5890     cpu = env_cpu(env);
5891     thread_cpu = cpu;
5892     ts = (TaskState *)cpu->opaque;
5893     info->tid = sys_gettid();
5894     task_settid(ts);
5895     if (info->child_tidptr)
5896         put_user_u32(info->tid, info->child_tidptr);
5897     if (info->parent_tidptr)
5898         put_user_u32(info->tid, info->parent_tidptr);
5899     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5900     /* Enable signals.  */
5901     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5902     /* Signal to the parent that we're ready.  */
5903     pthread_mutex_lock(&info->mutex);
5904     pthread_cond_broadcast(&info->cond);
5905     pthread_mutex_unlock(&info->mutex);
5906     /* Wait until the parent has finished initializing the tls state.  */
5907     pthread_mutex_lock(&clone_lock);
5908     pthread_mutex_unlock(&clone_lock);
5909     cpu_loop(env);
5910     /* never exits */
5911     return NULL;
5912 }
5913 
5914 /* do_fork() must return host values and target errnos (unlike most
5915    do_*() functions). */
5916 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5917                    abi_ulong parent_tidptr, target_ulong newtls,
5918                    abi_ulong child_tidptr)
5919 {
5920     CPUState *cpu = env_cpu(env);
5921     int ret;
5922     TaskState *ts;
5923     CPUState *new_cpu;
5924     CPUArchState *new_env;
5925     sigset_t sigmask;
5926 
5927     flags &= ~CLONE_IGNORED_FLAGS;
5928 
5929     /* Emulate vfork() with fork() */
5930     if (flags & CLONE_VFORK)
5931         flags &= ~(CLONE_VFORK | CLONE_VM);
5932 
5933     if (flags & CLONE_VM) {
5934         TaskState *parent_ts = (TaskState *)cpu->opaque;
5935         new_thread_info info;
5936         pthread_attr_t attr;
5937 
5938         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5939             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5940             return -TARGET_EINVAL;
5941         }
5942 
5943         ts = g_new0(TaskState, 1);
5944         init_task_state(ts);
5945 
5946         /* Grab a mutex so that thread setup appears atomic.  */
5947         pthread_mutex_lock(&clone_lock);
5948 
5949         /* we create a new CPU instance. */
5950         new_env = cpu_copy(env);
5951         /* Init regs that differ from the parent.  */
5952         cpu_clone_regs_child(new_env, newsp, flags);
5953         cpu_clone_regs_parent(env, flags);
5954         new_cpu = env_cpu(new_env);
5955         new_cpu->opaque = ts;
5956         ts->bprm = parent_ts->bprm;
5957         ts->info = parent_ts->info;
5958         ts->signal_mask = parent_ts->signal_mask;
5959 
5960         if (flags & CLONE_CHILD_CLEARTID) {
5961             ts->child_tidptr = child_tidptr;
5962         }
5963 
5964         if (flags & CLONE_SETTLS) {
5965             cpu_set_tls (new_env, newtls);
5966         }
5967 
5968         memset(&info, 0, sizeof(info));
5969         pthread_mutex_init(&info.mutex, NULL);
5970         pthread_mutex_lock(&info.mutex);
5971         pthread_cond_init(&info.cond, NULL);
5972         info.env = new_env;
5973         if (flags & CLONE_CHILD_SETTID) {
5974             info.child_tidptr = child_tidptr;
5975         }
5976         if (flags & CLONE_PARENT_SETTID) {
5977             info.parent_tidptr = parent_tidptr;
5978         }
5979 
5980         ret = pthread_attr_init(&attr);
5981         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5982         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5983         /* It is not safe to deliver signals until the child has finished
5984            initializing, so temporarily block all signals.  */
5985         sigfillset(&sigmask);
5986         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5987         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5988 
5989         /* If this is our first additional thread, we need to ensure we
5990          * generate code for parallel execution and flush old translations.
5991          */
5992         if (!parallel_cpus) {
5993             parallel_cpus = true;
5994             tb_flush(cpu);
5995         }
5996 
5997         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5998         /* TODO: Free new CPU state if thread creation failed.  */
5999 
6000         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6001         pthread_attr_destroy(&attr);
6002         if (ret == 0) {
6003             /* Wait for the child to initialize.  */
6004             pthread_cond_wait(&info.cond, &info.mutex);
6005             ret = info.tid;
6006         } else {
6007             ret = -1;
6008         }
6009         pthread_mutex_unlock(&info.mutex);
6010         pthread_cond_destroy(&info.cond);
6011         pthread_mutex_destroy(&info.mutex);
6012         pthread_mutex_unlock(&clone_lock);
6013     } else {
6014         /* if there is no CLONE_VM, we consider it a fork */
6015         if (flags & CLONE_INVALID_FORK_FLAGS) {
6016             return -TARGET_EINVAL;
6017         }
6018 
6019         /* We can't support custom termination signals */
6020         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6021             return -TARGET_EINVAL;
6022         }
6023 
6024         if (block_signals()) {
6025             return -TARGET_ERESTARTSYS;
6026         }
6027 
6028         fork_start();
6029         ret = fork();
6030         if (ret == 0) {
6031             /* Child Process.  */
6032             cpu_clone_regs_child(env, newsp, flags);
6033             fork_end(1);
6034             /* There is a race condition here.  The parent process could
6035                theoretically read the TID in the child process before the child
6036                tid is set.  This would require using either ptrace
6037                (not implemented) or having *_tidptr to point at a shared memory
6038                mapping.  We can't repeat the spinlock hack used above because
6039                the child process gets its own copy of the lock.  */
6040             if (flags & CLONE_CHILD_SETTID)
6041                 put_user_u32(sys_gettid(), child_tidptr);
6042             if (flags & CLONE_PARENT_SETTID)
6043                 put_user_u32(sys_gettid(), parent_tidptr);
6044             ts = (TaskState *)cpu->opaque;
6045             if (flags & CLONE_SETTLS)
6046                 cpu_set_tls (env, newtls);
6047             if (flags & CLONE_CHILD_CLEARTID)
6048                 ts->child_tidptr = child_tidptr;
6049         } else {
6050             cpu_clone_regs_parent(env, flags);
6051             fork_end(0);
6052         }
6053     }
6054     return ret;
6055 }
6056 
6057 /* warning: doesn't handle Linux-specific flags... */
6058 static int target_to_host_fcntl_cmd(int cmd)
6059 {
6060     int ret;
6061 
6062     switch(cmd) {
6063     case TARGET_F_DUPFD:
6064     case TARGET_F_GETFD:
6065     case TARGET_F_SETFD:
6066     case TARGET_F_GETFL:
6067     case TARGET_F_SETFL:
6068         ret = cmd;
6069         break;
6070     case TARGET_F_GETLK:
6071         ret = F_GETLK64;
6072         break;
6073     case TARGET_F_SETLK:
6074         ret = F_SETLK64;
6075         break;
6076     case TARGET_F_SETLKW:
6077         ret = F_SETLKW64;
6078         break;
6079     case TARGET_F_GETOWN:
6080         ret = F_GETOWN;
6081         break;
6082     case TARGET_F_SETOWN:
6083         ret = F_SETOWN;
6084         break;
6085     case TARGET_F_GETSIG:
6086         ret = F_GETSIG;
6087         break;
6088     case TARGET_F_SETSIG:
6089         ret = F_SETSIG;
6090         break;
6091 #if TARGET_ABI_BITS == 32
6092     case TARGET_F_GETLK64:
6093         ret = F_GETLK64;
6094         break;
6095     case TARGET_F_SETLK64:
6096         ret = F_SETLK64;
6097         break;
6098     case TARGET_F_SETLKW64:
6099         ret = F_SETLKW64;
6100         break;
6101 #endif
6102     case TARGET_F_SETLEASE:
6103         ret = F_SETLEASE;
6104         break;
6105     case TARGET_F_GETLEASE:
6106         ret = F_GETLEASE;
6107         break;
6108 #ifdef F_DUPFD_CLOEXEC
6109     case TARGET_F_DUPFD_CLOEXEC:
6110         ret = F_DUPFD_CLOEXEC;
6111         break;
6112 #endif
6113     case TARGET_F_NOTIFY:
6114         ret = F_NOTIFY;
6115         break;
6116 #ifdef F_GETOWN_EX
6117     case TARGET_F_GETOWN_EX:
6118         ret = F_GETOWN_EX;
6119         break;
6120 #endif
6121 #ifdef F_SETOWN_EX
6122     case TARGET_F_SETOWN_EX:
6123         ret = F_SETOWN_EX;
6124         break;
6125 #endif
6126 #ifdef F_SETPIPE_SZ
6127     case TARGET_F_SETPIPE_SZ:
6128         ret = F_SETPIPE_SZ;
6129         break;
6130     case TARGET_F_GETPIPE_SZ:
6131         ret = F_GETPIPE_SZ;
6132         break;
6133 #endif
6134     default:
6135         ret = -TARGET_EINVAL;
6136         break;
6137     }
6138 
6139 #if defined(__powerpc64__)
6140     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6141     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6142      * are not supported by the kernel. The glibc fcntl call actually adjusts
6143      * syscall directly, adjust to what is supported by the kernel.
6144      */
6145     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6146         ret -= F_GETLK64 - 5;
6147     }
6148 #endif
6149 
6150     return ret;
6151 }
6152 
6153 #define FLOCK_TRANSTBL \
6154     switch (type) { \
6155     TRANSTBL_CONVERT(F_RDLCK); \
6156     TRANSTBL_CONVERT(F_WRLCK); \
6157     TRANSTBL_CONVERT(F_UNLCK); \
6158     TRANSTBL_CONVERT(F_EXLCK); \
6159     TRANSTBL_CONVERT(F_SHLCK); \
6160     }
6161 
6162 static int target_to_host_flock(int type)
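/*
 * FLOCK_TRANSTBL is an X-macro: it expands to a switch whose cases are
 * produced by TRANSTBL_CONVERT, which each helper below redefines to map the
 * lock type in the appropriate direction (target->host or host->target).
 */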
6163 {
6164 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6165     FLOCK_TRANSTBL
6166 #undef  TRANSTBL_CONVERT
6167     return -TARGET_EINVAL;
6168 }
6169 
6170 static int host_to_target_flock(int type)
6171 {
6172 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6173     FLOCK_TRANSTBL
6174 #undef  TRANSTBL_CONVERT
6175     /* If we don't know how to convert the value coming from the host,
6176      * copy it to the target field as-is.
6177      */
6178     return type;
6179 }
6180 
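/*
 * The two helpers above are generated from the same FLOCK_TRANSTBL switch
 * by redefining TRANSTBL_CONVERT.  For example, in target_to_host_flock()
 * the table expands to cases of the form
 *
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *
 * while host_to_target_flock() expands the mirror image
 *
 *     case F_RDLCK: return TARGET_F_RDLCK;
 *
 * so adding a lock type to FLOCK_TRANSTBL keeps both directions in sync.
 */
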
6181 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6182                                             abi_ulong target_flock_addr)
6183 {
6184     struct target_flock *target_fl;
6185     int l_type;
6186 
6187     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6188         return -TARGET_EFAULT;
6189     }
6190 
6191     __get_user(l_type, &target_fl->l_type);
6192     l_type = target_to_host_flock(l_type);
6193     if (l_type < 0) {
6194         return l_type;
6195     }
6196     fl->l_type = l_type;
6197     __get_user(fl->l_whence, &target_fl->l_whence);
6198     __get_user(fl->l_start, &target_fl->l_start);
6199     __get_user(fl->l_len, &target_fl->l_len);
6200     __get_user(fl->l_pid, &target_fl->l_pid);
6201     unlock_user_struct(target_fl, target_flock_addr, 0);
6202     return 0;
6203 }
6204 
6205 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6206                                           const struct flock64 *fl)
6207 {
6208     struct target_flock *target_fl;
6209     short l_type;
6210 
6211     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6212         return -TARGET_EFAULT;
6213     }
6214 
6215     l_type = host_to_target_flock(fl->l_type);
6216     __put_user(l_type, &target_fl->l_type);
6217     __put_user(fl->l_whence, &target_fl->l_whence);
6218     __put_user(fl->l_start, &target_fl->l_start);
6219     __put_user(fl->l_len, &target_fl->l_len);
6220     __put_user(fl->l_pid, &target_fl->l_pid);
6221     unlock_user_struct(target_fl, target_flock_addr, 1);
6222     return 0;
6223 }
6224 
6225 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6226 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6227 
6228 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6229 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6230                                                    abi_ulong target_flock_addr)
6231 {
6232     struct target_oabi_flock64 *target_fl;
6233     int l_type;
6234 
6235     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6236         return -TARGET_EFAULT;
6237     }
6238 
6239     __get_user(l_type, &target_fl->l_type);
6240     l_type = target_to_host_flock(l_type);
6241     if (l_type < 0) {
6242         return l_type;
6243     }
6244     fl->l_type = l_type;
6245     __get_user(fl->l_whence, &target_fl->l_whence);
6246     __get_user(fl->l_start, &target_fl->l_start);
6247     __get_user(fl->l_len, &target_fl->l_len);
6248     __get_user(fl->l_pid, &target_fl->l_pid);
6249     unlock_user_struct(target_fl, target_flock_addr, 0);
6250     return 0;
6251 }
6252 
6253 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6254                                                  const struct flock64 *fl)
6255 {
6256     struct target_oabi_flock64 *target_fl;
6257     short l_type;
6258 
6259     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6260         return -TARGET_EFAULT;
6261     }
6262 
6263     l_type = host_to_target_flock(fl->l_type);
6264     __put_user(l_type, &target_fl->l_type);
6265     __put_user(fl->l_whence, &target_fl->l_whence);
6266     __put_user(fl->l_start, &target_fl->l_start);
6267     __put_user(fl->l_len, &target_fl->l_len);
6268     __put_user(fl->l_pid, &target_fl->l_pid);
6269     unlock_user_struct(target_fl, target_flock_addr, 1);
6270     return 0;
6271 }
6272 #endif
6273 
6274 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6275                                               abi_ulong target_flock_addr)
6276 {
6277     struct target_flock64 *target_fl;
6278     int l_type;
6279 
6280     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6281         return -TARGET_EFAULT;
6282     }
6283 
6284     __get_user(l_type, &target_fl->l_type);
6285     l_type = target_to_host_flock(l_type);
6286     if (l_type < 0) {
6287         return l_type;
6288     }
6289     fl->l_type = l_type;
6290     __get_user(fl->l_whence, &target_fl->l_whence);
6291     __get_user(fl->l_start, &target_fl->l_start);
6292     __get_user(fl->l_len, &target_fl->l_len);
6293     __get_user(fl->l_pid, &target_fl->l_pid);
6294     unlock_user_struct(target_fl, target_flock_addr, 0);
6295     return 0;
6296 }
6297 
6298 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6299                                             const struct flock64 *fl)
6300 {
6301     struct target_flock64 *target_fl;
6302     short l_type;
6303 
6304     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6305         return -TARGET_EFAULT;
6306     }
6307 
6308     l_type = host_to_target_flock(fl->l_type);
6309     __put_user(l_type, &target_fl->l_type);
6310     __put_user(fl->l_whence, &target_fl->l_whence);
6311     __put_user(fl->l_start, &target_fl->l_start);
6312     __put_user(fl->l_len, &target_fl->l_len);
6313     __put_user(fl->l_pid, &target_fl->l_pid);
6314     unlock_user_struct(target_fl, target_flock_addr, 1);
6315     return 0;
6316 }
6317 
6318 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6319 {
6320     struct flock64 fl64;
6321 #ifdef F_GETOWN_EX
6322     struct f_owner_ex fox;
6323     struct target_f_owner_ex *target_fox;
6324 #endif
6325     abi_long ret;
6326     int host_cmd = target_to_host_fcntl_cmd(cmd);
6327 
6328     if (host_cmd == -TARGET_EINVAL)
6329         return host_cmd;
6330 
6331     switch(cmd) {
6332     case TARGET_F_GETLK:
6333         ret = copy_from_user_flock(&fl64, arg);
6334         if (ret) {
6335             return ret;
6336         }
6337         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6338         if (ret == 0) {
6339             ret = copy_to_user_flock(arg, &fl64);
6340         }
6341         break;
6342 
6343     case TARGET_F_SETLK:
6344     case TARGET_F_SETLKW:
6345         ret = copy_from_user_flock(&fl64, arg);
6346         if (ret) {
6347             return ret;
6348         }
6349         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6350         break;
6351 
6352     case TARGET_F_GETLK64:
6353         ret = copy_from_user_flock64(&fl64, arg);
6354         if (ret) {
6355             return ret;
6356         }
6357         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6358         if (ret == 0) {
6359             ret = copy_to_user_flock64(arg, &fl64);
6360         }
6361         break;
6362     case TARGET_F_SETLK64:
6363     case TARGET_F_SETLKW64:
6364         ret = copy_from_user_flock64(&fl64, arg);
6365         if (ret) {
6366             return ret;
6367         }
6368         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6369         break;
6370 
6371     case TARGET_F_GETFL:
6372         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6373         if (ret >= 0) {
6374             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6375         }
6376         break;
6377 
6378     case TARGET_F_SETFL:
6379         ret = get_errno(safe_fcntl(fd, host_cmd,
6380                                    target_to_host_bitmask(arg,
6381                                                           fcntl_flags_tbl)));
6382         break;
6383 
6384 #ifdef F_GETOWN_EX
6385     case TARGET_F_GETOWN_EX:
6386         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6387         if (ret >= 0) {
6388             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6389                 return -TARGET_EFAULT;
6390             target_fox->type = tswap32(fox.type);
6391             target_fox->pid = tswap32(fox.pid);
6392             unlock_user_struct(target_fox, arg, 1);
6393         }
6394         break;
6395 #endif
6396 
6397 #ifdef F_SETOWN_EX
6398     case TARGET_F_SETOWN_EX:
6399         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6400             return -TARGET_EFAULT;
6401         fox.type = tswap32(target_fox->type);
6402         fox.pid = tswap32(target_fox->pid);
6403         unlock_user_struct(target_fox, arg, 0);
6404         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6405         break;
6406 #endif
6407 
6408     case TARGET_F_SETOWN:
6409     case TARGET_F_GETOWN:
6410     case TARGET_F_SETSIG:
6411     case TARGET_F_GETSIG:
6412     case TARGET_F_SETLEASE:
6413     case TARGET_F_GETLEASE:
6414     case TARGET_F_SETPIPE_SZ:
6415     case TARGET_F_GETPIPE_SZ:
6416         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6417         break;
6418 
6419     default:
6420         ret = get_errno(safe_fcntl(fd, cmd, arg));
6421         break;
6422     }
6423     return ret;
6424 }
6425 
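/*
 * Illustrative flow through do_fcntl() above for a locking command: a
 * guest fcntl(fd, F_SETLK, &fl) is handled roughly as
 *
 *     copy_from_user_flock(&fl64, arg);     // guest struct -> host flock64
 *     safe_fcntl(fd, F_SETLK64, &fl64);     // host_cmd from the table above
 *
 * and only the F_GETLK/F_GETLK64 paths copy the (possibly updated) lock
 * description back to guest memory afterwards.
 */
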
6426 #ifdef USE_UID16
6427 
6428 static inline int high2lowuid(int uid)
6429 {
6430     if (uid > 65535)
6431         return 65534;
6432     else
6433         return uid;
6434 }
6435 
6436 static inline int high2lowgid(int gid)
6437 {
6438     if (gid > 65535)
6439         return 65534;
6440     else
6441         return gid;
6442 }
6443 
6444 static inline int low2highuid(int uid)
6445 {
6446     if ((int16_t)uid == -1)
6447         return -1;
6448     else
6449         return uid;
6450 }
6451 
6452 static inline int low2highgid(int gid)
6453 {
6454     if ((int16_t)gid == -1)
6455         return -1;
6456     else
6457         return gid;
6458 }
6459 static inline int tswapid(int id)
6460 {
6461     return tswap16(id);
6462 }
6463 
6464 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6465 
6466 #else /* !USE_UID16 */
6467 static inline int high2lowuid(int uid)
6468 {
6469     return uid;
6470 }
6471 static inline int high2lowgid(int gid)
6472 {
6473     return gid;
6474 }
6475 static inline int low2highuid(int uid)
6476 {
6477     return uid;
6478 }
6479 static inline int low2highgid(int gid)
6480 {
6481     return gid;
6482 }
6483 static inline int tswapid(int id)
6484 {
6485     return tswap32(id);
6486 }
6487 
6488 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6489 
6490 #endif /* USE_UID16 */
6491 
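/*
 * Illustrative behaviour of the 16-bit UID helpers above on a USE_UID16
 * target: a host uid that does not fit in 16 bits is clamped to the
 * overflow uid, while the "unchanged" value -1 survives the narrow/widen
 * round trip:
 *
 *     high2lowuid(100000);    // -> 65534 (does not fit in 16 bits)
 *     high2lowuid(1000);      // -> 1000  (fits, passed through)
 *     low2highuid(0xffff);    // -> -1    (preserve "no change")
 */
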
6492 /* We must do direct syscalls for setting UID/GID, because we want to
6493  * implement the Linux system call semantics of "change only for this thread",
6494  * not the libc/POSIX semantics of "change for all threads in process".
6495  * (See http://ewontfix.com/17/ for more details.)
6496  * We use the 32-bit version of the syscalls if present; if it is not
6497  * then either the host architecture supports 32-bit UIDs natively with
6498  * the standard syscall, or the 16-bit UID is the best we can do.
6499  */
6500 #ifdef __NR_setuid32
6501 #define __NR_sys_setuid __NR_setuid32
6502 #else
6503 #define __NR_sys_setuid __NR_setuid
6504 #endif
6505 #ifdef __NR_setgid32
6506 #define __NR_sys_setgid __NR_setgid32
6507 #else
6508 #define __NR_sys_setgid __NR_setgid
6509 #endif
6510 #ifdef __NR_setresuid32
6511 #define __NR_sys_setresuid __NR_setresuid32
6512 #else
6513 #define __NR_sys_setresuid __NR_setresuid
6514 #endif
6515 #ifdef __NR_setresgid32
6516 #define __NR_sys_setresgid __NR_setresgid32
6517 #else
6518 #define __NR_sys_setresgid __NR_setresgid
6519 #endif
6520 
6521 _syscall1(int, sys_setuid, uid_t, uid)
6522 _syscall1(int, sys_setgid, gid_t, gid)
6523 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6524 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6525 
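/*
 * As the comment block above explains, the guest-visible behaviour is that
 * of the raw Linux syscalls, so e.g. a call such as
 *
 *     sys_setuid(low2highuid(target_euid));    // target_euid is illustrative
 *
 * changes the credentials of the calling thread only, whereas the glibc
 * setuid() wrapper would broadcast the change to every thread of the QEMU
 * process.
 */
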
6526 void syscall_init(void)
6527 {
6528     IOCTLEntry *ie;
6529     const argtype *arg_type;
6530     int size;
6531     int i;
6532 
6533     thunk_init(STRUCT_MAX);
6534 
6535 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6536 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6537 #include "syscall_types.h"
6538 #undef STRUCT
6539 #undef STRUCT_SPECIAL
6540 
6541     /* Build the target_to_host_errno_table[] from
6542      * host_to_target_errno_table[]. */
6543     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6544         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6545     }
6546 
6547     /* We patch the ioctl size if necessary. We rely on the fact that
6548        no ioctl has all the bits set to '1' in the size field. */
6549     ie = ioctl_entries;
6550     while (ie->target_cmd != 0) {
6551         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6552             TARGET_IOC_SIZEMASK) {
6553             arg_type = ie->arg_type;
6554             if (arg_type[0] != TYPE_PTR) {
6555                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6556                         ie->target_cmd);
6557                 exit(1);
6558             }
6559             arg_type++;
6560             size = thunk_type_size(arg_type, 0);
6561             ie->target_cmd = (ie->target_cmd &
6562                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6563                 (size << TARGET_IOC_SIZESHIFT);
6564         }
6565 
6566         /* automatic consistency check when host and target arch match */
6567 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6568     (defined(__x86_64__) && defined(TARGET_X86_64))
6569         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6570             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6571                     ie->name, ie->target_cmd, ie->host_cmd);
6572         }
6573 #endif
6574         ie++;
6575     }
6576 }
6577 
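/*
 * A note on the size patching above: an ioctl_entries[] entry whose
 * target_cmd was declared with the size field set to TARGET_IOC_SIZEMASK
 * gets that field replaced at startup with the real size of the structure
 * its TYPE_PTR argument points to, roughly
 *
 *     size = thunk_type_size(arg_type, 0);    // arg_type past the TYPE_PTR
 *     target_cmd = (target_cmd &
 *                   ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
 *                  (size << TARGET_IOC_SIZESHIFT);
 *
 * which is why such an entry must describe a pointer argument.
 */
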
6578 #if TARGET_ABI_BITS == 32
6579 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6580 {
6581 #ifdef TARGET_WORDS_BIGENDIAN
6582     return ((uint64_t)word0 << 32) | word1;
6583 #else
6584     return ((uint64_t)word1 << 32) | word0;
6585 #endif
6586 }
6587 #else /* TARGET_ABI_BITS == 32 */
6588 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6589 {
6590     return word0;
6591 }
6592 #endif /* TARGET_ABI_BITS != 32 */
6593 
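/*
 * Illustrative example for target_offset64() above: a 32-bit guest passes
 * a 64-bit file offset as two abi_ulong halves whose order depends on the
 * target endianness.  For an offset of 0x100000000:
 *
 *     big-endian guest:     target_offset64(0x00000001, 0x00000000)
 *     little-endian guest:  target_offset64(0x00000000, 0x00000001)
 *
 * both reconstruct the same uint64_t; on 64-bit ABIs the second word is
 * simply ignored.
 */
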
6594 #ifdef TARGET_NR_truncate64
6595 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6596                                          abi_long arg2,
6597                                          abi_long arg3,
6598                                          abi_long arg4)
6599 {
6600     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6601         arg2 = arg3;
6602         arg3 = arg4;
6603     }
6604     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6605 }
6606 #endif
6607 
6608 #ifdef TARGET_NR_ftruncate64
6609 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6610                                           abi_long arg2,
6611                                           abi_long arg3,
6612                                           abi_long arg4)
6613 {
6614     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6615         arg2 = arg3;
6616         arg3 = arg4;
6617     }
6618     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6619 }
6620 #endif
6621 
6622 #if defined(TARGET_NR_timer_settime) || \
6623     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6624 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6625                                                  abi_ulong target_addr)
6626 {
6627     struct target_itimerspec *target_itspec;
6628 
6629     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6630         return -TARGET_EFAULT;
6631     }
6632 
6633     host_itspec->it_interval.tv_sec =
6634                             tswapal(target_itspec->it_interval.tv_sec);
6635     host_itspec->it_interval.tv_nsec =
6636                             tswapal(target_itspec->it_interval.tv_nsec);
6637     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6638     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6639 
6640     unlock_user_struct(target_itspec, target_addr, 1);
6641     return 0;
6642 }
6643 #endif
6644 
6645 #if ((defined(TARGET_NR_timerfd_gettime) || \
6646       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6647     defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6648 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6649                                                struct itimerspec *host_its)
6650 {
6651     struct target_itimerspec *target_itspec;
6652 
6653     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6654         return -TARGET_EFAULT;
6655     }
6656 
6657     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6658     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6659 
6660     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6661     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6662 
6663     unlock_user_struct(target_itspec, target_addr, 0);
6664     return 0;
6665 }
6666 #endif
6667 
6668 #if defined(TARGET_NR_adjtimex) || \
6669     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6670 static inline abi_long target_to_host_timex(struct timex *host_tx,
6671                                             abi_long target_addr)
6672 {
6673     struct target_timex *target_tx;
6674 
6675     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6676         return -TARGET_EFAULT;
6677     }
6678 
6679     __get_user(host_tx->modes, &target_tx->modes);
6680     __get_user(host_tx->offset, &target_tx->offset);
6681     __get_user(host_tx->freq, &target_tx->freq);
6682     __get_user(host_tx->maxerror, &target_tx->maxerror);
6683     __get_user(host_tx->esterror, &target_tx->esterror);
6684     __get_user(host_tx->status, &target_tx->status);
6685     __get_user(host_tx->constant, &target_tx->constant);
6686     __get_user(host_tx->precision, &target_tx->precision);
6687     __get_user(host_tx->tolerance, &target_tx->tolerance);
6688     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6689     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6690     __get_user(host_tx->tick, &target_tx->tick);
6691     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6692     __get_user(host_tx->jitter, &target_tx->jitter);
6693     __get_user(host_tx->shift, &target_tx->shift);
6694     __get_user(host_tx->stabil, &target_tx->stabil);
6695     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6696     __get_user(host_tx->calcnt, &target_tx->calcnt);
6697     __get_user(host_tx->errcnt, &target_tx->errcnt);
6698     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6699     __get_user(host_tx->tai, &target_tx->tai);
6700 
6701     unlock_user_struct(target_tx, target_addr, 0);
6702     return 0;
6703 }
6704 
6705 static inline abi_long host_to_target_timex(abi_long target_addr,
6706                                             struct timex *host_tx)
6707 {
6708     struct target_timex *target_tx;
6709 
6710     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6711         return -TARGET_EFAULT;
6712     }
6713 
6714     __put_user(host_tx->modes, &target_tx->modes);
6715     __put_user(host_tx->offset, &target_tx->offset);
6716     __put_user(host_tx->freq, &target_tx->freq);
6717     __put_user(host_tx->maxerror, &target_tx->maxerror);
6718     __put_user(host_tx->esterror, &target_tx->esterror);
6719     __put_user(host_tx->status, &target_tx->status);
6720     __put_user(host_tx->constant, &target_tx->constant);
6721     __put_user(host_tx->precision, &target_tx->precision);
6722     __put_user(host_tx->tolerance, &target_tx->tolerance);
6723     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6724     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6725     __put_user(host_tx->tick, &target_tx->tick);
6726     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6727     __put_user(host_tx->jitter, &target_tx->jitter);
6728     __put_user(host_tx->shift, &target_tx->shift);
6729     __put_user(host_tx->stabil, &target_tx->stabil);
6730     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6731     __put_user(host_tx->calcnt, &target_tx->calcnt);
6732     __put_user(host_tx->errcnt, &target_tx->errcnt);
6733     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6734     __put_user(host_tx->tai, &target_tx->tai);
6735 
6736     unlock_user_struct(target_tx, target_addr, 1);
6737     return 0;
6738 }
6739 #endif
6740 
6741 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6742                                                abi_ulong target_addr)
6743 {
6744     struct target_sigevent *target_sevp;
6745 
6746     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6747         return -TARGET_EFAULT;
6748     }
6749 
6750     /* This union is awkward on 64 bit systems because it has a 32 bit
6751      * integer and a pointer in it; we follow the conversion approach
6752      * used for handling sigval types in signal.c so the guest should get
6753      * the correct value back even if we did a 64 bit byteswap and it's
6754      * using the 32 bit integer.
6755      */
6756     host_sevp->sigev_value.sival_ptr =
6757         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6758     host_sevp->sigev_signo =
6759         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6760     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6761     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6762 
6763     unlock_user_struct(target_sevp, target_addr, 1);
6764     return 0;
6765 }
6766 
6767 #if defined(TARGET_NR_mlockall)
6768 static inline int target_to_host_mlockall_arg(int arg)
6769 {
6770     int result = 0;
6771 
6772     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6773         result |= MCL_CURRENT;
6774     }
6775     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6776         result |= MCL_FUTURE;
6777     }
6778     return result;
6779 }
6780 #endif
6781 
6782 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6783      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6784      defined(TARGET_NR_newfstatat))
6785 static inline abi_long host_to_target_stat64(void *cpu_env,
6786                                              abi_ulong target_addr,
6787                                              struct stat *host_st)
6788 {
6789 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6790     if (((CPUARMState *)cpu_env)->eabi) {
6791         struct target_eabi_stat64 *target_st;
6792 
6793         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6794             return -TARGET_EFAULT;
6795         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6796         __put_user(host_st->st_dev, &target_st->st_dev);
6797         __put_user(host_st->st_ino, &target_st->st_ino);
6798 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6799         __put_user(host_st->st_ino, &target_st->__st_ino);
6800 #endif
6801         __put_user(host_st->st_mode, &target_st->st_mode);
6802         __put_user(host_st->st_nlink, &target_st->st_nlink);
6803         __put_user(host_st->st_uid, &target_st->st_uid);
6804         __put_user(host_st->st_gid, &target_st->st_gid);
6805         __put_user(host_st->st_rdev, &target_st->st_rdev);
6806         __put_user(host_st->st_size, &target_st->st_size);
6807         __put_user(host_st->st_blksize, &target_st->st_blksize);
6808         __put_user(host_st->st_blocks, &target_st->st_blocks);
6809         __put_user(host_st->st_atime, &target_st->target_st_atime);
6810         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6811         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6812 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6813         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6814         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6815         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6816 #endif
6817         unlock_user_struct(target_st, target_addr, 1);
6818     } else
6819 #endif
6820     {
6821 #if defined(TARGET_HAS_STRUCT_STAT64)
6822         struct target_stat64 *target_st;
6823 #else
6824         struct target_stat *target_st;
6825 #endif
6826 
6827         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6828             return -TARGET_EFAULT;
6829         memset(target_st, 0, sizeof(*target_st));
6830         __put_user(host_st->st_dev, &target_st->st_dev);
6831         __put_user(host_st->st_ino, &target_st->st_ino);
6832 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6833         __put_user(host_st->st_ino, &target_st->__st_ino);
6834 #endif
6835         __put_user(host_st->st_mode, &target_st->st_mode);
6836         __put_user(host_st->st_nlink, &target_st->st_nlink);
6837         __put_user(host_st->st_uid, &target_st->st_uid);
6838         __put_user(host_st->st_gid, &target_st->st_gid);
6839         __put_user(host_st->st_rdev, &target_st->st_rdev);
6840         /* XXX: better use of kernel struct */
6841         __put_user(host_st->st_size, &target_st->st_size);
6842         __put_user(host_st->st_blksize, &target_st->st_blksize);
6843         __put_user(host_st->st_blocks, &target_st->st_blocks);
6844         __put_user(host_st->st_atime, &target_st->target_st_atime);
6845         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6846         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6847 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6848         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6849         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6850         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6851 #endif
6852         unlock_user_struct(target_st, target_addr, 1);
6853     }
6854 
6855     return 0;
6856 }
6857 #endif
6858 
6859 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6860 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6861                                             abi_ulong target_addr)
6862 {
6863     struct target_statx *target_stx;
6864 
6865     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
6866         return -TARGET_EFAULT;
6867     }
6868     memset(target_stx, 0, sizeof(*target_stx));
6869 
6870     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6871     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6872     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6873     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6874     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6875     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6876     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6877     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6878     __put_user(host_stx->stx_size, &target_stx->stx_size);
6879     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6880     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6881     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6882     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6883     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6884     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6885     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6886     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6887     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6888     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6889     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6890     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6891     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6892     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6893 
6894     unlock_user_struct(target_stx, target_addr, 1);
6895 
6896     return 0;
6897 }
6898 #endif
6899 
6900 
6901 /* ??? Using host futex calls even when target atomic operations
6902    are not really atomic probably breaks things.  However, implementing
6903    futexes locally would make futexes shared between multiple processes
6904    tricky, and they would probably be useless anyway because guest
6905    atomic operations won't work either.  */
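/*
 * Note on the conversions in do_futex() below: uaddr and uaddr2 are guest
 * virtual addresses and are translated with g2h() before being handed to
 * the host futex syscall, while the compare value for FUTEX_WAIT (and the
 * val3 of FUTEX_CMP_REQUEUE) goes through tswap32(), because the kernel
 * compares it against the futex word in guest memory, which is stored in
 * target byte order.
 */
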
6906 #if defined(TARGET_NR_futex)
6907 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6908                     target_ulong uaddr2, int val3)
6909 {
6910     struct timespec ts, *pts;
6911     int base_op;
6912 
6913     /* ??? We assume FUTEX_* constants are the same on both host
6914        and target.  */
6915 #ifdef FUTEX_CMD_MASK
6916     base_op = op & FUTEX_CMD_MASK;
6917 #else
6918     base_op = op;
6919 #endif
6920     switch (base_op) {
6921     case FUTEX_WAIT:
6922     case FUTEX_WAIT_BITSET:
6923         if (timeout) {
6924             pts = &ts;
6925             target_to_host_timespec(pts, timeout);
6926         } else {
6927             pts = NULL;
6928         }
6929         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6930                          pts, NULL, val3));
6931     case FUTEX_WAKE:
6932         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6933     case FUTEX_FD:
6934         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6935     case FUTEX_REQUEUE:
6936     case FUTEX_CMP_REQUEUE:
6937     case FUTEX_WAKE_OP:
6938         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6939            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6940            But the prototype takes a `struct timespec *'; insert casts
6941            to satisfy the compiler.  We do not need to tswap TIMEOUT
6942            since it's not compared to guest memory.  */
6943         pts = (struct timespec *)(uintptr_t) timeout;
6944         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6945                                     g2h(uaddr2),
6946                                     (base_op == FUTEX_CMP_REQUEUE
6947                                      ? tswap32(val3)
6948                                      : val3)));
6949     default:
6950         return -TARGET_ENOSYS;
6951     }
6952 }
6953 #endif
6954 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6955 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6956                                      abi_long handle, abi_long mount_id,
6957                                      abi_long flags)
6958 {
6959     struct file_handle *target_fh;
6960     struct file_handle *fh;
6961     int mid = 0;
6962     abi_long ret;
6963     char *name;
6964     unsigned int size, total_size;
6965 
6966     if (get_user_s32(size, handle)) {
6967         return -TARGET_EFAULT;
6968     }
6969 
6970     name = lock_user_string(pathname);
6971     if (!name) {
6972         return -TARGET_EFAULT;
6973     }
6974 
6975     total_size = sizeof(struct file_handle) + size;
6976     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6977     if (!target_fh) {
6978         unlock_user(name, pathname, 0);
6979         return -TARGET_EFAULT;
6980     }
6981 
6982     fh = g_malloc0(total_size);
6983     fh->handle_bytes = size;
6984 
6985     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6986     unlock_user(name, pathname, 0);
6987 
6988     /* man name_to_handle_at(2):
6989      * Other than the use of the handle_bytes field, the caller should treat
6990      * the file_handle structure as an opaque data type
6991      */
6992 
6993     memcpy(target_fh, fh, total_size);
6994     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6995     target_fh->handle_type = tswap32(fh->handle_type);
6996     g_free(fh);
6997     unlock_user(target_fh, handle, total_size);
6998 
6999     if (put_user_s32(mid, mount_id)) {
7000         return -TARGET_EFAULT;
7001     }
7002 
7003     return ret;
7004 
7005 }
7006 #endif
7007 
7008 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7009 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7010                                      abi_long flags)
7011 {
7012     struct file_handle *target_fh;
7013     struct file_handle *fh;
7014     unsigned int size, total_size;
7015     abi_long ret;
7016 
7017     if (get_user_s32(size, handle)) {
7018         return -TARGET_EFAULT;
7019     }
7020 
7021     total_size = sizeof(struct file_handle) + size;
7022     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7023     if (!target_fh) {
7024         return -TARGET_EFAULT;
7025     }
7026 
7027     fh = g_memdup(target_fh, total_size);
7028     fh->handle_bytes = size;
7029     fh->handle_type = tswap32(target_fh->handle_type);
7030 
7031     ret = get_errno(open_by_handle_at(mount_fd, fh,
7032                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7033 
7034     g_free(fh);
7035 
7036     unlock_user(target_fh, handle, total_size);
7037 
7038     return ret;
7039 }
7040 #endif
7041 
7042 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7043 
7044 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7045 {
7046     int host_flags;
7047     target_sigset_t *target_mask;
7048     sigset_t host_mask;
7049     abi_long ret;
7050 
7051     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7052         return -TARGET_EINVAL;
7053     }
7054     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7055         return -TARGET_EFAULT;
7056     }
7057 
7058     target_to_host_sigset(&host_mask, target_mask);
7059 
7060     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7061 
7062     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7063     if (ret >= 0) {
7064         fd_trans_register(ret, &target_signalfd_trans);
7065     }
7066 
7067     unlock_user_struct(target_mask, mask, 0);
7068 
7069     return ret;
7070 }
7071 #endif
7072 
7073 /* Map host to target signal numbers for the wait family of syscalls.
7074    Assume all other status bits are the same.  */
7075 int host_to_target_waitstatus(int status)
7076 {
7077     if (WIFSIGNALED(status)) {
7078         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7079     }
7080     if (WIFSTOPPED(status)) {
7081         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7082                | (status & 0xff);
7083     }
7084     return status;
7085 }
7086 
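/*
 * Illustrative example for host_to_target_waitstatus() above: for a
 * stopped child the host status has the layout (WSTOPSIG << 8) | 0x7f, so
 * a host status of ((SIGSTOP << 8) | 0x7f) becomes
 *
 *     (host_to_target_signal(SIGSTOP) << 8) | 0x7f
 *
 * i.e. only the signal number is remapped; the "stopped" marker and any
 * other status bits are passed through unchanged.
 */
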
7087 static int open_self_cmdline(void *cpu_env, int fd)
7088 {
7089     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7090     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7091     int i;
7092 
7093     for (i = 0; i < bprm->argc; i++) {
7094         size_t len = strlen(bprm->argv[i]) + 1;
7095 
7096         if (write(fd, bprm->argv[i], len) != len) {
7097             return -1;
7098         }
7099     }
7100 
7101     return 0;
7102 }
7103 
7104 static int open_self_maps(void *cpu_env, int fd)
7105 {
7106     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7107     TaskState *ts = cpu->opaque;
7108     FILE *fp;
7109     char *line = NULL;
7110     size_t len = 0;
7111     ssize_t read;
7112 
7113     fp = fopen("/proc/self/maps", "r");
7114     if (fp == NULL) {
7115         return -1;
7116     }
7117 
7118     while ((read = getline(&line, &len, fp)) != -1) {
7119         int fields, dev_maj, dev_min, inode;
7120         uint64_t min, max, offset;
7121         char flag_r, flag_w, flag_x, flag_p;
7122         char path[513] = ""; /* %512s below needs 512 chars plus NUL */
7123         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7124                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7125                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7126 
7127         if ((fields < 10) || (fields > 11)) {
7128             continue;
7129         }
7130         if (h2g_valid(min)) {
7131             int flags = page_get_flags(h2g(min));
7132             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7133             if (page_check_range(h2g(min), max - min, flags) == -1) {
7134                 continue;
7135             }
7136             if (h2g(min) == ts->info->stack_limit) {
7137                 pstrcpy(path, sizeof(path), "      [stack]");
7138             }
7139             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7140                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7141                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7142                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7143                     path[0] ? "         " : "", path);
7144         }
7145     }
7146 
7147     free(line);
7148     fclose(fp);
7149 
7150     return 0;
7151 }
7152 
7153 static int open_self_stat(void *cpu_env, int fd)
7154 {
7155     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7156     TaskState *ts = cpu->opaque;
7157     abi_ulong start_stack = ts->info->start_stack;
7158     int i;
7159 
7160     for (i = 0; i < 44; i++) {
7161       char buf[128];
7162       int len;
7163       uint64_t val = 0;
7164 
7165       if (i == 0) {
7166         /* pid */
7167         val = getpid();
7168         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7169       } else if (i == 1) {
7170         /* app name */
7171         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7172       } else if (i == 27) {
7173         /* stack bottom */
7174         val = start_stack;
7175         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7176       } else {
7177         /* every other field is simply reported as 0 */
7178         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7179       }
7180 
7181       len = strlen(buf);
7182       if (write(fd, buf, len) != len) {
7183           return -1;
7184       }
7185     }
7186 
7187     return 0;
7188 }
7189 
7190 static int open_self_auxv(void *cpu_env, int fd)
7191 {
7192     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7193     TaskState *ts = cpu->opaque;
7194     abi_ulong auxv = ts->info->saved_auxv;
7195     abi_ulong len = ts->info->auxv_len;
7196     char *ptr;
7197 
7198     /*
7199      * The auxiliary vector is stored on the target process stack.
7200      * Read in the whole auxv vector and copy it to the file.
7201      */
7202     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7203     if (ptr != NULL) {
7204         while (len > 0) {
7205             ssize_t r;
7206             r = write(fd, ptr, len);
7207             if (r <= 0) {
7208                 break;
7209             }
7210             len -= r;
7211             ptr += r;
7212         }
7213         lseek(fd, 0, SEEK_SET);
7214         unlock_user(ptr, auxv, len);
7215     }
7216 
7217     return 0;
7218 }
7219 
7220 static int is_proc_myself(const char *filename, const char *entry)
7221 {
7222     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7223         filename += strlen("/proc/");
7224         if (!strncmp(filename, "self/", strlen("self/"))) {
7225             filename += strlen("self/");
7226         } else if (*filename >= '1' && *filename <= '9') {
7227             char myself[80];
7228             snprintf(myself, sizeof(myself), "%d/", getpid());
7229             if (!strncmp(filename, myself, strlen(myself))) {
7230                 filename += strlen(myself);
7231             } else {
7232                 return 0;
7233             }
7234         } else {
7235             return 0;
7236         }
7237         if (!strcmp(filename, entry)) {
7238             return 1;
7239         }
7240     }
7241     return 0;
7242 }
7243 
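/*
 * Illustrative usage of is_proc_myself() above (the pid is hypothetical);
 * with getpid() == 1234:
 *
 *     is_proc_myself("/proc/self/maps", "maps");    // -> 1
 *     is_proc_myself("/proc/1234/maps", "maps");    // -> 1 (our own pid)
 *     is_proc_myself("/proc/4321/maps", "maps");    // -> 0 (another process)
 *     is_proc_myself("/etc/passwd", "maps");        // -> 0 (not under /proc)
 *
 * so only the emulated process's own /proc entries are intercepted.
 */
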
7244 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7245     defined(TARGET_SPARC) || defined(TARGET_M68K)
7246 static int is_proc(const char *filename, const char *entry)
7247 {
7248     return strcmp(filename, entry) == 0;
7249 }
7250 #endif
7251 
7252 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7253 static int open_net_route(void *cpu_env, int fd)
7254 {
7255     FILE *fp;
7256     char *line = NULL;
7257     size_t len = 0;
7258     ssize_t read;
7259 
7260     fp = fopen("/proc/net/route", "r");
7261     if (fp == NULL) {
7262         return -1;
7263     }
7264 
7265     /* read header */
7266 
7267     read = getline(&line, &len, fp);
7268     dprintf(fd, "%s", line);
7269 
7270     /* read routes */
7271 
7272     while ((read = getline(&line, &len, fp)) != -1) {
7273         char iface[16];
7274         uint32_t dest, gw, mask;
7275         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7276         int fields;
7277 
7278         fields = sscanf(line,
7279                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7280                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7281                         &mask, &mtu, &window, &irtt);
7282         if (fields != 11) {
7283             continue;
7284         }
7285         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7286                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7287                 metric, tswap32(mask), mtu, window, irtt);
7288     }
7289 
7290     free(line);
7291     fclose(fp);
7292 
7293     return 0;
7294 }
7295 #endif
7296 
7297 #if defined(TARGET_SPARC)
7298 static int open_cpuinfo(void *cpu_env, int fd)
7299 {
7300     dprintf(fd, "type\t\t: sun4u\n");
7301     return 0;
7302 }
7303 #endif
7304 
7305 #if defined(TARGET_M68K)
7306 static int open_hardware(void *cpu_env, int fd)
7307 {
7308     dprintf(fd, "Model:\t\tqemu-m68k\n");
7309     return 0;
7310 }
7311 #endif
7312 
7313 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7314 {
7315     struct fake_open {
7316         const char *filename;
7317         int (*fill)(void *cpu_env, int fd);
7318         int (*cmp)(const char *s1, const char *s2);
7319     };
7320     const struct fake_open *fake_open;
7321     static const struct fake_open fakes[] = {
7322         { "maps", open_self_maps, is_proc_myself },
7323         { "stat", open_self_stat, is_proc_myself },
7324         { "auxv", open_self_auxv, is_proc_myself },
7325         { "cmdline", open_self_cmdline, is_proc_myself },
7326 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7327         { "/proc/net/route", open_net_route, is_proc },
7328 #endif
7329 #if defined(TARGET_SPARC)
7330         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7331 #endif
7332 #if defined(TARGET_M68K)
7333         { "/proc/hardware", open_hardware, is_proc },
7334 #endif
7335         { NULL, NULL, NULL }
7336     };
7337 
7338     if (is_proc_myself(pathname, "exe")) {
7339         int execfd = qemu_getauxval(AT_EXECFD);
7340         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7341     }
7342 
7343     for (fake_open = fakes; fake_open->filename; fake_open++) {
7344         if (fake_open->cmp(pathname, fake_open->filename)) {
7345             break;
7346         }
7347     }
7348 
7349     if (fake_open->filename) {
7350         const char *tmpdir;
7351         char filename[PATH_MAX];
7352         int fd, r;
7353 
7354         /* create a temporary file to hold the faked /proc contents */
7355         tmpdir = getenv("TMPDIR");
7356         if (!tmpdir)
7357             tmpdir = "/tmp";
7358         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7359         fd = mkstemp(filename);
7360         if (fd < 0) {
7361             return fd;
7362         }
7363         unlink(filename);
7364 
7365         if ((r = fake_open->fill(cpu_env, fd))) {
7366             int e = errno;
7367             close(fd);
7368             errno = e;
7369             return r;
7370         }
7371         lseek(fd, 0, SEEK_SET);
7372 
7373         return fd;
7374     }
7375 
7376     return safe_openat(dirfd, path(pathname), flags, mode);
7377 }
7378 
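/*
 * Note on do_openat() above: guest opens of the emulated process's own
 * /proc entries are intercepted, e.g. open("/proc/self/maps", O_RDONLY)
 * does not expose the host's maps file; instead an unlinked temporary file
 * is created, filled by open_self_maps() with the guest's view of its
 * address space, and that descriptor is returned.  Paths matching no
 * fake_open entry fall through to safe_openat() on the host.
 */
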
7379 #define TIMER_MAGIC 0x0caf0000
7380 #define TIMER_MAGIC_MASK 0xffff0000
7381 
7382 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7383 static target_timer_t get_timer_id(abi_long arg)
7384 {
7385     target_timer_t timerid = arg;
7386 
7387     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7388         return -TARGET_EINVAL;
7389     }
7390 
7391     timerid &= 0xffff;
7392 
7393     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7394         return -TARGET_EINVAL;
7395     }
7396 
7397     return timerid;
7398 }
7399 
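/*
 * Illustrative example for get_timer_id() above: guest-visible timer IDs
 * are the g_posix_timers[] index tagged with TIMER_MAGIC in the upper
 * bits, so
 *
 *     get_timer_id(0x0caf0003);    // -> 3 (valid, provided index 3 exists)
 *     get_timer_id(0x00000003);    // -> -TARGET_EINVAL (magic bits missing)
 *
 * which rejects obviously bogus IDs before indexing the array.
 */
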
7400 static int target_to_host_cpu_mask(unsigned long *host_mask,
7401                                    size_t host_size,
7402                                    abi_ulong target_addr,
7403                                    size_t target_size)
7404 {
7405     unsigned target_bits = sizeof(abi_ulong) * 8;
7406     unsigned host_bits = sizeof(*host_mask) * 8;
7407     abi_ulong *target_mask;
7408     unsigned i, j;
7409 
7410     assert(host_size >= target_size);
7411 
7412     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7413     if (!target_mask) {
7414         return -TARGET_EFAULT;
7415     }
7416     memset(host_mask, 0, host_size);
7417 
7418     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7419         unsigned bit = i * target_bits;
7420         abi_ulong val;
7421 
7422         __get_user(val, &target_mask[i]);
7423         for (j = 0; j < target_bits; j++, bit++) {
7424             if (val & (1UL << j)) {
7425                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7426             }
7427         }
7428     }
7429 
7430     unlock_user(target_mask, target_addr, 0);
7431     return 0;
7432 }
7433 
7434 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7435                                    size_t host_size,
7436                                    abi_ulong target_addr,
7437                                    size_t target_size)
7438 {
7439     unsigned target_bits = sizeof(abi_ulong) * 8;
7440     unsigned host_bits = sizeof(*host_mask) * 8;
7441     abi_ulong *target_mask;
7442     unsigned i, j;
7443 
7444     assert(host_size >= target_size);
7445 
7446     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7447     if (!target_mask) {
7448         return -TARGET_EFAULT;
7449     }
7450 
7451     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7452         unsigned bit = i * target_bits;
7453         abi_ulong val = 0;
7454 
7455         for (j = 0; j < target_bits; j++, bit++) {
7456             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7457                 val |= 1UL << j;
7458             }
7459         }
7460         __put_user(val, &target_mask[i]);
7461     }
7462 
7463     unlock_user(target_mask, target_addr, target_size);
7464     return 0;
7465 }
7466 
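/*
 * Illustrative example for the two CPU-mask helpers above, assuming a
 * 32-bit abi_ulong guest on a host with a 64-bit unsigned long: the guest
 * words
 *
 *     target_mask[0] = 0x00000001;    // CPU 0
 *     target_mask[1] = 0x80000000;    // CPU 63
 *
 * are repacked by target_to_host_cpu_mask() into the single host word
 * 0x8000000000000001, and host_to_target_cpu_mask() performs the inverse
 * split, so the CPU bit index is preserved across the different word
 * sizes.
 */
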
7467 /* This is an internal helper for do_syscall(); having a single return
7468  * point makes it easier to perform actions such as logging of syscall
7469  * results.
7470  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7471  */
7472 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7473                             abi_long arg2, abi_long arg3, abi_long arg4,
7474                             abi_long arg5, abi_long arg6, abi_long arg7,
7475                             abi_long arg8)
7476 {
7477     CPUState *cpu = env_cpu(cpu_env);
7478     abi_long ret;
7479 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7480     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7481     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7482     || defined(TARGET_NR_statx)
7483     struct stat st;
7484 #endif
7485 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7486     || defined(TARGET_NR_fstatfs)
7487     struct statfs stfs;
7488 #endif
7489     void *p;
7490 
7491     switch(num) {
7492     case TARGET_NR_exit:
7493         /* In old applications this may be used to implement _exit(2).
7494            However, in threaded applications it is used for thread termination,
7495            and _exit_group is used for application termination.
7496            Do thread termination if we have more than one thread.  */
7497 
7498         if (block_signals()) {
7499             return -TARGET_ERESTARTSYS;
7500         }
7501 
7502         cpu_list_lock();
7503 
7504         if (CPU_NEXT(first_cpu)) {
7505             TaskState *ts;
7506 
7507             /* Remove the CPU from the list.  */
7508             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7509 
7510             cpu_list_unlock();
7511 
7512             ts = cpu->opaque;
7513             if (ts->child_tidptr) {
7514                 put_user_u32(0, ts->child_tidptr);
7515                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7516                           NULL, NULL, 0);
7517             }
7518             thread_cpu = NULL;
7519             object_unref(OBJECT(cpu));
7520             g_free(ts);
7521             rcu_unregister_thread();
7522             pthread_exit(NULL);
7523         }
7524 
7525         cpu_list_unlock();
7526         preexit_cleanup(cpu_env, arg1);
7527         _exit(arg1);
7528         return 0; /* avoid warning */
7529     case TARGET_NR_read:
7530         if (arg2 == 0 && arg3 == 0) {
7531             return get_errno(safe_read(arg1, 0, 0));
7532         } else {
7533             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7534                 return -TARGET_EFAULT;
7535             ret = get_errno(safe_read(arg1, p, arg3));
7536             if (ret >= 0 &&
7537                 fd_trans_host_to_target_data(arg1)) {
7538                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7539             }
7540             unlock_user(p, arg2, ret);
7541         }
7542         return ret;
7543     case TARGET_NR_write:
7544         if (arg2 == 0 && arg3 == 0) {
7545             return get_errno(safe_write(arg1, 0, 0));
7546         }
7547         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7548             return -TARGET_EFAULT;
7549         if (fd_trans_target_to_host_data(arg1)) {
7550             void *copy = g_malloc(arg3);
7551             memcpy(copy, p, arg3);
7552             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7553             if (ret >= 0) {
7554                 ret = get_errno(safe_write(arg1, copy, ret));
7555             }
7556             g_free(copy);
7557         } else {
7558             ret = get_errno(safe_write(arg1, p, arg3));
7559         }
7560         unlock_user(p, arg2, 0);
7561         return ret;
7562 
7563 #ifdef TARGET_NR_open
7564     case TARGET_NR_open:
7565         if (!(p = lock_user_string(arg1)))
7566             return -TARGET_EFAULT;
7567         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7568                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7569                                   arg3));
7570         fd_trans_unregister(ret);
7571         unlock_user(p, arg1, 0);
7572         return ret;
7573 #endif
7574     case TARGET_NR_openat:
7575         if (!(p = lock_user_string(arg2)))
7576             return -TARGET_EFAULT;
7577         ret = get_errno(do_openat(cpu_env, arg1, p,
7578                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7579                                   arg4));
7580         fd_trans_unregister(ret);
7581         unlock_user(p, arg2, 0);
7582         return ret;
7583 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7584     case TARGET_NR_name_to_handle_at:
7585         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7586         return ret;
7587 #endif
7588 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7589     case TARGET_NR_open_by_handle_at:
7590         ret = do_open_by_handle_at(arg1, arg2, arg3);
7591         fd_trans_unregister(ret);
7592         return ret;
7593 #endif
7594     case TARGET_NR_close:
7595         fd_trans_unregister(arg1);
7596         return get_errno(close(arg1));
7597 
7598     case TARGET_NR_brk:
7599         return do_brk(arg1);
7600 #ifdef TARGET_NR_fork
7601     case TARGET_NR_fork:
7602         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7603 #endif
7604 #ifdef TARGET_NR_waitpid
7605     case TARGET_NR_waitpid:
7606         {
7607             int status;
7608             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7609             if (!is_error(ret) && arg2 && ret
7610                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7611                 return -TARGET_EFAULT;
7612         }
7613         return ret;
7614 #endif
7615 #ifdef TARGET_NR_waitid
7616     case TARGET_NR_waitid:
7617         {
7618             siginfo_t info;
7619             info.si_pid = 0;
7620             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7621             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7622                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7623                     return -TARGET_EFAULT;
7624                 host_to_target_siginfo(p, &info);
7625                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7626             }
7627         }
7628         return ret;
7629 #endif
7630 #ifdef TARGET_NR_creat /* not on alpha */
7631     case TARGET_NR_creat:
7632         if (!(p = lock_user_string(arg1)))
7633             return -TARGET_EFAULT;
7634         ret = get_errno(creat(p, arg2));
7635         fd_trans_unregister(ret);
7636         unlock_user(p, arg1, 0);
7637         return ret;
7638 #endif
7639 #ifdef TARGET_NR_link
7640     case TARGET_NR_link:
7641         {
7642             void * p2;
7643             p = lock_user_string(arg1);
7644             p2 = lock_user_string(arg2);
7645             if (!p || !p2)
7646                 ret = -TARGET_EFAULT;
7647             else
7648                 ret = get_errno(link(p, p2));
7649             unlock_user(p2, arg2, 0);
7650             unlock_user(p, arg1, 0);
7651         }
7652         return ret;
7653 #endif
7654 #if defined(TARGET_NR_linkat)
7655     case TARGET_NR_linkat:
7656         {
7657             void * p2 = NULL;
7658             if (!arg2 || !arg4)
7659                 return -TARGET_EFAULT;
7660             p  = lock_user_string(arg2);
7661             p2 = lock_user_string(arg4);
7662             if (!p || !p2)
7663                 ret = -TARGET_EFAULT;
7664             else
7665                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7666             unlock_user(p, arg2, 0);
7667             unlock_user(p2, arg4, 0);
7668         }
7669         return ret;
7670 #endif
7671 #ifdef TARGET_NR_unlink
7672     case TARGET_NR_unlink:
7673         if (!(p = lock_user_string(arg1)))
7674             return -TARGET_EFAULT;
7675         ret = get_errno(unlink(p));
7676         unlock_user(p, arg1, 0);
7677         return ret;
7678 #endif
7679 #if defined(TARGET_NR_unlinkat)
7680     case TARGET_NR_unlinkat:
7681         if (!(p = lock_user_string(arg2)))
7682             return -TARGET_EFAULT;
7683         ret = get_errno(unlinkat(arg1, p, arg3));
7684         unlock_user(p, arg2, 0);
7685         return ret;
7686 #endif
7687     case TARGET_NR_execve:
7688         {
7689             char **argp, **envp;
7690             int argc, envc;
7691             abi_ulong gp;
7692             abi_ulong guest_argp;
7693             abi_ulong guest_envp;
7694             abi_ulong addr;
7695             char **q;
7696             int total_size = 0;
7697 
7698             argc = 0;
7699             guest_argp = arg2;
7700             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7701                 if (get_user_ual(addr, gp))
7702                     return -TARGET_EFAULT;
7703                 if (!addr)
7704                     break;
7705                 argc++;
7706             }
7707             envc = 0;
7708             guest_envp = arg3;
7709             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7710                 if (get_user_ual(addr, gp))
7711                     return -TARGET_EFAULT;
7712                 if (!addr)
7713                     break;
7714                 envc++;
7715             }
7716 
7717             argp = g_new0(char *, argc + 1);
7718             envp = g_new0(char *, envc + 1);
7719 
7720             for (gp = guest_argp, q = argp; gp;
7721                   gp += sizeof(abi_ulong), q++) {
7722                 if (get_user_ual(addr, gp))
7723                     goto execve_efault;
7724                 if (!addr)
7725                     break;
7726                 if (!(*q = lock_user_string(addr)))
7727                     goto execve_efault;
7728                 total_size += strlen(*q) + 1;
7729             }
7730             *q = NULL;
7731 
7732             for (gp = guest_envp, q = envp; gp;
7733                   gp += sizeof(abi_ulong), q++) {
7734                 if (get_user_ual(addr, gp))
7735                     goto execve_efault;
7736                 if (!addr)
7737                     break;
7738                 if (!(*q = lock_user_string(addr)))
7739                     goto execve_efault;
7740                 total_size += strlen(*q) + 1;
7741             }
7742             *q = NULL;
7743 
7744             if (!(p = lock_user_string(arg1)))
7745                 goto execve_efault;
7746             /* Although execve() is not an interruptible syscall it is
7747              * a special case where we must use the safe_syscall wrapper:
7748              * if we allow a signal to happen before we make the host
7749              * syscall then we will 'lose' it, because at the point of
7750              * execve the process leaves QEMU's control. So we use the
7751              * safe syscall wrapper to ensure that we either take the
7752              * signal as a guest signal, or else it does not happen
7753              * before the execve completes and makes it the other
7754              * program's problem.
7755              */
7756             ret = get_errno(safe_execve(p, argp, envp));
7757             unlock_user(p, arg1, 0);
7758 
7759             goto execve_end;
7760 
7761         execve_efault:
7762             ret = -TARGET_EFAULT;
7763 
7764         execve_end:
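            /* Unlock every guest string we locked above; re-read the guest
             * pointer arrays to recover the addresses that were passed to
             * lock_user_string(). */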
7765             for (gp = guest_argp, q = argp; *q;
7766                   gp += sizeof(abi_ulong), q++) {
7767                 if (get_user_ual(addr, gp)
7768                     || !addr)
7769                     break;
7770                 unlock_user(*q, addr, 0);
7771             }
7772             for (gp = guest_envp, q = envp; *q;
7773                   gp += sizeof(abi_ulong), q++) {
7774                 if (get_user_ual(addr, gp)
7775                     || !addr)
7776                     break;
7777                 unlock_user(*q, addr, 0);
7778             }
7779 
7780             g_free(argp);
7781             g_free(envp);
7782         }
7783         return ret;
7784     case TARGET_NR_chdir:
7785         if (!(p = lock_user_string(arg1)))
7786             return -TARGET_EFAULT;
7787         ret = get_errno(chdir(p));
7788         unlock_user(p, arg1, 0);
7789         return ret;
7790 #ifdef TARGET_NR_time
7791     case TARGET_NR_time:
7792         {
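            /* As well as returning the time, the syscall stores it at the
             * guest address in arg1 when that pointer is non-NULL. */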
7793             time_t host_time;
7794             ret = get_errno(time(&host_time));
7795             if (!is_error(ret)
7796                 && arg1
7797                 && put_user_sal(host_time, arg1))
7798                 return -TARGET_EFAULT;
7799         }
7800         return ret;
7801 #endif
7802 #ifdef TARGET_NR_mknod
7803     case TARGET_NR_mknod:
7804         if (!(p = lock_user_string(arg1)))
7805             return -TARGET_EFAULT;
7806         ret = get_errno(mknod(p, arg2, arg3));
7807         unlock_user(p, arg1, 0);
7808         return ret;
7809 #endif
7810 #if defined(TARGET_NR_mknodat)
7811     case TARGET_NR_mknodat:
7812         if (!(p = lock_user_string(arg2)))
7813             return -TARGET_EFAULT;
7814         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7815         unlock_user(p, arg2, 0);
7816         return ret;
7817 #endif
7818 #ifdef TARGET_NR_chmod
7819     case TARGET_NR_chmod:
7820         if (!(p = lock_user_string(arg1)))
7821             return -TARGET_EFAULT;
7822         ret = get_errno(chmod(p, arg2));
7823         unlock_user(p, arg1, 0);
7824         return ret;
7825 #endif
7826 #ifdef TARGET_NR_lseek
7827     case TARGET_NR_lseek:
7828         return get_errno(lseek(arg1, arg2, arg3));
7829 #endif
7830 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7831     /* Alpha specific */
7832     case TARGET_NR_getxpid:
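        /* getxpid returns the current PID in v0 and the parent PID in a4. */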
7833         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7834         return get_errno(getpid());
7835 #endif
7836 #ifdef TARGET_NR_getpid
7837     case TARGET_NR_getpid:
7838         return get_errno(getpid());
7839 #endif
7840     case TARGET_NR_mount:
7841         {
7842             /* need to look at the data field */
7843             void *p2, *p3;
7844 
7845             if (arg1) {
7846                 p = lock_user_string(arg1);
7847                 if (!p) {
7848                     return -TARGET_EFAULT;
7849                 }
7850             } else {
7851                 p = NULL;
7852             }
7853 
7854             p2 = lock_user_string(arg2);
7855             if (!p2) {
7856                 if (arg1) {
7857                     unlock_user(p, arg1, 0);
7858                 }
7859                 return -TARGET_EFAULT;
7860             }
7861 
7862             if (arg3) {
7863                 p3 = lock_user_string(arg3);
7864                 if (!p3) {
7865                     if (arg1) {
7866                         unlock_user(p, arg1, 0);
7867                     }
7868                     unlock_user(p2, arg2, 0);
7869                     return -TARGET_EFAULT;
7870                 }
7871             } else {
7872                 p3 = NULL;
7873             }
7874 
7875             /* FIXME - arg5 should be locked, but it isn't clear how to
7876              * do that since it's not guaranteed to be a NULL-terminated
7877              * string.
7878              */
7879             if (!arg5) {
7880                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7881             } else {
7882                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7883             }
7884             ret = get_errno(ret);
7885 
7886             if (arg1) {
7887                 unlock_user(p, arg1, 0);
7888             }
7889             unlock_user(p2, arg2, 0);
7890             if (arg3) {
7891                 unlock_user(p3, arg3, 0);
7892             }
7893         }
7894         return ret;
7895 #ifdef TARGET_NR_umount
7896     case TARGET_NR_umount:
7897         if (!(p = lock_user_string(arg1)))
7898             return -TARGET_EFAULT;
7899         ret = get_errno(umount(p));
7900         unlock_user(p, arg1, 0);
7901         return ret;
7902 #endif
7903 #ifdef TARGET_NR_stime /* not on alpha */
7904     case TARGET_NR_stime:
7905         {
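            /* Read the guest's time_t (seconds) and set the realtime clock
             * via clock_settime(CLOCK_REALTIME). */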
7906             struct timespec ts;
7907             ts.tv_nsec = 0;
7908             if (get_user_sal(ts.tv_sec, arg1)) {
7909                 return -TARGET_EFAULT;
7910             }
7911             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
7912         }
7913 #endif
7914 #ifdef TARGET_NR_alarm /* not on alpha */
7915     case TARGET_NR_alarm:
7916         return alarm(arg1);
7917 #endif
7918 #ifdef TARGET_NR_pause /* not on alpha */
7919     case TARGET_NR_pause:
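        /* pause() always returns -EINTR; sigsuspend() on the current mask
         * waits for a signal unless one is already pending. */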
7920         if (!block_signals()) {
7921             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7922         }
7923         return -TARGET_EINTR;
7924 #endif
7925 #ifdef TARGET_NR_utime
7926     case TARGET_NR_utime:
7927         {
7928             struct utimbuf tbuf, *host_tbuf;
7929             struct target_utimbuf *target_tbuf;
7930             if (arg2) {
7931                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7932                     return -TARGET_EFAULT;
7933                 tbuf.actime = tswapal(target_tbuf->actime);
7934                 tbuf.modtime = tswapal(target_tbuf->modtime);
7935                 unlock_user_struct(target_tbuf, arg2, 0);
7936                 host_tbuf = &tbuf;
7937             } else {
7938                 host_tbuf = NULL;
7939             }
7940             if (!(p = lock_user_string(arg1)))
7941                 return -TARGET_EFAULT;
7942             ret = get_errno(utime(p, host_tbuf));
7943             unlock_user(p, arg1, 0);
7944         }
7945         return ret;
7946 #endif
7947 #ifdef TARGET_NR_utimes
7948     case TARGET_NR_utimes:
7949         {
7950             struct timeval *tvp, tv[2];
7951             if (arg2) {
7952                 if (copy_from_user_timeval(&tv[0], arg2)
7953                     || copy_from_user_timeval(&tv[1],
7954                                               arg2 + sizeof(struct target_timeval)))
7955                     return -TARGET_EFAULT;
7956                 tvp = tv;
7957             } else {
7958                 tvp = NULL;
7959             }
7960             if (!(p = lock_user_string(arg1)))
7961                 return -TARGET_EFAULT;
7962             ret = get_errno(utimes(p, tvp));
7963             unlock_user(p, arg1, 0);
7964         }
7965         return ret;
7966 #endif
7967 #if defined(TARGET_NR_futimesat)
7968     case TARGET_NR_futimesat:
7969         {
7970             struct timeval *tvp, tv[2];
7971             if (arg3) {
7972                 if (copy_from_user_timeval(&tv[0], arg3)
7973                     || copy_from_user_timeval(&tv[1],
7974                                               arg3 + sizeof(struct target_timeval)))
7975                     return -TARGET_EFAULT;
7976                 tvp = tv;
7977             } else {
7978                 tvp = NULL;
7979             }
7980             if (!(p = lock_user_string(arg2))) {
7981                 return -TARGET_EFAULT;
7982             }
7983             ret = get_errno(futimesat(arg1, path(p), tvp));
7984             unlock_user(p, arg2, 0);
7985         }
7986         return ret;
7987 #endif
7988 #ifdef TARGET_NR_access
7989     case TARGET_NR_access:
7990         if (!(p = lock_user_string(arg1))) {
7991             return -TARGET_EFAULT;
7992         }
7993         ret = get_errno(access(path(p), arg2));
7994         unlock_user(p, arg1, 0);
7995         return ret;
7996 #endif
7997 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7998     case TARGET_NR_faccessat:
7999         if (!(p = lock_user_string(arg2))) {
8000             return -TARGET_EFAULT;
8001         }
8002         ret = get_errno(faccessat(arg1, p, arg3, 0));
8003         unlock_user(p, arg2, 0);
8004         return ret;
8005 #endif
8006 #ifdef TARGET_NR_nice /* not on alpha */
8007     case TARGET_NR_nice:
8008         return get_errno(nice(arg1));
8009 #endif
8010     case TARGET_NR_sync:
8011         sync();
8012         return 0;
8013 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8014     case TARGET_NR_syncfs:
8015         return get_errno(syncfs(arg1));
8016 #endif
8017     case TARGET_NR_kill:
8018         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8019 #ifdef TARGET_NR_rename
8020     case TARGET_NR_rename:
8021         {
8022             void *p2;
8023             p = lock_user_string(arg1);
8024             p2 = lock_user_string(arg2);
8025             if (!p || !p2)
8026                 ret = -TARGET_EFAULT;
8027             else
8028                 ret = get_errno(rename(p, p2));
8029             unlock_user(p2, arg2, 0);
8030             unlock_user(p, arg1, 0);
8031         }
8032         return ret;
8033 #endif
8034 #if defined(TARGET_NR_renameat)
8035     case TARGET_NR_renameat:
8036         {
8037             void *p2;
8038             p  = lock_user_string(arg2);
8039             p2 = lock_user_string(arg4);
8040             if (!p || !p2)
8041                 ret = -TARGET_EFAULT;
8042             else
8043                 ret = get_errno(renameat(arg1, p, arg3, p2));
8044             unlock_user(p2, arg4, 0);
8045             unlock_user(p, arg2, 0);
8046         }
8047         return ret;
8048 #endif
8049 #if defined(TARGET_NR_renameat2)
8050     case TARGET_NR_renameat2:
8051         {
8052             void *p2;
8053             p  = lock_user_string(arg2);
8054             p2 = lock_user_string(arg4);
8055             if (!p || !p2) {
8056                 ret = -TARGET_EFAULT;
8057             } else {
8058                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8059             }
8060             unlock_user(p2, arg4, 0);
8061             unlock_user(p, arg2, 0);
8062         }
8063         return ret;
8064 #endif
8065 #ifdef TARGET_NR_mkdir
8066     case TARGET_NR_mkdir:
8067         if (!(p = lock_user_string(arg1)))
8068             return -TARGET_EFAULT;
8069         ret = get_errno(mkdir(p, arg2));
8070         unlock_user(p, arg1, 0);
8071         return ret;
8072 #endif
8073 #if defined(TARGET_NR_mkdirat)
8074     case TARGET_NR_mkdirat:
8075         if (!(p = lock_user_string(arg2)))
8076             return -TARGET_EFAULT;
8077         ret = get_errno(mkdirat(arg1, p, arg3));
8078         unlock_user(p, arg2, 0);
8079         return ret;
8080 #endif
8081 #ifdef TARGET_NR_rmdir
8082     case TARGET_NR_rmdir:
8083         if (!(p = lock_user_string(arg1)))
8084             return -TARGET_EFAULT;
8085         ret = get_errno(rmdir(p));
8086         unlock_user(p, arg1, 0);
8087         return ret;
8088 #endif
8089     case TARGET_NR_dup:
8090         ret = get_errno(dup(arg1));
8091         if (ret >= 0) {
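            /* Propagate any fd translator (e.g. for netlink sockets) so the
             * duplicate gets the same data conversion on the new fd. */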
8092             fd_trans_dup(arg1, ret);
8093         }
8094         return ret;
8095 #ifdef TARGET_NR_pipe
8096     case TARGET_NR_pipe:
8097         return do_pipe(cpu_env, arg1, 0, 0);
8098 #endif
8099 #ifdef TARGET_NR_pipe2
8100     case TARGET_NR_pipe2:
8101         return do_pipe(cpu_env, arg1,
8102                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8103 #endif
8104     case TARGET_NR_times:
8105         {
8106             struct target_tms *tmsp;
8107             struct tms tms;
8108             ret = get_errno(times(&tms));
8109             if (arg1) {
8110                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8111                 if (!tmsp)
8112                     return -TARGET_EFAULT;
8113                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8114                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8115                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8116                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8117             }
8118             if (!is_error(ret))
8119                 ret = host_to_target_clock_t(ret);
8120         }
8121         return ret;
8122     case TARGET_NR_acct:
8123         if (arg1 == 0) {
8124             ret = get_errno(acct(NULL));
8125         } else {
8126             if (!(p = lock_user_string(arg1))) {
8127                 return -TARGET_EFAULT;
8128             }
8129             ret = get_errno(acct(path(p)));
8130             unlock_user(p, arg1, 0);
8131         }
8132         return ret;
8133 #ifdef TARGET_NR_umount2
8134     case TARGET_NR_umount2:
8135         if (!(p = lock_user_string(arg1)))
8136             return -TARGET_EFAULT;
8137         ret = get_errno(umount2(p, arg2));
8138         unlock_user(p, arg1, 0);
8139         return ret;
8140 #endif
8141     case TARGET_NR_ioctl:
8142         return do_ioctl(arg1, arg2, arg3);
8143 #ifdef TARGET_NR_fcntl
8144     case TARGET_NR_fcntl:
8145         return do_fcntl(arg1, arg2, arg3);
8146 #endif
8147     case TARGET_NR_setpgid:
8148         return get_errno(setpgid(arg1, arg2));
8149     case TARGET_NR_umask:
8150         return get_errno(umask(arg1));
8151     case TARGET_NR_chroot:
8152         if (!(p = lock_user_string(arg1)))
8153             return -TARGET_EFAULT;
8154         ret = get_errno(chroot(p));
8155         unlock_user(p, arg1, 0);
8156         return ret;
8157 #ifdef TARGET_NR_dup2
8158     case TARGET_NR_dup2:
8159         ret = get_errno(dup2(arg1, arg2));
8160         if (ret >= 0) {
8161             fd_trans_dup(arg1, arg2);
8162         }
8163         return ret;
8164 #endif
8165 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8166     case TARGET_NR_dup3:
8167     {
8168         int host_flags;
8169 
8170         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8171             return -TARGET_EINVAL;
8172         }
8173         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8174         ret = get_errno(dup3(arg1, arg2, host_flags));
8175         if (ret >= 0) {
8176             fd_trans_dup(arg1, arg2);
8177         }
8178         return ret;
8179     }
8180 #endif
8181 #ifdef TARGET_NR_getppid /* not on alpha */
8182     case TARGET_NR_getppid:
8183         return get_errno(getppid());
8184 #endif
8185 #ifdef TARGET_NR_getpgrp
8186     case TARGET_NR_getpgrp:
8187         return get_errno(getpgrp());
8188 #endif
8189     case TARGET_NR_setsid:
8190         return get_errno(setsid());
8191 #ifdef TARGET_NR_sigaction
8192     case TARGET_NR_sigaction:
8193         {
8194 #if defined(TARGET_ALPHA)
8195             struct target_sigaction act, oact, *pact = 0;
8196             struct target_old_sigaction *old_act;
8197             if (arg2) {
8198                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8199                     return -TARGET_EFAULT;
8200                 act._sa_handler = old_act->_sa_handler;
8201                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8202                 act.sa_flags = old_act->sa_flags;
8203                 act.sa_restorer = 0;
8204                 unlock_user_struct(old_act, arg2, 0);
8205                 pact = &act;
8206             }
8207             ret = get_errno(do_sigaction(arg1, pact, &oact));
8208             if (!is_error(ret) && arg3) {
8209                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8210                     return -TARGET_EFAULT;
8211                 old_act->_sa_handler = oact._sa_handler;
8212                 old_act->sa_mask = oact.sa_mask.sig[0];
8213                 old_act->sa_flags = oact.sa_flags;
8214                 unlock_user_struct(old_act, arg3, 1);
8215             }
8216 #elif defined(TARGET_MIPS)
8217             struct target_sigaction act, oact, *pact, *old_act;
8218 
8219             if (arg2) {
8220                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8221                     return -TARGET_EFAULT;
8222                 act._sa_handler = old_act->_sa_handler;
8223                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8224                 act.sa_flags = old_act->sa_flags;
8225                 unlock_user_struct(old_act, arg2, 0);
8226                 pact = &act;
8227             } else {
8228                 pact = NULL;
8229             }
8230 
8231             ret = get_errno(do_sigaction(arg1, pact, &oact));
8232 
8233             if (!is_error(ret) && arg3) {
8234                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8235                     return -TARGET_EFAULT;
8236                 old_act->_sa_handler = oact._sa_handler;
8237                 old_act->sa_flags = oact.sa_flags;
8238                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8239                 old_act->sa_mask.sig[1] = 0;
8240                 old_act->sa_mask.sig[2] = 0;
8241                 old_act->sa_mask.sig[3] = 0;
8242                 unlock_user_struct(old_act, arg3, 1);
8243             }
8244 #else
8245             struct target_old_sigaction *old_act;
8246             struct target_sigaction act, oact, *pact;
8247             if (arg2) {
8248                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8249                     return -TARGET_EFAULT;
8250                 act._sa_handler = old_act->_sa_handler;
8251                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8252                 act.sa_flags = old_act->sa_flags;
8253                 act.sa_restorer = old_act->sa_restorer;
8254 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8255                 act.ka_restorer = 0;
8256 #endif
8257                 unlock_user_struct(old_act, arg2, 0);
8258                 pact = &act;
8259             } else {
8260                 pact = NULL;
8261             }
8262             ret = get_errno(do_sigaction(arg1, pact, &oact));
8263             if (!is_error(ret) && arg3) {
8264                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8265                     return -TARGET_EFAULT;
8266                 old_act->_sa_handler = oact._sa_handler;
8267                 old_act->sa_mask = oact.sa_mask.sig[0];
8268                 old_act->sa_flags = oact.sa_flags;
8269                 old_act->sa_restorer = oact.sa_restorer;
8270                 unlock_user_struct(old_act, arg3, 1);
8271             }
8272 #endif
8273         }
8274         return ret;
8275 #endif
8276     case TARGET_NR_rt_sigaction:
8277         {
8278 #if defined(TARGET_ALPHA)
8279             /* For Alpha and SPARC this is a 5 argument syscall, with
8280              * a 'restorer' parameter which must be copied into the
8281              * sa_restorer field of the sigaction struct.
8282              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8283              * and arg5 is the sigsetsize.
8284              * Alpha also has a separate rt_sigaction struct that it uses
8285              * here; SPARC uses the usual sigaction struct.
8286              */
8287             struct target_rt_sigaction *rt_act;
8288             struct target_sigaction act, oact, *pact = 0;
8289 
8290             if (arg4 != sizeof(target_sigset_t)) {
8291                 return -TARGET_EINVAL;
8292             }
8293             if (arg2) {
8294                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8295                     return -TARGET_EFAULT;
8296                 act._sa_handler = rt_act->_sa_handler;
8297                 act.sa_mask = rt_act->sa_mask;
8298                 act.sa_flags = rt_act->sa_flags;
8299                 act.sa_restorer = arg5;
8300                 unlock_user_struct(rt_act, arg2, 0);
8301                 pact = &act;
8302             }
8303             ret = get_errno(do_sigaction(arg1, pact, &oact));
8304             if (!is_error(ret) && arg3) {
8305                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8306                     return -TARGET_EFAULT;
8307                 rt_act->_sa_handler = oact._sa_handler;
8308                 rt_act->sa_mask = oact.sa_mask;
8309                 rt_act->sa_flags = oact.sa_flags;
8310                 unlock_user_struct(rt_act, arg3, 1);
8311             }
8312 #else
8313 #ifdef TARGET_SPARC
8314             target_ulong restorer = arg4;
8315             target_ulong sigsetsize = arg5;
8316 #else
8317             target_ulong sigsetsize = arg4;
8318 #endif
8319             struct target_sigaction *act;
8320             struct target_sigaction *oact;
8321 
8322             if (sigsetsize != sizeof(target_sigset_t)) {
8323                 return -TARGET_EINVAL;
8324             }
8325             if (arg2) {
8326                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8327                     return -TARGET_EFAULT;
8328                 }
8329 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8330                 act->ka_restorer = restorer;
8331 #endif
8332             } else {
8333                 act = NULL;
8334             }
8335             if (arg3) {
8336                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8337                     ret = -TARGET_EFAULT;
8338                     goto rt_sigaction_fail;
8339                 }
8340             } else
8341                 oact = NULL;
8342             ret = get_errno(do_sigaction(arg1, act, oact));
8343         rt_sigaction_fail:
8344             if (act)
8345                 unlock_user_struct(act, arg2, 0);
8346             if (oact)
8347                 unlock_user_struct(oact, arg3, 1);
8348 #endif
8349         }
8350         return ret;
8351 #ifdef TARGET_NR_sgetmask /* not on alpha */
8352     case TARGET_NR_sgetmask:
8353         {
8354             sigset_t cur_set;
8355             abi_ulong target_set;
8356             ret = do_sigprocmask(0, NULL, &cur_set);
8357             if (!ret) {
8358                 host_to_target_old_sigset(&target_set, &cur_set);
8359                 ret = target_set;
8360             }
8361         }
8362         return ret;
8363 #endif
8364 #ifdef TARGET_NR_ssetmask /* not on alpha */
8365     case TARGET_NR_ssetmask:
8366         {
8367             sigset_t set, oset;
8368             abi_ulong target_set = arg1;
8369             target_to_host_old_sigset(&set, &target_set);
8370             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8371             if (!ret) {
8372                 host_to_target_old_sigset(&target_set, &oset);
8373                 ret = target_set;
8374             }
8375         }
8376         return ret;
8377 #endif
8378 #ifdef TARGET_NR_sigprocmask
8379     case TARGET_NR_sigprocmask:
8380         {
8381 #if defined(TARGET_ALPHA)
8382             sigset_t set, oldset;
8383             abi_ulong mask;
8384             int how;
8385 
8386             switch (arg1) {
8387             case TARGET_SIG_BLOCK:
8388                 how = SIG_BLOCK;
8389                 break;
8390             case TARGET_SIG_UNBLOCK:
8391                 how = SIG_UNBLOCK;
8392                 break;
8393             case TARGET_SIG_SETMASK:
8394                 how = SIG_SETMASK;
8395                 break;
8396             default:
8397                 return -TARGET_EINVAL;
8398             }
8399             mask = arg2;
8400             target_to_host_old_sigset(&set, &mask);
8401 
8402             ret = do_sigprocmask(how, &set, &oldset);
8403             if (!is_error(ret)) {
8404                 host_to_target_old_sigset(&mask, &oldset);
8405                 ret = mask;
8406                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8407             }
8408 #else
8409             sigset_t set, oldset, *set_ptr;
8410             int how;
8411 
8412             if (arg2) {
8413                 switch (arg1) {
8414                 case TARGET_SIG_BLOCK:
8415                     how = SIG_BLOCK;
8416                     break;
8417                 case TARGET_SIG_UNBLOCK:
8418                     how = SIG_UNBLOCK;
8419                     break;
8420                 case TARGET_SIG_SETMASK:
8421                     how = SIG_SETMASK;
8422                     break;
8423                 default:
8424                     return -TARGET_EINVAL;
8425                 }
8426                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8427                     return -TARGET_EFAULT;
8428                 target_to_host_old_sigset(&set, p);
8429                 unlock_user(p, arg2, 0);
8430                 set_ptr = &set;
8431             } else {
8432                 how = 0;
8433                 set_ptr = NULL;
8434             }
8435             ret = do_sigprocmask(how, set_ptr, &oldset);
8436             if (!is_error(ret) && arg3) {
8437                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8438                     return -TARGET_EFAULT;
8439                 host_to_target_old_sigset(p, &oldset);
8440                 unlock_user(p, arg3, sizeof(target_sigset_t));
8441             }
8442 #endif
8443         }
8444         return ret;
8445 #endif
8446     case TARGET_NR_rt_sigprocmask:
8447         {
8448             int how = arg1;
8449             sigset_t set, oldset, *set_ptr;
8450 
8451             if (arg4 != sizeof(target_sigset_t)) {
8452                 return -TARGET_EINVAL;
8453             }
8454 
8455             if (arg2) {
8456                 switch(how) {
8457                 case TARGET_SIG_BLOCK:
8458                     how = SIG_BLOCK;
8459                     break;
8460                 case TARGET_SIG_UNBLOCK:
8461                     how = SIG_UNBLOCK;
8462                     break;
8463                 case TARGET_SIG_SETMASK:
8464                     how = SIG_SETMASK;
8465                     break;
8466                 default:
8467                     return -TARGET_EINVAL;
8468                 }
8469                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8470                     return -TARGET_EFAULT;
8471                 target_to_host_sigset(&set, p);
8472                 unlock_user(p, arg2, 0);
8473                 set_ptr = &set;
8474             } else {
8475                 how = 0;
8476                 set_ptr = NULL;
8477             }
8478             ret = do_sigprocmask(how, set_ptr, &oldset);
8479             if (!is_error(ret) && arg3) {
8480                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8481                     return -TARGET_EFAULT;
8482                 host_to_target_sigset(p, &oldset);
8483                 unlock_user(p, arg3, sizeof(target_sigset_t));
8484             }
8485         }
8486         return ret;
8487 #ifdef TARGET_NR_sigpending
8488     case TARGET_NR_sigpending:
8489         {
8490             sigset_t set;
8491             ret = get_errno(sigpending(&set));
8492             if (!is_error(ret)) {
8493                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8494                     return -TARGET_EFAULT;
8495                 host_to_target_old_sigset(p, &set);
8496                 unlock_user(p, arg1, sizeof(target_sigset_t));
8497             }
8498         }
8499         return ret;
8500 #endif
8501     case TARGET_NR_rt_sigpending:
8502         {
8503             sigset_t set;
8504 
8505             /* Yes, this check is >, not != like most. We follow the kernel's
8506              * logic and it does it like this because it implements
8507              * NR_sigpending through the same code path, and in that case
8508              * the old_sigset_t is smaller in size.
8509              */
8510             if (arg2 > sizeof(target_sigset_t)) {
8511                 return -TARGET_EINVAL;
8512             }
8513 
8514             ret = get_errno(sigpending(&set));
8515             if (!is_error(ret)) {
8516                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8517                     return -TARGET_EFAULT;
8518                 host_to_target_sigset(p, &set);
8519                 unlock_user(p, arg1, sizeof(target_sigset_t));
8520             }
8521         }
8522         return ret;
8523 #ifdef TARGET_NR_sigsuspend
8524     case TARGET_NR_sigsuspend:
8525         {
8526             TaskState *ts = cpu->opaque;
8527 #if defined(TARGET_ALPHA)
8528             abi_ulong mask = arg1;
8529             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8530 #else
8531             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8532                 return -TARGET_EFAULT;
8533             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8534             unlock_user(p, arg1, 0);
8535 #endif
8536             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8537                                                SIGSET_T_SIZE));
8538             if (ret != -TARGET_ERESTARTSYS) {
8539                 ts->in_sigsuspend = 1;
8540             }
8541         }
8542         return ret;
8543 #endif
8544     case TARGET_NR_rt_sigsuspend:
8545         {
8546             TaskState *ts = cpu->opaque;
8547 
8548             if (arg2 != sizeof(target_sigset_t)) {
8549                 return -TARGET_EINVAL;
8550             }
8551             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8552                 return -TARGET_EFAULT;
8553             target_to_host_sigset(&ts->sigsuspend_mask, p);
8554             unlock_user(p, arg1, 0);
8555             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8556                                                SIGSET_T_SIZE));
8557             if (ret != -TARGET_ERESTARTSYS) {
8558                 ts->in_sigsuspend = 1;
8559             }
8560         }
8561         return ret;
8562 #ifdef TARGET_NR_rt_sigtimedwait
8563     case TARGET_NR_rt_sigtimedwait:
8564         {
8565             sigset_t set;
8566             struct timespec uts, *puts;
8567             siginfo_t uinfo;
8568 
8569             if (arg4 != sizeof(target_sigset_t)) {
8570                 return -TARGET_EINVAL;
8571             }
8572 
8573             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8574                 return -TARGET_EFAULT;
8575             target_to_host_sigset(&set, p);
8576             unlock_user(p, arg1, 0);
8577             if (arg3) {
8578                 puts = &uts;
8579                 if (target_to_host_timespec(puts, arg3)) {
                     return -TARGET_EFAULT;
                 }
8580             } else {
8581                 puts = NULL;
8582             }
8583             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8584                                                  SIGSET_T_SIZE));
8585             if (!is_error(ret)) {
8586                 if (arg2) {
8587                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8588                                   0);
8589                     if (!p) {
8590                         return -TARGET_EFAULT;
8591                     }
8592                     host_to_target_siginfo(p, &uinfo);
8593                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8594                 }
8595                 ret = host_to_target_signal(ret);
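                /* On success the host returns the number of the accepted
                 * signal; convert it to the guest's signal numbering. */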
8596             }
8597         }
8598         return ret;
8599 #endif
8600     case TARGET_NR_rt_sigqueueinfo:
8601         {
8602             siginfo_t uinfo;
8603 
8604             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8605             if (!p) {
8606                 return -TARGET_EFAULT;
8607             }
8608             target_to_host_siginfo(&uinfo, p);
8609             unlock_user(p, arg3, 0);
8610             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8611         }
8612         return ret;
8613     case TARGET_NR_rt_tgsigqueueinfo:
8614         {
8615             siginfo_t uinfo;
8616 
8617             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8618             if (!p) {
8619                 return -TARGET_EFAULT;
8620             }
8621             target_to_host_siginfo(&uinfo, p);
8622             unlock_user(p, arg4, 0);
8623             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8624         }
8625         return ret;
8626 #ifdef TARGET_NR_sigreturn
8627     case TARGET_NR_sigreturn:
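        /* A pending guest signal must be delivered first; restart the
         * syscall in that case rather than tearing down the signal frame. */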
8628         if (block_signals()) {
8629             return -TARGET_ERESTARTSYS;
8630         }
8631         return do_sigreturn(cpu_env);
8632 #endif
8633     case TARGET_NR_rt_sigreturn:
8634         if (block_signals()) {
8635             return -TARGET_ERESTARTSYS;
8636         }
8637         return do_rt_sigreturn(cpu_env);
8638     case TARGET_NR_sethostname:
8639         if (!(p = lock_user_string(arg1)))
8640             return -TARGET_EFAULT;
8641         ret = get_errno(sethostname(p, arg2));
8642         unlock_user(p, arg1, 0);
8643         return ret;
8644 #ifdef TARGET_NR_setrlimit
8645     case TARGET_NR_setrlimit:
8646         {
8647             int resource = target_to_host_resource(arg1);
8648             struct target_rlimit *target_rlim;
8649             struct rlimit rlim;
8650             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8651                 return -TARGET_EFAULT;
8652             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8653             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8654             unlock_user_struct(target_rlim, arg2, 0);
8655             /*
8656              * If we just passed through resource limit settings for memory then
8657              * they would also apply to QEMU's own allocations, and QEMU will
8658              * crash or hang or die if its allocations fail. Ideally we would
8659              * track the guest allocations in QEMU and apply the limits ourselves.
8660              * For now, just tell the guest the call succeeded but don't actually
8661              * limit anything.
8662              */
8663             if (resource != RLIMIT_AS &&
8664                 resource != RLIMIT_DATA &&
8665                 resource != RLIMIT_STACK) {
8666                 return get_errno(setrlimit(resource, &rlim));
8667             } else {
8668                 return 0;
8669             }
8670         }
8671 #endif
8672 #ifdef TARGET_NR_getrlimit
8673     case TARGET_NR_getrlimit:
8674         {
8675             int resource = target_to_host_resource(arg1);
8676             struct target_rlimit *target_rlim;
8677             struct rlimit rlim;
8678 
8679             ret = get_errno(getrlimit(resource, &rlim));
8680             if (!is_error(ret)) {
8681                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8682                     return -TARGET_EFAULT;
8683                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8684                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8685                 unlock_user_struct(target_rlim, arg2, 1);
8686             }
8687         }
8688         return ret;
8689 #endif
8690     case TARGET_NR_getrusage:
8691         {
8692             struct rusage rusage;
8693             ret = get_errno(getrusage(arg1, &rusage));
8694             if (!is_error(ret)) {
8695                 ret = host_to_target_rusage(arg2, &rusage);
8696             }
8697         }
8698         return ret;
8699 #if defined(TARGET_NR_gettimeofday)
8700     case TARGET_NR_gettimeofday:
8701         {
8702             struct timeval tv;
8703             ret = get_errno(gettimeofday(&tv, NULL));
8704             if (!is_error(ret)) {
8705                 if (copy_to_user_timeval(arg1, &tv))
8706                     return -TARGET_EFAULT;
8707             }
8708         }
8709         return ret;
8710 #endif
8711 #if defined(TARGET_NR_settimeofday)
8712     case TARGET_NR_settimeofday:
8713         {
8714             struct timeval tv, *ptv = NULL;
8715             struct timezone tz, *ptz = NULL;
8716 
8717             if (arg1) {
8718                 if (copy_from_user_timeval(&tv, arg1)) {
8719                     return -TARGET_EFAULT;
8720                 }
8721                 ptv = &tv;
8722             }
8723 
8724             if (arg2) {
8725                 if (copy_from_user_timezone(&tz, arg2)) {
8726                     return -TARGET_EFAULT;
8727                 }
8728                 ptz = &tz;
8729             }
8730 
8731             return get_errno(settimeofday(ptv, ptz));
8732         }
8733 #endif
8734 #if defined(TARGET_NR_select)
8735     case TARGET_NR_select:
8736 #if defined(TARGET_WANT_NI_OLD_SELECT)
8737         /* Some architectures used to have old_select here
8738          * but now return ENOSYS for it.
8739          */
8740         ret = -TARGET_ENOSYS;
8741 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8742         ret = do_old_select(arg1);
8743 #else
8744         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8745 #endif
8746         return ret;
8747 #endif
8748 #ifdef TARGET_NR_pselect6
8749     case TARGET_NR_pselect6:
8750         {
8751             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8752             fd_set rfds, wfds, efds;
8753             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8754             struct timespec ts, *ts_ptr;
8755 
8756             /*
8757              * The 6th arg is actually two args smashed together,
8758              * so we cannot use the C library.
8759              */
8760             sigset_t set;
8761             struct {
8762                 sigset_t *set;
8763                 size_t size;
8764             } sig, *sig_ptr;
8765 
8766             abi_ulong arg_sigset, arg_sigsize, *arg7;
8767             target_sigset_t *target_sigset;
8768 
8769             n = arg1;
8770             rfd_addr = arg2;
8771             wfd_addr = arg3;
8772             efd_addr = arg4;
8773             ts_addr = arg5;
8774 
8775             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8776             if (ret) {
8777                 return ret;
8778             }
8779             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8780             if (ret) {
8781                 return ret;
8782             }
8783             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8784             if (ret) {
8785                 return ret;
8786             }
8787 
8788             /*
8789              * This takes a timespec, and not a timeval, so we cannot
8790              * use the do_select() helper ...
8791              */
8792             if (ts_addr) {
8793                 if (target_to_host_timespec(&ts, ts_addr)) {
8794                     return -TARGET_EFAULT;
8795                 }
8796                 ts_ptr = &ts;
8797             } else {
8798                 ts_ptr = NULL;
8799             }
8800 
8801             /* Extract the two packed args for the sigset */
8802             if (arg6) {
8803                 sig_ptr = &sig;
8804                 sig.size = SIGSET_T_SIZE;
8805 
8806                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8807                 if (!arg7) {
8808                     return -TARGET_EFAULT;
8809                 }
8810                 arg_sigset = tswapal(arg7[0]);
8811                 arg_sigsize = tswapal(arg7[1]);
8812                 unlock_user(arg7, arg6, 0);
8813 
8814                 if (arg_sigset) {
8815                     sig.set = &set;
8816                     if (arg_sigsize != sizeof(*target_sigset)) {
8817                         /* Like the kernel, we enforce correct size sigsets */
8818                         return -TARGET_EINVAL;
8819                     }
8820                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8821                                               sizeof(*target_sigset), 1);
8822                     if (!target_sigset) {
8823                         return -TARGET_EFAULT;
8824                     }
8825                     target_to_host_sigset(&set, target_sigset);
8826                     unlock_user(target_sigset, arg_sigset, 0);
8827                 } else {
8828                     sig.set = NULL;
8829                 }
8830             } else {
8831                 sig_ptr = NULL;
8832             }
8833 
8834             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8835                                           ts_ptr, sig_ptr));
8836 
8837             if (!is_error(ret)) {
8838                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8839                     return -TARGET_EFAULT;
8840                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8841                     return -TARGET_EFAULT;
8842                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8843                     return -TARGET_EFAULT;
8844 
8845                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8846                     return -TARGET_EFAULT;
8847             }
8848         }
8849         return ret;
8850 #endif
8851 #ifdef TARGET_NR_symlink
8852     case TARGET_NR_symlink:
8853         {
8854             void *p2;
8855             p = lock_user_string(arg1);
8856             p2 = lock_user_string(arg2);
8857             if (!p || !p2)
8858                 ret = -TARGET_EFAULT;
8859             else
8860                 ret = get_errno(symlink(p, p2));
8861             unlock_user(p2, arg2, 0);
8862             unlock_user(p, arg1, 0);
8863         }
8864         return ret;
8865 #endif
8866 #if defined(TARGET_NR_symlinkat)
8867     case TARGET_NR_symlinkat:
8868         {
8869             void *p2;
8870             p  = lock_user_string(arg1);
8871             p2 = lock_user_string(arg3);
8872             if (!p || !p2)
8873                 ret = -TARGET_EFAULT;
8874             else
8875                 ret = get_errno(symlinkat(p, arg2, p2));
8876             unlock_user(p2, arg3, 0);
8877             unlock_user(p, arg1, 0);
8878         }
8879         return ret;
8880 #endif
8881 #ifdef TARGET_NR_readlink
8882     case TARGET_NR_readlink:
8883         {
8884             void *p2;
8885             p = lock_user_string(arg1);
8886             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8887             if (!p || !p2) {
8888                 ret = -TARGET_EFAULT;
8889             } else if (!arg3) {
8890                 /* Short circuit this for the magic exe check. */
8891                 ret = -TARGET_EINVAL;
8892             } else if (is_proc_myself((const char *)p, "exe")) {
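                /* The guest is reading /proc/self/exe: report the path of the
                 * emulated executable, not that of the QEMU binary. */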
8893                 char real[PATH_MAX], *temp;
8894                 temp = realpath(exec_path, real);
8895                 /* Return value is # of bytes that we wrote to the buffer. */
8896                 if (temp == NULL) {
8897                     ret = get_errno(-1);
8898                 } else {
8899                     /* Don't worry about sign mismatch as earlier mapping
8900                      * logic would have thrown a bad address error. */
8901                     ret = MIN(strlen(real), arg3);
8902                     /* We cannot NUL terminate the string. */
8903                     memcpy(p2, real, ret);
8904                 }
8905             } else {
8906                 ret = get_errno(readlink(path(p), p2, arg3));
8907             }
8908             unlock_user(p2, arg2, ret);
8909             unlock_user(p, arg1, 0);
8910         }
8911         return ret;
8912 #endif
8913 #if defined(TARGET_NR_readlinkat)
8914     case TARGET_NR_readlinkat:
8915         {
8916             void *p2;
8917             p  = lock_user_string(arg2);
8918             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8919             if (!p || !p2) {
8920                 ret = -TARGET_EFAULT;
8921             } else if (is_proc_myself((const char *)p, "exe")) {
8922                 char real[PATH_MAX], *temp;
8923                 temp = realpath(exec_path, real);
8924                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8925                 snprintf((char *)p2, arg4, "%s", real);
8926             } else {
8927                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8928             }
8929             unlock_user(p2, arg3, ret);
8930             unlock_user(p, arg2, 0);
8931         }
8932         return ret;
8933 #endif
8934 #ifdef TARGET_NR_swapon
8935     case TARGET_NR_swapon:
8936         if (!(p = lock_user_string(arg1)))
8937             return -TARGET_EFAULT;
8938         ret = get_errno(swapon(p, arg2));
8939         unlock_user(p, arg1, 0);
8940         return ret;
8941 #endif
8942     case TARGET_NR_reboot:
8943         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8944             /* arg4 is only used with RESTART2; ignore it otherwise */
8945             p = lock_user_string(arg4);
8946             if (!p) {
8947                 return -TARGET_EFAULT;
8948             }
8949             ret = get_errno(reboot(arg1, arg2, arg3, p));
8950             unlock_user(p, arg4, 0);
8951         } else {
8952             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8953         }
8954         return ret;
8955 #ifdef TARGET_NR_mmap
8956     case TARGET_NR_mmap:
8957 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8958     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8959     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8960     || defined(TARGET_S390X)
8961         {
8962             abi_ulong *v;
8963             abi_ulong v1, v2, v3, v4, v5, v6;
8964             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8965                 return -TARGET_EFAULT;
8966             v1 = tswapal(v[0]);
8967             v2 = tswapal(v[1]);
8968             v3 = tswapal(v[2]);
8969             v4 = tswapal(v[3]);
8970             v5 = tswapal(v[4]);
8971             v6 = tswapal(v[5]);
8972             unlock_user(v, arg1, 0);
8973             ret = get_errno(target_mmap(v1, v2, v3,
8974                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8975                                         v5, v6));
8976         }
8977 #else
8978         ret = get_errno(target_mmap(arg1, arg2, arg3,
8979                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8980                                     arg5,
8981                                     arg6));
8982 #endif
8983         return ret;
8984 #endif
8985 #ifdef TARGET_NR_mmap2
8986     case TARGET_NR_mmap2:
8987 #ifndef MMAP_SHIFT
8988 #define MMAP_SHIFT 12
8989 #endif
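        /* The mmap2 offset argument is in units of (1 << MMAP_SHIFT) bytes
         * (normally 4096); shift it into a byte offset for target_mmap(). */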
8990         ret = target_mmap(arg1, arg2, arg3,
8991                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8992                           arg5, arg6 << MMAP_SHIFT);
8993         return get_errno(ret);
8994 #endif
8995     case TARGET_NR_munmap:
8996         return get_errno(target_munmap(arg1, arg2));
8997     case TARGET_NR_mprotect:
8998         {
8999             TaskState *ts = cpu->opaque;
9000             /* Special hack to detect libc making the stack executable.  */
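            /* Rather than pass PROT_GROWSDOWN through to the host, grow the
             * range down to the guest stack limit by hand and drop the flag. */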
9001             if ((arg3 & PROT_GROWSDOWN)
9002                 && arg1 >= ts->info->stack_limit
9003                 && arg1 <= ts->info->start_stack) {
9004                 arg3 &= ~PROT_GROWSDOWN;
9005                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9006                 arg1 = ts->info->stack_limit;
9007             }
9008         }
9009         return get_errno(target_mprotect(arg1, arg2, arg3));
9010 #ifdef TARGET_NR_mremap
9011     case TARGET_NR_mremap:
9012         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9013 #endif
9014         /* ??? msync/mlock/munlock are broken for softmmu.  */
9015 #ifdef TARGET_NR_msync
9016     case TARGET_NR_msync:
9017         return get_errno(msync(g2h(arg1), arg2, arg3));
9018 #endif
9019 #ifdef TARGET_NR_mlock
9020     case TARGET_NR_mlock:
9021         return get_errno(mlock(g2h(arg1), arg2));
9022 #endif
9023 #ifdef TARGET_NR_munlock
9024     case TARGET_NR_munlock:
9025         return get_errno(munlock(g2h(arg1), arg2));
9026 #endif
9027 #ifdef TARGET_NR_mlockall
9028     case TARGET_NR_mlockall:
9029         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9030 #endif
9031 #ifdef TARGET_NR_munlockall
9032     case TARGET_NR_munlockall:
9033         return get_errno(munlockall());
9034 #endif
9035 #ifdef TARGET_NR_truncate
9036     case TARGET_NR_truncate:
9037         if (!(p = lock_user_string(arg1)))
9038             return -TARGET_EFAULT;
9039         ret = get_errno(truncate(p, arg2));
9040         unlock_user(p, arg1, 0);
9041         return ret;
9042 #endif
9043 #ifdef TARGET_NR_ftruncate
9044     case TARGET_NR_ftruncate:
9045         return get_errno(ftruncate(arg1, arg2));
9046 #endif
9047     case TARGET_NR_fchmod:
9048         return get_errno(fchmod(arg1, arg2));
9049 #if defined(TARGET_NR_fchmodat)
9050     case TARGET_NR_fchmodat:
9051         if (!(p = lock_user_string(arg2)))
9052             return -TARGET_EFAULT;
9053         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9054         unlock_user(p, arg2, 0);
9055         return ret;
9056 #endif
9057     case TARGET_NR_getpriority:
9058         /* Note that negative values are valid for getpriority, so we must
9059            differentiate based on errno settings.  */
9060         errno = 0;
9061         ret = getpriority(arg1, arg2);
9062         if (ret == -1 && errno != 0) {
9063             return -host_to_target_errno(errno);
9064         }
9065 #ifdef TARGET_ALPHA
9066         /* Return value is the unbiased priority.  Signal no error.  */
9067         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9068 #else
9069         /* Return value is a biased priority to avoid negative numbers.  */
9070         ret = 20 - ret;
9071 #endif
9072         return ret;
9073     case TARGET_NR_setpriority:
9074         return get_errno(setpriority(arg1, arg2, arg3));
9075 #ifdef TARGET_NR_statfs
9076     case TARGET_NR_statfs:
9077         if (!(p = lock_user_string(arg1))) {
9078             return -TARGET_EFAULT;
9079         }
9080         ret = get_errno(statfs(path(p), &stfs));
9081         unlock_user(p, arg1, 0);
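    /* TARGET_NR_fstatfs jumps here to share the statfs conversion code. */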
9082     convert_statfs:
9083         if (!is_error(ret)) {
9084             struct target_statfs *target_stfs;
9085 
9086             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9087                 return -TARGET_EFAULT;
9088             __put_user(stfs.f_type, &target_stfs->f_type);
9089             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9090             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9091             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9092             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9093             __put_user(stfs.f_files, &target_stfs->f_files);
9094             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9095             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9096             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9097             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9098             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9099 #ifdef _STATFS_F_FLAGS
9100             __put_user(stfs.f_flags, &target_stfs->f_flags);
9101 #else
9102             __put_user(0, &target_stfs->f_flags);
9103 #endif
9104             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9105             unlock_user_struct(target_stfs, arg2, 1);
9106         }
9107         return ret;
9108 #endif
9109 #ifdef TARGET_NR_fstatfs
9110     case TARGET_NR_fstatfs:
9111         ret = get_errno(fstatfs(arg1, &stfs));
9112         goto convert_statfs;
9113 #endif
9114 #ifdef TARGET_NR_statfs64
9115     case TARGET_NR_statfs64:
9116         if (!(p = lock_user_string(arg1))) {
9117             return -TARGET_EFAULT;
9118         }
9119         ret = get_errno(statfs(path(p), &stfs));
9120         unlock_user(p, arg1, 0);
9121     convert_statfs64:
9122         if (!is_error(ret)) {
9123             struct target_statfs64 *target_stfs;
9124 
9125             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9126                 return -TARGET_EFAULT;
9127             __put_user(stfs.f_type, &target_stfs->f_type);
9128             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9129             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9130             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9131             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9132             __put_user(stfs.f_files, &target_stfs->f_files);
9133             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9134             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9135             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9136             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9137             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9138             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9139             unlock_user_struct(target_stfs, arg3, 1);
9140         }
9141         return ret;
9142     case TARGET_NR_fstatfs64:
9143         ret = get_errno(fstatfs(arg1, &stfs));
9144         goto convert_statfs64;
9145 #endif
9146 #ifdef TARGET_NR_socketcall
9147     case TARGET_NR_socketcall:
9148         return do_socketcall(arg1, arg2);
9149 #endif
9150 #ifdef TARGET_NR_accept
9151     case TARGET_NR_accept:
9152         return do_accept4(arg1, arg2, arg3, 0);
9153 #endif
9154 #ifdef TARGET_NR_accept4
9155     case TARGET_NR_accept4:
9156         return do_accept4(arg1, arg2, arg3, arg4);
9157 #endif
9158 #ifdef TARGET_NR_bind
9159     case TARGET_NR_bind:
9160         return do_bind(arg1, arg2, arg3);
9161 #endif
9162 #ifdef TARGET_NR_connect
9163     case TARGET_NR_connect:
9164         return do_connect(arg1, arg2, arg3);
9165 #endif
9166 #ifdef TARGET_NR_getpeername
9167     case TARGET_NR_getpeername:
9168         return do_getpeername(arg1, arg2, arg3);
9169 #endif
9170 #ifdef TARGET_NR_getsockname
9171     case TARGET_NR_getsockname:
9172         return do_getsockname(arg1, arg2, arg3);
9173 #endif
9174 #ifdef TARGET_NR_getsockopt
9175     case TARGET_NR_getsockopt:
9176         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9177 #endif
9178 #ifdef TARGET_NR_listen
9179     case TARGET_NR_listen:
9180         return get_errno(listen(arg1, arg2));
9181 #endif
9182 #ifdef TARGET_NR_recv
9183     case TARGET_NR_recv:
9184         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9185 #endif
9186 #ifdef TARGET_NR_recvfrom
9187     case TARGET_NR_recvfrom:
9188         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9189 #endif
9190 #ifdef TARGET_NR_recvmsg
9191     case TARGET_NR_recvmsg:
9192         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9193 #endif
9194 #ifdef TARGET_NR_send
9195     case TARGET_NR_send:
9196         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9197 #endif
9198 #ifdef TARGET_NR_sendmsg
9199     case TARGET_NR_sendmsg:
9200         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9201 #endif
9202 #ifdef TARGET_NR_sendmmsg
9203     case TARGET_NR_sendmmsg:
9204         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9205 #endif
9206 #ifdef TARGET_NR_recvmmsg
9207     case TARGET_NR_recvmmsg:
9208         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9209 #endif
9210 #ifdef TARGET_NR_sendto
9211     case TARGET_NR_sendto:
9212         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9213 #endif
9214 #ifdef TARGET_NR_shutdown
9215     case TARGET_NR_shutdown:
9216         return get_errno(shutdown(arg1, arg2));
9217 #endif
9218 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
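         /*
          * getrandom() fills the guest buffer directly; on success,
          * unlock_user() copies back the number of bytes the host
          * call reported as written.
          */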
9219     case TARGET_NR_getrandom:
9220         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9221         if (!p) {
9222             return -TARGET_EFAULT;
9223         }
9224         ret = get_errno(getrandom(p, arg2, arg3));
9225         unlock_user(p, arg1, ret);
9226         return ret;
9227 #endif
9228 #ifdef TARGET_NR_socket
9229     case TARGET_NR_socket:
9230         return do_socket(arg1, arg2, arg3);
9231 #endif
9232 #ifdef TARGET_NR_socketpair
9233     case TARGET_NR_socketpair:
9234         return do_socketpair(arg1, arg2, arg3, arg4);
9235 #endif
9236 #ifdef TARGET_NR_setsockopt
9237     case TARGET_NR_setsockopt:
9238         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9239 #endif
9240 #if defined(TARGET_NR_syslog)
9241     case TARGET_NR_syslog:
9242         {
9243             int len = arg2;
9244 
9245             switch (arg1) {
9246             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9247             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9248             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9249             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9250             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9251             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9252             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9253             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9254                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9255             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9256             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9257             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9258                 {
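                         /*
                          * The read actions copy kernel log data into the
                          * guest buffer at arg2, so it must be writable.
                          */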
9259                     if (len < 0) {
9260                         return -TARGET_EINVAL;
9261                     }
9262                     if (len == 0) {
9263                         return 0;
9264                     }
9265                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9266                     if (!p) {
9267                         return -TARGET_EFAULT;
9268                     }
9269                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9270                     unlock_user(p, arg2, arg3);
9271                 }
9272                 return ret;
9273             default:
9274                 return -TARGET_EINVAL;
9275             }
9276         }
9277         break;
9278 #endif
9279     case TARGET_NR_setitimer:
9280         {
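                 /*
                  * The target's struct itimerval is laid out as two
                  * consecutive target_timevals: it_interval, then it_value.
                  */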
9281             struct itimerval value, ovalue, *pvalue;
9282 
9283             if (arg2) {
9284                 pvalue = &value;
9285                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9286                     || copy_from_user_timeval(&pvalue->it_value,
9287                                               arg2 + sizeof(struct target_timeval)))
9288                     return -TARGET_EFAULT;
9289             } else {
9290                 pvalue = NULL;
9291             }
9292             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9293             if (!is_error(ret) && arg3) {
9294                 if (copy_to_user_timeval(arg3,
9295                                          &ovalue.it_interval)
9296                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9297                                             &ovalue.it_value))
9298                     return -TARGET_EFAULT;
9299             }
9300         }
9301         return ret;
9302     case TARGET_NR_getitimer:
9303         {
9304             struct itimerval value;
9305 
9306             ret = get_errno(getitimer(arg1, &value));
9307             if (!is_error(ret) && arg2) {
9308                 if (copy_to_user_timeval(arg2,
9309                                          &value.it_interval)
9310                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9311                                             &value.it_value))
9312                     return -TARGET_EFAULT;
9313             }
9314         }
9315         return ret;
9316 #ifdef TARGET_NR_stat
9317     case TARGET_NR_stat:
9318         if (!(p = lock_user_string(arg1))) {
9319             return -TARGET_EFAULT;
9320         }
9321         ret = get_errno(stat(path(p), &st));
9322         unlock_user(p, arg1, 0);
9323         goto do_stat;
9324 #endif
9325 #ifdef TARGET_NR_lstat
9326     case TARGET_NR_lstat:
9327         if (!(p = lock_user_string(arg1))) {
9328             return -TARGET_EFAULT;
9329         }
9330         ret = get_errno(lstat(path(p), &st));
9331         unlock_user(p, arg1, 0);
9332         goto do_stat;
9333 #endif
9334 #ifdef TARGET_NR_fstat
9335     case TARGET_NR_fstat:
9336         {
9337             ret = get_errno(fstat(arg1, &st));
9338 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9339         do_stat:
9340 #endif
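                 /* stat, lstat and fstat all join here to convert the
                    host struct stat into the target's layout. */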
9341             if (!is_error(ret)) {
9342                 struct target_stat *target_st;
9343 
9344                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9345                     return -TARGET_EFAULT;
9346                 memset(target_st, 0, sizeof(*target_st));
9347                 __put_user(st.st_dev, &target_st->st_dev);
9348                 __put_user(st.st_ino, &target_st->st_ino);
9349                 __put_user(st.st_mode, &target_st->st_mode);
9350                 __put_user(st.st_uid, &target_st->st_uid);
9351                 __put_user(st.st_gid, &target_st->st_gid);
9352                 __put_user(st.st_nlink, &target_st->st_nlink);
9353                 __put_user(st.st_rdev, &target_st->st_rdev);
9354                 __put_user(st.st_size, &target_st->st_size);
9355                 __put_user(st.st_blksize, &target_st->st_blksize);
9356                 __put_user(st.st_blocks, &target_st->st_blocks);
9357                 __put_user(st.st_atime, &target_st->target_st_atime);
9358                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9359                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9360 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9361     defined(TARGET_STAT_HAVE_NSEC)
9362                 __put_user(st.st_atim.tv_nsec,
9363                            &target_st->target_st_atime_nsec);
9364                 __put_user(st.st_mtim.tv_nsec,
9365                            &target_st->target_st_mtime_nsec);
9366                 __put_user(st.st_ctim.tv_nsec,
9367                            &target_st->target_st_ctime_nsec);
9368 #endif
9369                 unlock_user_struct(target_st, arg2, 1);
9370             }
9371         }
9372         return ret;
9373 #endif
9374     case TARGET_NR_vhangup:
9375         return get_errno(vhangup());
9376 #ifdef TARGET_NR_syscall
9377     case TARGET_NR_syscall:
9378         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9379                           arg6, arg7, arg8, 0);
9380 #endif
9381 #if defined(TARGET_NR_wait4)
9382     case TARGET_NR_wait4:
9383         {
9384             int status;
9385             abi_long status_ptr = arg2;
9386             struct rusage rusage, *rusage_ptr;
9387             abi_ulong target_rusage = arg4;
9388             abi_long rusage_err;
9389             if (target_rusage)
9390                 rusage_ptr = &rusage;
9391             else
9392                 rusage_ptr = NULL;
9393             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9394             if (!is_error(ret)) {
9395                 if (status_ptr && ret) {
9396                     status = host_to_target_waitstatus(status);
9397                     if (put_user_s32(status, status_ptr))
9398                         return -TARGET_EFAULT;
9399                 }
9400                 if (target_rusage) {
9401                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9402                     if (rusage_err) {
9403                         ret = rusage_err;
9404                     }
9405                 }
9406             }
9407         }
9408         return ret;
9409 #endif
9410 #ifdef TARGET_NR_swapoff
9411     case TARGET_NR_swapoff:
9412         if (!(p = lock_user_string(arg1)))
9413             return -TARGET_EFAULT;
9414         ret = get_errno(swapoff(p));
9415         unlock_user(p, arg1, 0);
9416         return ret;
9417 #endif
9418     case TARGET_NR_sysinfo:
9419         {
9420             struct target_sysinfo *target_value;
9421             struct sysinfo value;
9422             ret = get_errno(sysinfo(&value));
9423             if (!is_error(ret) && arg1)
9424             {
9425                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9426                     return -TARGET_EFAULT;
9427                 __put_user(value.uptime, &target_value->uptime);
9428                 __put_user(value.loads[0], &target_value->loads[0]);
9429                 __put_user(value.loads[1], &target_value->loads[1]);
9430                 __put_user(value.loads[2], &target_value->loads[2]);
9431                 __put_user(value.totalram, &target_value->totalram);
9432                 __put_user(value.freeram, &target_value->freeram);
9433                 __put_user(value.sharedram, &target_value->sharedram);
9434                 __put_user(value.bufferram, &target_value->bufferram);
9435                 __put_user(value.totalswap, &target_value->totalswap);
9436                 __put_user(value.freeswap, &target_value->freeswap);
9437                 __put_user(value.procs, &target_value->procs);
9438                 __put_user(value.totalhigh, &target_value->totalhigh);
9439                 __put_user(value.freehigh, &target_value->freehigh);
9440                 __put_user(value.mem_unit, &target_value->mem_unit);
9441                 unlock_user_struct(target_value, arg1, 1);
9442             }
9443         }
9444         return ret;
9445 #ifdef TARGET_NR_ipc
9446     case TARGET_NR_ipc:
9447         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9448 #endif
9449 #ifdef TARGET_NR_semget
9450     case TARGET_NR_semget:
9451         return get_errno(semget(arg1, arg2, arg3));
9452 #endif
9453 #ifdef TARGET_NR_semop
9454     case TARGET_NR_semop:
9455         return do_semop(arg1, arg2, arg3);
9456 #endif
9457 #ifdef TARGET_NR_semctl
9458     case TARGET_NR_semctl:
9459         return do_semctl(arg1, arg2, arg3, arg4);
9460 #endif
9461 #ifdef TARGET_NR_msgctl
9462     case TARGET_NR_msgctl:
9463         return do_msgctl(arg1, arg2, arg3);
9464 #endif
9465 #ifdef TARGET_NR_msgget
9466     case TARGET_NR_msgget:
9467         return get_errno(msgget(arg1, arg2));
9468 #endif
9469 #ifdef TARGET_NR_msgrcv
9470     case TARGET_NR_msgrcv:
9471         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9472 #endif
9473 #ifdef TARGET_NR_msgsnd
9474     case TARGET_NR_msgsnd:
9475         return do_msgsnd(arg1, arg2, arg3, arg4);
9476 #endif
9477 #ifdef TARGET_NR_shmget
9478     case TARGET_NR_shmget:
9479         return get_errno(shmget(arg1, arg2, arg3));
9480 #endif
9481 #ifdef TARGET_NR_shmctl
9482     case TARGET_NR_shmctl:
9483         return do_shmctl(arg1, arg2, arg3);
9484 #endif
9485 #ifdef TARGET_NR_shmat
9486     case TARGET_NR_shmat:
9487         return do_shmat(cpu_env, arg1, arg2, arg3);
9488 #endif
9489 #ifdef TARGET_NR_shmdt
9490     case TARGET_NR_shmdt:
9491         return do_shmdt(arg1);
9492 #endif
9493     case TARGET_NR_fsync:
9494         return get_errno(fsync(arg1));
9495     case TARGET_NR_clone:
9496         /* Linux manages to have three different orderings for its
9497          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9498          * match the kernel's CONFIG_CLONE_* settings.
9499          * Microblaze is further special in that it uses a sixth
9500          * implicit argument to clone for the TLS pointer.
9501          */
9502 #if defined(TARGET_MICROBLAZE)
9503         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9504 #elif defined(TARGET_CLONE_BACKWARDS)
9505         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9506 #elif defined(TARGET_CLONE_BACKWARDS2)
9507         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9508 #else
9509         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9510 #endif
9511         return ret;
9512 #ifdef __NR_exit_group
9513         /* new thread calls */
9514     case TARGET_NR_exit_group:
9515         preexit_cleanup(cpu_env, arg1);
9516         return get_errno(exit_group(arg1));
9517 #endif
9518     case TARGET_NR_setdomainname:
9519         if (!(p = lock_user_string(arg1)))
9520             return -TARGET_EFAULT;
9521         ret = get_errno(setdomainname(p, arg2));
9522         unlock_user(p, arg1, 0);
9523         return ret;
9524     case TARGET_NR_uname:
9525         /* no need to transcode because we use the linux syscall */
9526         {
9527             struct new_utsname * buf;
9528 
9529             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9530                 return -TARGET_EFAULT;
9531             ret = get_errno(sys_uname(buf));
9532             if (!is_error(ret)) {
9533                 /* Overwrite the native machine name with whatever is being
9534                    emulated. */
9535                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9536                           sizeof(buf->machine));
9537                 /* Allow the user to override the reported release.  */
9538                 if (qemu_uname_release && *qemu_uname_release) {
9539                     g_strlcpy(buf->release, qemu_uname_release,
9540                               sizeof(buf->release));
9541                 }
9542             }
9543             unlock_user_struct(buf, arg1, 1);
9544         }
9545         return ret;
9546 #ifdef TARGET_I386
9547     case TARGET_NR_modify_ldt:
9548         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9549 #if !defined(TARGET_X86_64)
9550     case TARGET_NR_vm86:
9551         return do_vm86(cpu_env, arg1, arg2);
9552 #endif
9553 #endif
9554 #if defined(TARGET_NR_adjtimex)
9555     case TARGET_NR_adjtimex:
9556         {
9557             struct timex host_buf;
9558 
9559             if (target_to_host_timex(&host_buf, arg1) != 0) {
9560                 return -TARGET_EFAULT;
9561             }
9562             ret = get_errno(adjtimex(&host_buf));
9563             if (!is_error(ret)) {
9564                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9565                     return -TARGET_EFAULT;
9566                 }
9567             }
9568         }
9569         return ret;
9570 #endif
9571 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9572     case TARGET_NR_clock_adjtime:
9573         {
9574             struct timex htx, *phtx = &htx;
9575 
9576             if (target_to_host_timex(phtx, arg2) != 0) {
9577                 return -TARGET_EFAULT;
9578             }
9579             ret = get_errno(clock_adjtime(arg1, phtx));
9580             if (!is_error(ret) && phtx) {
9581                 if (host_to_target_timex(arg2, phtx) != 0) {
9582                     return -TARGET_EFAULT;
9583                 }
9584             }
9585         }
9586         return ret;
9587 #endif
9588     case TARGET_NR_getpgid:
9589         return get_errno(getpgid(arg1));
9590     case TARGET_NR_fchdir:
9591         return get_errno(fchdir(arg1));
9592     case TARGET_NR_personality:
9593         return get_errno(personality(arg1));
9594 #ifdef TARGET_NR__llseek /* Not on alpha */
9595     case TARGET_NR__llseek:
9596         {
9597             int64_t res;
9598 #if !defined(__NR_llseek)
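                 /*
                  * The host has no llseek (64-bit hosts): rebuild the
                  * 64-bit offset from its two halves and call lseek().
                  */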
9599             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9600             if (res == -1) {
9601                 ret = get_errno(res);
9602             } else {
9603                 ret = 0;
9604             }
9605 #else
9606             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9607 #endif
9608             if ((ret == 0) && put_user_s64(res, arg4)) {
9609                 return -TARGET_EFAULT;
9610             }
9611         }
9612         return ret;
9613 #endif
9614 #ifdef TARGET_NR_getdents
9615     case TARGET_NR_getdents:
9616 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9617 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
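             /*
              * Host linux_dirent records are larger than the 32-bit
              * target's, so read into a scratch buffer and repack each
              * entry, narrowing d_ino/d_off to the target's abi_long.
              */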
9618         {
9619             struct target_dirent *target_dirp;
9620             struct linux_dirent *dirp;
9621             abi_long count = arg3;
9622 
9623             dirp = g_try_malloc(count);
9624             if (!dirp) {
9625                 return -TARGET_ENOMEM;
9626             }
9627 
9628             ret = get_errno(sys_getdents(arg1, dirp, count));
9629             if (!is_error(ret)) {
9630                 struct linux_dirent *de;
9631                 struct target_dirent *tde;
9632                 int len = ret;
9633                 int reclen, treclen;
9634                 int count1, tnamelen;
9635 
9636                 count1 = 0;
9637                 de = dirp;
9638                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9639                     return -TARGET_EFAULT;
9640                 tde = target_dirp;
9641                 while (len > 0) {
9642                     reclen = de->d_reclen;
9643                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9644                     assert(tnamelen >= 0);
9645                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9646                     assert(count1 + treclen <= count);
9647                     tde->d_reclen = tswap16(treclen);
9648                     tde->d_ino = tswapal(de->d_ino);
9649                     tde->d_off = tswapal(de->d_off);
9650                     memcpy(tde->d_name, de->d_name, tnamelen);
9651                     de = (struct linux_dirent *)((char *)de + reclen);
9652                     len -= reclen;
9653                     tde = (struct target_dirent *)((char *)tde + treclen);
9654                     count1 += treclen;
9655                 }
9656                 ret = count1;
9657                 unlock_user(target_dirp, arg2, ret);
9658             }
9659             g_free(dirp);
9660         }
9661 #else
9662         {
9663             struct linux_dirent *dirp;
9664             abi_long count = arg3;
9665 
9666             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9667                 return -TARGET_EFAULT;
9668             ret = get_errno(sys_getdents(arg1, dirp, count));
9669             if (!is_error(ret)) {
9670                 struct linux_dirent *de;
9671                 int len = ret;
9672                 int reclen;
9673                 de = dirp;
9674                 while (len > 0) {
9675                     reclen = de->d_reclen;
9676                     if (reclen > len)
9677                         break;
9678                     de->d_reclen = tswap16(reclen);
9679                     tswapls(&de->d_ino);
9680                     tswapls(&de->d_off);
9681                     de = (struct linux_dirent *)((char *)de + reclen);
9682                     len -= reclen;
9683                 }
9684             }
9685             unlock_user(dirp, arg2, ret);
9686         }
9687 #endif
9688 #else
9689         /* Implement getdents in terms of getdents64 */
9690         {
9691             struct linux_dirent64 *dirp;
9692             abi_long count = arg3;
9693 
9694             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9695             if (!dirp) {
9696                 return -TARGET_EFAULT;
9697             }
9698             ret = get_errno(sys_getdents64(arg1, dirp, count));
9699             if (!is_error(ret)) {
9700                 /* Convert the dirent64 structs to target dirent.  We do this
9701                  * in-place, since we can guarantee that a target_dirent is no
9702                  * larger than a dirent64; however this means we have to be
9703                  * careful to read everything before writing in the new format.
9704                  */
9705                 struct linux_dirent64 *de;
9706                 struct target_dirent *tde;
9707                 int len = ret;
9708                 int tlen = 0;
9709 
9710                 de = dirp;
9711                 tde = (struct target_dirent *)dirp;
9712                 while (len > 0) {
9713                     int namelen, treclen;
9714                     int reclen = de->d_reclen;
9715                     uint64_t ino = de->d_ino;
9716                     int64_t off = de->d_off;
9717                     uint8_t type = de->d_type;
9718 
9719                     namelen = strlen(de->d_name);
9720                     treclen = offsetof(struct target_dirent, d_name)
9721                         + namelen + 2;
9722                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9723 
9724                     memmove(tde->d_name, de->d_name, namelen + 1);
9725                     tde->d_ino = tswapal(ino);
9726                     tde->d_off = tswapal(off);
9727                     tde->d_reclen = tswap16(treclen);
9728                     /* The target_dirent type is in what was formerly a padding
9729                      * byte at the end of the structure:
9730                      */
9731                     *(((char *)tde) + treclen - 1) = type;
9732 
9733                     de = (struct linux_dirent64 *)((char *)de + reclen);
9734                     tde = (struct target_dirent *)((char *)tde + treclen);
9735                     len -= reclen;
9736                     tlen += treclen;
9737                 }
9738                 ret = tlen;
9739             }
9740             unlock_user(dirp, arg2, ret);
9741         }
9742 #endif
9743         return ret;
9744 #endif /* TARGET_NR_getdents */
9745 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9746     case TARGET_NR_getdents64:
9747         {
9748             struct linux_dirent64 *dirp;
9749             abi_long count = arg3;
9750             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9751                 return -TARGET_EFAULT;
9752             ret = get_errno(sys_getdents64(arg1, dirp, count));
9753             if (!is_error(ret)) {
9754                 struct linux_dirent64 *de;
9755                 int len = ret;
9756                 int reclen;
9757                 de = dirp;
9758                 while (len > 0) {
9759                     reclen = de->d_reclen;
9760                     if (reclen > len)
9761                         break;
9762                     de->d_reclen = tswap16(reclen);
9763                     tswap64s((uint64_t *)&de->d_ino);
9764                     tswap64s((uint64_t *)&de->d_off);
9765                     de = (struct linux_dirent64 *)((char *)de + reclen);
9766                     len -= reclen;
9767                 }
9768             }
9769             unlock_user(dirp, arg2, ret);
9770         }
9771         return ret;
9772 #endif /* TARGET_NR_getdents64 */
9773 #if defined(TARGET_NR__newselect)
9774     case TARGET_NR__newselect:
9775         return do_select(arg1, arg2, arg3, arg4, arg5);
9776 #endif
9777 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9778 # ifdef TARGET_NR_poll
9779     case TARGET_NR_poll:
9780 # endif
9781 # ifdef TARGET_NR_ppoll
9782     case TARGET_NR_ppoll:
9783 # endif
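             /*
              * poll and ppoll share the pollfd conversion below; they
              * differ only in the timeout and signal mask handling.
              */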
9784         {
9785             struct target_pollfd *target_pfd;
9786             unsigned int nfds = arg2;
9787             struct pollfd *pfd;
9788             unsigned int i;
9789 
9790             pfd = NULL;
9791             target_pfd = NULL;
9792             if (nfds) {
9793                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9794                     return -TARGET_EINVAL;
9795                 }
9796 
9797                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9798                                        sizeof(struct target_pollfd) * nfds, 1);
9799                 if (!target_pfd) {
9800                     return -TARGET_EFAULT;
9801                 }
9802 
9803                 pfd = alloca(sizeof(struct pollfd) * nfds);
9804                 for (i = 0; i < nfds; i++) {
9805                     pfd[i].fd = tswap32(target_pfd[i].fd);
9806                     pfd[i].events = tswap16(target_pfd[i].events);
9807                 }
9808             }
9809 
9810             switch (num) {
9811 # ifdef TARGET_NR_ppoll
9812             case TARGET_NR_ppoll:
9813             {
9814                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9815                 target_sigset_t *target_set;
9816                 sigset_t _set, *set = &_set;
9817 
9818                 if (arg3) {
9819                     if (target_to_host_timespec(timeout_ts, arg3)) {
9820                         unlock_user(target_pfd, arg1, 0);
9821                         return -TARGET_EFAULT;
9822                     }
9823                 } else {
9824                     timeout_ts = NULL;
9825                 }
9826 
9827                 if (arg4) {
9828                     if (arg5 != sizeof(target_sigset_t)) {
9829                         unlock_user(target_pfd, arg1, 0);
9830                         return -TARGET_EINVAL;
9831                     }
9832 
9833                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9834                     if (!target_set) {
9835                         unlock_user(target_pfd, arg1, 0);
9836                         return -TARGET_EFAULT;
9837                     }
9838                     target_to_host_sigset(set, target_set);
9839                 } else {
9840                     set = NULL;
9841                 }
9842 
9843                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9844                                            set, SIGSET_T_SIZE));
9845 
9846                 if (!is_error(ret) && arg3) {
9847                     host_to_target_timespec(arg3, timeout_ts);
9848                 }
9849                 if (arg4) {
9850                     unlock_user(target_set, arg4, 0);
9851                 }
9852                 break;
9853             }
9854 # endif
9855 # ifdef TARGET_NR_poll
9856             case TARGET_NR_poll:
9857             {
9858                 struct timespec ts, *pts;
9859 
9860                 if (arg3 >= 0) {
9861                     /* Convert ms to secs, ns */
9862                     ts.tv_sec = arg3 / 1000;
9863                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9864                     pts = &ts;
9865                 } else {
9866                     /* -ve poll() timeout means "infinite" */
9867                     pts = NULL;
9868                 }
9869                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9870                 break;
9871             }
9872 # endif
9873             default:
9874                 g_assert_not_reached();
9875             }
9876 
9877             if (!is_error(ret)) {
9878                 for(i = 0; i < nfds; i++) {
9879                     target_pfd[i].revents = tswap16(pfd[i].revents);
9880                 }
9881             }
9882             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9883         }
9884         return ret;
9885 #endif
9886     case TARGET_NR_flock:
9887         /* NOTE: the flock constant seems to be the same for every
9888            Linux platform */
9889         return get_errno(safe_flock(arg1, arg2));
9890     case TARGET_NR_readv:
9891         {
9892             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9893             if (vec != NULL) {
9894                 ret = get_errno(safe_readv(arg1, vec, arg3));
9895                 unlock_iovec(vec, arg2, arg3, 1);
9896             } else {
9897                 ret = -host_to_target_errno(errno);
9898             }
9899         }
9900         return ret;
9901     case TARGET_NR_writev:
9902         {
9903             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9904             if (vec != NULL) {
9905                 ret = get_errno(safe_writev(arg1, vec, arg3));
9906                 unlock_iovec(vec, arg2, arg3, 0);
9907             } else {
9908                 ret = -host_to_target_errno(errno);
9909             }
9910         }
9911         return ret;
9912 #if defined(TARGET_NR_preadv)
9913     case TARGET_NR_preadv:
9914         {
9915             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9916             if (vec != NULL) {
9917                 unsigned long low, high;
9918 
9919                 target_to_host_low_high(arg4, arg5, &low, &high);
9920                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9921                 unlock_iovec(vec, arg2, arg3, 1);
9922             } else {
9923                 ret = -host_to_target_errno(errno);
9924             }
9925         }
9926         return ret;
9927 #endif
9928 #if defined(TARGET_NR_pwritev)
9929     case TARGET_NR_pwritev:
9930         {
9931             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9932             if (vec != NULL) {
9933                 unsigned long low, high;
9934 
9935                 target_to_host_low_high(arg4, arg5, &low, &high);
9936                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9937                 unlock_iovec(vec, arg2, arg3, 0);
9938             } else {
9939                 ret = -host_to_target_errno(errno);
9940             }
9941         }
9942         return ret;
9943 #endif
9944     case TARGET_NR_getsid:
9945         return get_errno(getsid(arg1));
9946 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9947     case TARGET_NR_fdatasync:
9948         return get_errno(fdatasync(arg1));
9949 #endif
9950 #ifdef TARGET_NR__sysctl
9951     case TARGET_NR__sysctl:
9952         /* We don't implement this, but ENOTDIR is always a safe
9953            return value. */
9954         return -TARGET_ENOTDIR;
9955 #endif
9956     case TARGET_NR_sched_getaffinity:
9957         {
9958             unsigned int mask_size;
9959             unsigned long *mask;
9960 
9961             /*
9962              * sched_getaffinity needs multiples of ulong, so need to take
9963              * care of mismatches between target ulong and host ulong sizes.
9964              */
9965             if (arg2 & (sizeof(abi_ulong) - 1)) {
9966                 return -TARGET_EINVAL;
9967             }
9968             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9969 
9970             mask = alloca(mask_size);
9971             memset(mask, 0, mask_size);
9972             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9973 
9974             if (!is_error(ret)) {
9975                 if (ret > arg2) {
9976                     /* More data returned than the caller's buffer will fit.
9977                      * This only happens if sizeof(abi_long) < sizeof(long)
9978                      * and the caller passed us a buffer holding an odd number
9979                      * of abi_longs. If the host kernel is actually using the
9980                      * extra 4 bytes then fail EINVAL; otherwise we can just
9981                      * ignore them and only copy the interesting part.
9982                      */
9983                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9984                     if (numcpus > arg2 * 8) {
9985                         return -TARGET_EINVAL;
9986                     }
9987                     ret = arg2;
9988                 }
9989 
9990                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9991                     return -TARGET_EFAULT;
9992                 }
9993             }
9994         }
9995         return ret;
9996     case TARGET_NR_sched_setaffinity:
9997         {
9998             unsigned int mask_size;
9999             unsigned long *mask;
10000 
10001             /*
10002              * sched_setaffinity needs multiples of ulong, so need to take
10003              * care of mismatches between target ulong and host ulong sizes.
10004              */
10005             if (arg2 & (sizeof(abi_ulong) - 1)) {
10006                 return -TARGET_EINVAL;
10007             }
10008             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10009             mask = alloca(mask_size);
10010 
10011             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10012             if (ret) {
10013                 return ret;
10014             }
10015 
10016             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10017         }
10018     case TARGET_NR_getcpu:
10019         {
10020             unsigned cpu, node;
10021             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10022                                        arg2 ? &node : NULL,
10023                                        NULL));
10024             if (is_error(ret)) {
10025                 return ret;
10026             }
10027             if (arg1 && put_user_u32(cpu, arg1)) {
10028                 return -TARGET_EFAULT;
10029             }
10030             if (arg2 && put_user_u32(node, arg2)) {
10031                 return -TARGET_EFAULT;
10032             }
10033         }
10034         return ret;
10035     case TARGET_NR_sched_setparam:
10036         {
10037             struct sched_param *target_schp;
10038             struct sched_param schp;
10039 
10040             if (arg2 == 0) {
10041                 return -TARGET_EINVAL;
10042             }
10043             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10044                 return -TARGET_EFAULT;
10045             schp.sched_priority = tswap32(target_schp->sched_priority);
10046             unlock_user_struct(target_schp, arg2, 0);
10047             return get_errno(sched_setparam(arg1, &schp));
10048         }
10049     case TARGET_NR_sched_getparam:
10050         {
10051             struct sched_param *target_schp;
10052             struct sched_param schp;
10053 
10054             if (arg2 == 0) {
10055                 return -TARGET_EINVAL;
10056             }
10057             ret = get_errno(sched_getparam(arg1, &schp));
10058             if (!is_error(ret)) {
10059                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10060                     return -TARGET_EFAULT;
10061                 target_schp->sched_priority = tswap32(schp.sched_priority);
10062                 unlock_user_struct(target_schp, arg2, 1);
10063             }
10064         }
10065         return ret;
10066     case TARGET_NR_sched_setscheduler:
10067         {
10068             struct sched_param *target_schp;
10069             struct sched_param schp;
10070             if (arg3 == 0) {
10071                 return -TARGET_EINVAL;
10072             }
10073             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10074                 return -TARGET_EFAULT;
10075             schp.sched_priority = tswap32(target_schp->sched_priority);
10076             unlock_user_struct(target_schp, arg3, 0);
10077             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10078         }
10079     case TARGET_NR_sched_getscheduler:
10080         return get_errno(sched_getscheduler(arg1));
10081     case TARGET_NR_sched_yield:
10082         return get_errno(sched_yield());
10083     case TARGET_NR_sched_get_priority_max:
10084         return get_errno(sched_get_priority_max(arg1));
10085     case TARGET_NR_sched_get_priority_min:
10086         return get_errno(sched_get_priority_min(arg1));
10087 #ifdef TARGET_NR_sched_rr_get_interval
10088     case TARGET_NR_sched_rr_get_interval:
10089         {
10090             struct timespec ts;
10091             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10092             if (!is_error(ret)) {
10093                 ret = host_to_target_timespec(arg2, &ts);
10094             }
10095         }
10096         return ret;
10097 #endif
10098 #if defined(TARGET_NR_nanosleep)
10099     case TARGET_NR_nanosleep:
10100         {
10101             struct timespec req, rem;
10102             target_to_host_timespec(&req, arg1);
10103             ret = get_errno(safe_nanosleep(&req, &rem));
10104             if (is_error(ret) && arg2) {
10105                 host_to_target_timespec(arg2, &rem);
10106             }
10107         }
10108         return ret;
10109 #endif
10110     case TARGET_NR_prctl:
10111         switch (arg1) {
10112         case PR_GET_PDEATHSIG:
10113         {
10114             int deathsig;
10115             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10116             if (!is_error(ret) && arg2
10117                 && put_user_ual(deathsig, arg2)) {
10118                 return -TARGET_EFAULT;
10119             }
10120             return ret;
10121         }
10122 #ifdef PR_GET_NAME
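              /* The kernel task comm name is at most 16 bytes
                 (TASK_COMM_LEN), including the terminating NUL. */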
10123         case PR_GET_NAME:
10124         {
10125             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10126             if (!name) {
10127                 return -TARGET_EFAULT;
10128             }
10129             ret = get_errno(prctl(arg1, (unsigned long)name,
10130                                   arg3, arg4, arg5));
10131             unlock_user(name, arg2, 16);
10132             return ret;
10133         }
10134         case PR_SET_NAME:
10135         {
10136             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10137             if (!name) {
10138                 return -TARGET_EFAULT;
10139             }
10140             ret = get_errno(prctl(arg1, (unsigned long)name,
10141                                   arg3, arg4, arg5));
10142             unlock_user(name, arg2, 0);
10143             return ret;
10144         }
10145 #endif
10146 #ifdef TARGET_MIPS
10147         case TARGET_PR_GET_FP_MODE:
10148         {
10149             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10150             ret = 0;
10151             if (env->CP0_Status & (1 << CP0St_FR)) {
10152                 ret |= TARGET_PR_FP_MODE_FR;
10153             }
10154             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10155                 ret |= TARGET_PR_FP_MODE_FRE;
10156             }
10157             return ret;
10158         }
10159         case TARGET_PR_SET_FP_MODE:
10160         {
10161             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10162             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10163             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10164             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10165             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10166 
10167             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10168                                             TARGET_PR_FP_MODE_FRE;
10169 
10170             /* If nothing to change, return right away, successfully.  */
10171             if (old_fr == new_fr && old_fre == new_fre) {
10172                 return 0;
10173             }
10174             /* Check the value is valid */
10175             if (arg2 & ~known_bits) {
10176                 return -TARGET_EOPNOTSUPP;
10177             }
10178             /* Setting FRE without FR is not supported.  */
10179             if (new_fre && !new_fr) {
10180                 return -TARGET_EOPNOTSUPP;
10181             }
10182             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10183                 /* FR1 is not supported */
10184                 return -TARGET_EOPNOTSUPP;
10185             }
10186             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10187                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10188                 /* cannot set FR=0 */
10189                 return -TARGET_EOPNOTSUPP;
10190             }
10191             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10192                 /* Cannot set FRE=1 */
10193                 return -TARGET_EOPNOTSUPP;
10194             }
10195 
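                  /*
                   * Switching FR changes how single-precision values map
                   * onto the 64-bit FPRs, so move the affected 32-bit
                   * halves between each even/odd register pair.
                   */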
10196             int i;
10197             fpr_t *fpr = env->active_fpu.fpr;
10198             for (i = 0; i < 32 ; i += 2) {
10199                 if (!old_fr && new_fr) {
10200                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10201                 } else if (old_fr && !new_fr) {
10202                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10203                 }
10204             }
10205 
10206             if (new_fr) {
10207                 env->CP0_Status |= (1 << CP0St_FR);
10208                 env->hflags |= MIPS_HFLAG_F64;
10209             } else {
10210                 env->CP0_Status &= ~(1 << CP0St_FR);
10211                 env->hflags &= ~MIPS_HFLAG_F64;
10212             }
10213             if (new_fre) {
10214                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10215                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10216                     env->hflags |= MIPS_HFLAG_FRE;
10217                 }
10218             } else {
10219                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10220                 env->hflags &= ~MIPS_HFLAG_FRE;
10221             }
10222 
10223             return 0;
10224         }
10225 #endif /* MIPS */
10226 #ifdef TARGET_AARCH64
10227         case TARGET_PR_SVE_SET_VL:
10228             /*
10229              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10230              * PR_SVE_VL_INHERIT.  Note the kernel definition
10231              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10232              * even though the current architectural maximum is VQ=16.
10233              */
10234             ret = -TARGET_EINVAL;
10235             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10236                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10237                 CPUARMState *env = cpu_env;
10238                 ARMCPU *cpu = env_archcpu(env);
10239                 uint32_t vq, old_vq;
10240 
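                      /* zcr_el[1] stores (vq - 1) in its low four bits. */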
10241                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10242                 vq = MAX(arg2 / 16, 1);
10243                 vq = MIN(vq, cpu->sve_max_vq);
10244 
10245                 if (vq < old_vq) {
10246                     aarch64_sve_narrow_vq(env, vq);
10247                 }
10248                 env->vfp.zcr_el[1] = vq - 1;
10249                 arm_rebuild_hflags(env);
10250                 ret = vq * 16;
10251             }
10252             return ret;
10253         case TARGET_PR_SVE_GET_VL:
10254             ret = -TARGET_EINVAL;
10255             {
10256                 ARMCPU *cpu = env_archcpu(cpu_env);
10257                 if (cpu_isar_feature(aa64_sve, cpu)) {
10258                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10259                 }
10260             }
10261             return ret;
10262         case TARGET_PR_PAC_RESET_KEYS:
10263             {
10264                 CPUARMState *env = cpu_env;
10265                 ARMCPU *cpu = env_archcpu(env);
10266 
10267                 if (arg3 || arg4 || arg5) {
10268                     return -TARGET_EINVAL;
10269                 }
10270                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10271                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10272                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10273                                TARGET_PR_PAC_APGAKEY);
10274                     int ret = 0;
10275                     Error *err = NULL;
10276 
10277                     if (arg2 == 0) {
10278                         arg2 = all;
10279                     } else if (arg2 & ~all) {
10280                         return -TARGET_EINVAL;
10281                     }
10282                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10283                         ret |= qemu_guest_getrandom(&env->keys.apia,
10284                                                     sizeof(ARMPACKey), &err);
10285                     }
10286                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10287                         ret |= qemu_guest_getrandom(&env->keys.apib,
10288                                                     sizeof(ARMPACKey), &err);
10289                     }
10290                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10291                         ret |= qemu_guest_getrandom(&env->keys.apda,
10292                                                     sizeof(ARMPACKey), &err);
10293                     }
10294                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10295                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10296                                                     sizeof(ARMPACKey), &err);
10297                     }
10298                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10299                         ret |= qemu_guest_getrandom(&env->keys.apga,
10300                                                     sizeof(ARMPACKey), &err);
10301                     }
10302                     if (ret != 0) {
10303                         /*
10304                          * Some unknown failure in the crypto.  The best
10305                          * we can do is log it and fail the syscall.
10306                          * The real syscall cannot fail this way.
10307                          */
10308                         qemu_log_mask(LOG_UNIMP,
10309                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10310                                       error_get_pretty(err));
10311                         error_free(err);
10312                         return -TARGET_EIO;
10313                     }
10314                     return 0;
10315                 }
10316             }
10317             return -TARGET_EINVAL;
10318 #endif /* AARCH64 */
10319         case PR_GET_SECCOMP:
10320         case PR_SET_SECCOMP:
10321             /* Disable seccomp to prevent the target disabling syscalls we
10322              * need. */
10323             return -TARGET_EINVAL;
10324         default:
10325             /* Most prctl options have no pointer arguments */
10326             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10327         }
10328         break;
10329 #ifdef TARGET_NR_arch_prctl
10330     case TARGET_NR_arch_prctl:
10331         return do_arch_prctl(cpu_env, arg1, arg2);
10332 #endif
10333 #ifdef TARGET_NR_pread64
10334     case TARGET_NR_pread64:
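              /*
               * ABIs that pass 64-bit values in aligned register pairs
               * insert a padding argument before the offset, so shift
               * the arguments down before reassembling the offset.
               */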
10335         if (regpairs_aligned(cpu_env, num)) {
10336             arg4 = arg5;
10337             arg5 = arg6;
10338         }
10339         if (arg2 == 0 && arg3 == 0) {
10340             /* Special-case NULL buffer and zero length, which should succeed */
10341             p = 0;
10342         } else {
10343             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10344             if (!p) {
10345                 return -TARGET_EFAULT;
10346             }
10347         }
10348         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10349         unlock_user(p, arg2, ret);
10350         return ret;
10351     case TARGET_NR_pwrite64:
10352         if (regpairs_aligned(cpu_env, num)) {
10353             arg4 = arg5;
10354             arg5 = arg6;
10355         }
10356         if (arg2 == 0 && arg3 == 0) {
10357             /* Special-case NULL buffer and zero length, which should succeed */
10358             p = 0;
10359         } else {
10360             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10361             if (!p) {
10362                 return -TARGET_EFAULT;
10363             }
10364         }
10365         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10366         unlock_user(p, arg2, 0);
10367         return ret;
10368 #endif
10369     case TARGET_NR_getcwd:
10370         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10371             return -TARGET_EFAULT;
10372         ret = get_errno(sys_getcwd1(p, arg2));
10373         unlock_user(p, arg1, ret);
10374         return ret;
10375     case TARGET_NR_capget:
10376     case TARGET_NR_capset:
10377     {
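              /*
               * capget and capset share the marshalling of the header
               * and optional data structs; only the host call differs.
               */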
10378         struct target_user_cap_header *target_header;
10379         struct target_user_cap_data *target_data = NULL;
10380         struct __user_cap_header_struct header;
10381         struct __user_cap_data_struct data[2];
10382         struct __user_cap_data_struct *dataptr = NULL;
10383         int i, target_datalen;
10384         int data_items = 1;
10385 
10386         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10387             return -TARGET_EFAULT;
10388         }
10389         header.version = tswap32(target_header->version);
10390         header.pid = tswap32(target_header->pid);
10391 
10392         if (header.version != _LINUX_CAPABILITY_VERSION) {
10393             /* Version 2 and up takes pointer to two user_data structs */
10394             data_items = 2;
10395         }
10396 
10397         target_datalen = sizeof(*target_data) * data_items;
10398 
10399         if (arg2) {
10400             if (num == TARGET_NR_capget) {
10401                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10402             } else {
10403                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10404             }
10405             if (!target_data) {
10406                 unlock_user_struct(target_header, arg1, 0);
10407                 return -TARGET_EFAULT;
10408             }
10409 
10410             if (num == TARGET_NR_capset) {
10411                 for (i = 0; i < data_items; i++) {
10412                     data[i].effective = tswap32(target_data[i].effective);
10413                     data[i].permitted = tswap32(target_data[i].permitted);
10414                     data[i].inheritable = tswap32(target_data[i].inheritable);
10415                 }
10416             }
10417 
10418             dataptr = data;
10419         }
10420 
10421         if (num == TARGET_NR_capget) {
10422             ret = get_errno(capget(&header, dataptr));
10423         } else {
10424             ret = get_errno(capset(&header, dataptr));
10425         }
10426 
10427         /* The kernel always updates version for both capget and capset */
10428         target_header->version = tswap32(header.version);
10429         unlock_user_struct(target_header, arg1, 1);
10430 
10431         if (arg2) {
10432             if (num == TARGET_NR_capget) {
10433                 for (i = 0; i < data_items; i++) {
10434                     target_data[i].effective = tswap32(data[i].effective);
10435                     target_data[i].permitted = tswap32(data[i].permitted);
10436                     target_data[i].inheritable = tswap32(data[i].inheritable);
10437                 }
10438                 unlock_user(target_data, arg2, target_datalen);
10439             } else {
10440                 unlock_user(target_data, arg2, 0);
10441             }
10442         }
10443         return ret;
10444     }
10445     case TARGET_NR_sigaltstack:
10446         return do_sigaltstack(arg1, arg2,
10447                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10448 
10449 #ifdef CONFIG_SENDFILE
10450 #ifdef TARGET_NR_sendfile
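          /*
           * sendfile takes a target long offset and sendfile64 a 64-bit
           * one; both are serviced by the host sendfile(), which always
           * uses the full off_t.
           */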
10451     case TARGET_NR_sendfile:
10452     {
10453         off_t *offp = NULL;
10454         off_t off;
10455         if (arg3) {
10456             ret = get_user_sal(off, arg3);
10457             if (is_error(ret)) {
10458                 return ret;
10459             }
10460             offp = &off;
10461         }
10462         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10463         if (!is_error(ret) && arg3) {
10464             abi_long ret2 = put_user_sal(off, arg3);
10465             if (is_error(ret2)) {
10466                 ret = ret2;
10467             }
10468         }
10469         return ret;
10470     }
10471 #endif
10472 #ifdef TARGET_NR_sendfile64
10473     case TARGET_NR_sendfile64:
10474     {
10475         off_t *offp = NULL;
10476         off_t off;
10477         if (arg3) {
10478             ret = get_user_s64(off, arg3);
10479             if (is_error(ret)) {
10480                 return ret;
10481             }
10482             offp = &off;
10483         }
10484         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10485         if (!is_error(ret) && arg3) {
10486             abi_long ret2 = put_user_s64(off, arg3);
10487             if (is_error(ret2)) {
10488                 ret = ret2;
10489             }
10490         }
10491         return ret;
10492     }
10493 #endif
10494 #endif
10495 #ifdef TARGET_NR_vfork
10496     case TARGET_NR_vfork:
10497         return get_errno(do_fork(cpu_env,
10498                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10499                          0, 0, 0, 0));
10500 #endif
10501 #ifdef TARGET_NR_ugetrlimit
10502     case TARGET_NR_ugetrlimit:
10503     {
10504         struct rlimit rlim;
10505         int resource = target_to_host_resource(arg1);
10506         ret = get_errno(getrlimit(resource, &rlim));
10507         if (!is_error(ret)) {
10508             struct target_rlimit *target_rlim;
10509             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10510                 return -TARGET_EFAULT;
10511             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10512             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10513             unlock_user_struct(target_rlim, arg2, 1);
10514         }
10515         return ret;
10516     }
10517 #endif
10518 #ifdef TARGET_NR_truncate64
10519     case TARGET_NR_truncate64:
10520         if (!(p = lock_user_string(arg1)))
10521             return -TARGET_EFAULT;
10522         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10523         unlock_user(p, arg1, 0);
10524         return ret;
10525 #endif
10526 #ifdef TARGET_NR_ftruncate64
10527     case TARGET_NR_ftruncate64:
10528         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10529 #endif
10530 #ifdef TARGET_NR_stat64
10531     case TARGET_NR_stat64:
10532         if (!(p = lock_user_string(arg1))) {
10533             return -TARGET_EFAULT;
10534         }
10535         ret = get_errno(stat(path(p), &st));
10536         unlock_user(p, arg1, 0);
10537         if (!is_error(ret))
10538             ret = host_to_target_stat64(cpu_env, arg2, &st);
10539         return ret;
10540 #endif
10541 #ifdef TARGET_NR_lstat64
10542     case TARGET_NR_lstat64:
10543         if (!(p = lock_user_string(arg1))) {
10544             return -TARGET_EFAULT;
10545         }
10546         ret = get_errno(lstat(path(p), &st));
10547         unlock_user(p, arg1, 0);
10548         if (!is_error(ret))
10549             ret = host_to_target_stat64(cpu_env, arg2, &st);
10550         return ret;
10551 #endif
10552 #ifdef TARGET_NR_fstat64
10553     case TARGET_NR_fstat64:
10554         ret = get_errno(fstat(arg1, &st));
10555         if (!is_error(ret))
10556             ret = host_to_target_stat64(cpu_env, arg2, &st);
10557         return ret;
10558 #endif
10559 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10560 #ifdef TARGET_NR_fstatat64
10561     case TARGET_NR_fstatat64:
10562 #endif
10563 #ifdef TARGET_NR_newfstatat
10564     case TARGET_NR_newfstatat:
10565 #endif
10566         if (!(p = lock_user_string(arg2))) {
10567             return -TARGET_EFAULT;
10568         }
10569         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10570         unlock_user(p, arg2, 0);
10571         if (!is_error(ret))
10572             ret = host_to_target_stat64(cpu_env, arg3, &st);
10573         return ret;
10574 #endif
10575 #if defined(TARGET_NR_statx)
10576     case TARGET_NR_statx:
10577         {
10578             struct target_statx *target_stx;
10579             int dirfd = arg1;
10580             int flags = arg3;
10581 
10582             p = lock_user_string(arg2);
10583             if (p == NULL) {
10584                 return -TARGET_EFAULT;
10585             }
10586 #if defined(__NR_statx)
10587             {
10588                 /*
10589                  * It is assumed that struct statx is architecture independent.
10590                  */
10591                 struct target_statx host_stx;
10592                 int mask = arg4;
10593 
10594                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10595                 if (!is_error(ret)) {
10596                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10597                         unlock_user(p, arg2, 0);
10598                         return -TARGET_EFAULT;
10599                     }
10600                 }
10601 
10602                 if (ret != -TARGET_ENOSYS) {
10603                     unlock_user(p, arg2, 0);
10604                     return ret;
10605                 }
10606             }
10607 #endif
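            /*
             * Fallback when the host has no statx() syscall: emulate it
             * with fstatat() and fill in only the fields that a plain
             * struct stat provides; the remaining fields stay zeroed.
             */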
10608             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10609             unlock_user(p, arg2, 0);
10610 
10611             if (!is_error(ret)) {
10612                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10613                     return -TARGET_EFAULT;
10614                 }
10615                 memset(target_stx, 0, sizeof(*target_stx));
10616                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10617                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10618                 __put_user(st.st_ino, &target_stx->stx_ino);
10619                 __put_user(st.st_mode, &target_stx->stx_mode);
10620                 __put_user(st.st_uid, &target_stx->stx_uid);
10621                 __put_user(st.st_gid, &target_stx->stx_gid);
10622                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10623                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10624                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10625                 __put_user(st.st_size, &target_stx->stx_size);
10626                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10627                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10628                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10629                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10630                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10631                 unlock_user_struct(target_stx, arg5, 1);
10632             }
10633         }
10634         return ret;
10635 #endif
10636 #ifdef TARGET_NR_lchown
10637     case TARGET_NR_lchown:
10638         if (!(p = lock_user_string(arg1)))
10639             return -TARGET_EFAULT;
10640         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10641         unlock_user(p, arg1, 0);
10642         return ret;
10643 #endif
10644 #ifdef TARGET_NR_getuid
10645     case TARGET_NR_getuid:
10646         return get_errno(high2lowuid(getuid()));
10647 #endif
10648 #ifdef TARGET_NR_getgid
10649     case TARGET_NR_getgid:
10650         return get_errno(high2lowgid(getgid()));
10651 #endif
10652 #ifdef TARGET_NR_geteuid
10653     case TARGET_NR_geteuid:
10654         return get_errno(high2lowuid(geteuid()));
10655 #endif
10656 #ifdef TARGET_NR_getegid
10657     case TARGET_NR_getegid:
10658         return get_errno(high2lowgid(getegid()));
10659 #endif
10660     case TARGET_NR_setreuid:
10661         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10662     case TARGET_NR_setregid:
10663         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
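    /*
     * getgroups()/setgroups(): group IDs cross the ABI boundary as
     * target_id values, so every entry is byte-swapped and converted
     * between the target's (possibly 16-bit) IDs and the host gid_t.
     */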
10664     case TARGET_NR_getgroups:
10665         {
10666             int gidsetsize = arg1;
10667             target_id *target_grouplist;
10668             gid_t *grouplist;
10669             int i;
10670 
10671             grouplist = alloca(gidsetsize * sizeof(gid_t));
10672             ret = get_errno(getgroups(gidsetsize, grouplist));
10673             if (gidsetsize == 0)
10674                 return ret;
10675             if (!is_error(ret)) {
10676                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10677                 if (!target_grouplist)
10678                     return -TARGET_EFAULT;
10679                 for (i = 0; i < ret; i++)
10680                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10681                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10682             }
10683         }
10684         return ret;
10685     case TARGET_NR_setgroups:
10686         {
10687             int gidsetsize = arg1;
10688             target_id *target_grouplist;
10689             gid_t *grouplist = NULL;
10690             int i;
10691             if (gidsetsize) {
10692                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10693                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10694                 if (!target_grouplist) {
10695                     return -TARGET_EFAULT;
10696                 }
10697                 for (i = 0; i < gidsetsize; i++) {
10698                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10699                 }
10700                 unlock_user(target_grouplist, arg2, 0);
10701             }
10702             return get_errno(setgroups(gidsetsize, grouplist));
10703         }
10704     case TARGET_NR_fchown:
10705         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10706 #if defined(TARGET_NR_fchownat)
10707     case TARGET_NR_fchownat:
10708         if (!(p = lock_user_string(arg2)))
10709             return -TARGET_EFAULT;
10710         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10711                                  low2highgid(arg4), arg5));
10712         unlock_user(p, arg2, 0);
10713         return ret;
10714 #endif
10715 #ifdef TARGET_NR_setresuid
10716     case TARGET_NR_setresuid:
10717         return get_errno(sys_setresuid(low2highuid(arg1),
10718                                        low2highuid(arg2),
10719                                        low2highuid(arg3)));
10720 #endif
10721 #ifdef TARGET_NR_getresuid
10722     case TARGET_NR_getresuid:
10723         {
10724             uid_t ruid, euid, suid;
10725             ret = get_errno(getresuid(&ruid, &euid, &suid));
10726             if (!is_error(ret)) {
10727                 if (put_user_id(high2lowuid(ruid), arg1)
10728                     || put_user_id(high2lowuid(euid), arg2)
10729                     || put_user_id(high2lowuid(suid), arg3))
10730                     return -TARGET_EFAULT;
10731             }
10732         }
10733         return ret;
10734 #endif
10735 #ifdef TARGET_NR_getresgid
10736     case TARGET_NR_setresgid:
10737         return get_errno(sys_setresgid(low2highgid(arg1),
10738                                        low2highgid(arg2),
10739                                        low2highgid(arg3)));
10740 #endif
10741 #ifdef TARGET_NR_getresgid
10742     case TARGET_NR_getresgid:
10743         {
10744             gid_t rgid, egid, sgid;
10745             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10746             if (!is_error(ret)) {
10747                 if (put_user_id(high2lowgid(rgid), arg1)
10748                     || put_user_id(high2lowgid(egid), arg2)
10749                     || put_user_id(high2lowgid(sgid), arg3))
10750                     return -TARGET_EFAULT;
10751             }
10752         }
10753         return ret;
10754 #endif
10755 #ifdef TARGET_NR_chown
10756     case TARGET_NR_chown:
10757         if (!(p = lock_user_string(arg1)))
10758             return -TARGET_EFAULT;
10759         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10760         unlock_user(p, arg1, 0);
10761         return ret;
10762 #endif
10763     case TARGET_NR_setuid:
10764         return get_errno(sys_setuid(low2highuid(arg1)));
10765     case TARGET_NR_setgid:
10766         return get_errno(sys_setgid(low2highgid(arg1)));
10767     case TARGET_NR_setfsuid:
10768         return get_errno(setfsuid(arg1));
10769     case TARGET_NR_setfsgid:
10770         return get_errno(setfsgid(arg1));
10771 
10772 #ifdef TARGET_NR_lchown32
10773     case TARGET_NR_lchown32:
10774         if (!(p = lock_user_string(arg1)))
10775             return -TARGET_EFAULT;
10776         ret = get_errno(lchown(p, arg2, arg3));
10777         unlock_user(p, arg1, 0);
10778         return ret;
10779 #endif
10780 #ifdef TARGET_NR_getuid32
10781     case TARGET_NR_getuid32:
10782         return get_errno(getuid());
10783 #endif
10784 
10785 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10786     /* Alpha specific */
10787     case TARGET_NR_getxuid:
10788         {
10789             uid_t euid;
10790             euid = geteuid();
10791             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10792         }
10793         return get_errno(getuid());
10794 #endif
10795 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10796     /* Alpha specific */
10797     case TARGET_NR_getxgid:
10798         {
10799             gid_t egid;
10800             egid = getegid();
10801             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10802         }
10803         return get_errno(getgid());
10804 #endif
10805 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10806     /* Alpha specific */
10807     case TARGET_NR_osf_getsysinfo:
10808         ret = -TARGET_EOPNOTSUPP;
10809         switch (arg1) {
10810           case TARGET_GSI_IEEE_FP_CONTROL:
10811             {
10812                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10813                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10814 
10815                 swcr &= ~SWCR_STATUS_MASK;
10816                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10817 
10818                 if (put_user_u64 (swcr, arg2))
10819                         return -TARGET_EFAULT;
10820                 ret = 0;
10821             }
10822             break;
10823 
10824           /* case GSI_IEEE_STATE_AT_SIGNAL:
10825              -- Not implemented in linux kernel.
10826              case GSI_UACPROC:
10827              -- Retrieves current unaligned access state; not much used.
10828              case GSI_PROC_TYPE:
10829              -- Retrieves implver information; surely not used.
10830              case GSI_GET_HWRPB:
10831              -- Grabs a copy of the HWRPB; surely not used.
10832           */
10833         }
10834         return ret;
10835 #endif
10836 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10837     /* Alpha specific */
10838     case TARGET_NR_osf_setsysinfo:
10839         ret = -TARGET_EOPNOTSUPP;
10840         switch (arg1) {
10841           case TARGET_SSI_IEEE_FP_CONTROL:
10842             {
10843                 uint64_t swcr, fpcr;
10844 
10845                 if (get_user_u64 (swcr, arg2)) {
10846                     return -TARGET_EFAULT;
10847                 }
10848 
10849                 /*
10850                  * The kernel calls swcr_update_status to update the
10851                  * status bits from the fpcr at every point that it
10852                  * could be queried.  Therefore, we store the status
10853                  * bits only in FPCR.
10854                  */
10855                 ((CPUAlphaState *)cpu_env)->swcr
10856                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10857 
10858                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10859                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10860                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10861                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10862                 ret = 0;
10863             }
10864             break;
10865 
10866           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10867             {
10868                 uint64_t exc, fpcr, fex;
10869 
10870                 if (get_user_u64(exc, arg2)) {
10871                     return -TARGET_EFAULT;
10872                 }
10873                 exc &= SWCR_STATUS_MASK;
10874                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10875 
10876                 /* Old exceptions are not signaled.  */
10877                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10878                 fex = exc & ~fex;
10879                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10880                 fex &= ((CPUArchState *)cpu_env)->swcr;
10881 
10882                 /* Update the hardware fpcr.  */
10883                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10884                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10885 
10886                 if (fex) {
10887                     int si_code = TARGET_FPE_FLTUNK;
10888                     target_siginfo_t info;
10889 
10890                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10891                         si_code = TARGET_FPE_FLTUND;
10892                     }
10893                     if (fex & SWCR_TRAP_ENABLE_INE) {
10894                         si_code = TARGET_FPE_FLTRES;
10895                     }
10896                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10897                         si_code = TARGET_FPE_FLTUND;
10898                     }
10899                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10900                         si_code = TARGET_FPE_FLTOVF;
10901                     }
10902                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10903                         si_code = TARGET_FPE_FLTDIV;
10904                     }
10905                     if (fex & SWCR_TRAP_ENABLE_INV) {
10906                         si_code = TARGET_FPE_FLTINV;
10907                     }
10908 
10909                     info.si_signo = SIGFPE;
10910                     info.si_errno = 0;
10911                     info.si_code = si_code;
10912                     info._sifields._sigfault._addr
10913                         = ((CPUArchState *)cpu_env)->pc;
10914                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10915                                  QEMU_SI_FAULT, &info);
10916                 }
10917                 ret = 0;
10918             }
10919             break;
10920 
10921           /* case SSI_NVPAIRS:
10922              -- Used with SSIN_UACPROC to enable unaligned accesses.
10923              case SSI_IEEE_STATE_AT_SIGNAL:
10924              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10925              -- Not implemented in linux kernel
10926           */
10927         }
10928         return ret;
10929 #endif
10930 #ifdef TARGET_NR_osf_sigprocmask
10931     /* Alpha specific.  */
10932     case TARGET_NR_osf_sigprocmask:
10933         {
10934             abi_ulong mask;
10935             int how;
10936             sigset_t set, oldset;
10937 
10938             switch(arg1) {
10939             case TARGET_SIG_BLOCK:
10940                 how = SIG_BLOCK;
10941                 break;
10942             case TARGET_SIG_UNBLOCK:
10943                 how = SIG_UNBLOCK;
10944                 break;
10945             case TARGET_SIG_SETMASK:
10946                 how = SIG_SETMASK;
10947                 break;
10948             default:
10949                 return -TARGET_EINVAL;
10950             }
10951             mask = arg2;
10952             target_to_host_old_sigset(&set, &mask);
10953             ret = do_sigprocmask(how, &set, &oldset);
10954             if (!ret) {
10955                 host_to_target_old_sigset(&mask, &oldset);
10956                 ret = mask;
10957             }
10958         }
10959         return ret;
10960 #endif
10961 
10962 #ifdef TARGET_NR_getgid32
10963     case TARGET_NR_getgid32:
10964         return get_errno(getgid());
10965 #endif
10966 #ifdef TARGET_NR_geteuid32
10967     case TARGET_NR_geteuid32:
10968         return get_errno(geteuid());
10969 #endif
10970 #ifdef TARGET_NR_getegid32
10971     case TARGET_NR_getegid32:
10972         return get_errno(getegid());
10973 #endif
10974 #ifdef TARGET_NR_setreuid32
10975     case TARGET_NR_setreuid32:
10976         return get_errno(setreuid(arg1, arg2));
10977 #endif
10978 #ifdef TARGET_NR_setregid32
10979     case TARGET_NR_setregid32:
10980         return get_errno(setregid(arg1, arg2));
10981 #endif
10982 #ifdef TARGET_NR_getgroups32
10983     case TARGET_NR_getgroups32:
10984         {
10985             int gidsetsize = arg1;
10986             uint32_t *target_grouplist;
10987             gid_t *grouplist;
10988             int i;
10989 
10990             grouplist = alloca(gidsetsize * sizeof(gid_t));
10991             ret = get_errno(getgroups(gidsetsize, grouplist));
10992             if (gidsetsize == 0)
10993                 return ret;
10994             if (!is_error(ret)) {
10995                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10996                 if (!target_grouplist) {
10997                     return -TARGET_EFAULT;
10998                 }
10999                 for (i = 0; i < ret; i++)
11000                     target_grouplist[i] = tswap32(grouplist[i]);
11001                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11002             }
11003         }
11004         return ret;
11005 #endif
11006 #ifdef TARGET_NR_setgroups32
11007     case TARGET_NR_setgroups32:
11008         {
11009             int gidsetsize = arg1;
11010             uint32_t *target_grouplist;
11011             gid_t *grouplist;
11012             int i;
11013 
11014             grouplist = alloca(gidsetsize * sizeof(gid_t));
11015             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11016             if (!target_grouplist) {
11017                 return -TARGET_EFAULT;
11018             }
11019             for (i = 0; i < gidsetsize; i++)
11020                 grouplist[i] = tswap32(target_grouplist[i]);
11021             unlock_user(target_grouplist, arg2, 0);
11022             return get_errno(setgroups(gidsetsize, grouplist));
11023         }
11024 #endif
11025 #ifdef TARGET_NR_fchown32
11026     case TARGET_NR_fchown32:
11027         return get_errno(fchown(arg1, arg2, arg3));
11028 #endif
11029 #ifdef TARGET_NR_setresuid32
11030     case TARGET_NR_setresuid32:
11031         return get_errno(sys_setresuid(arg1, arg2, arg3));
11032 #endif
11033 #ifdef TARGET_NR_getresuid32
11034     case TARGET_NR_getresuid32:
11035         {
11036             uid_t ruid, euid, suid;
11037             ret = get_errno(getresuid(&ruid, &euid, &suid));
11038             if (!is_error(ret)) {
11039                 if (put_user_u32(ruid, arg1)
11040                     || put_user_u32(euid, arg2)
11041                     || put_user_u32(suid, arg3))
11042                     return -TARGET_EFAULT;
11043             }
11044         }
11045         return ret;
11046 #endif
11047 #ifdef TARGET_NR_setresgid32
11048     case TARGET_NR_setresgid32:
11049         return get_errno(sys_setresgid(arg1, arg2, arg3));
11050 #endif
11051 #ifdef TARGET_NR_getresgid32
11052     case TARGET_NR_getresgid32:
11053         {
11054             gid_t rgid, egid, sgid;
11055             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11056             if (!is_error(ret)) {
11057                 if (put_user_u32(rgid, arg1)
11058                     || put_user_u32(egid, arg2)
11059                     || put_user_u32(sgid, arg3))
11060                     return -TARGET_EFAULT;
11061             }
11062         }
11063         return ret;
11064 #endif
11065 #ifdef TARGET_NR_chown32
11066     case TARGET_NR_chown32:
11067         if (!(p = lock_user_string(arg1)))
11068             return -TARGET_EFAULT;
11069         ret = get_errno(chown(p, arg2, arg3));
11070         unlock_user(p, arg1, 0);
11071         return ret;
11072 #endif
11073 #ifdef TARGET_NR_setuid32
11074     case TARGET_NR_setuid32:
11075         return get_errno(sys_setuid(arg1));
11076 #endif
11077 #ifdef TARGET_NR_setgid32
11078     case TARGET_NR_setgid32:
11079         return get_errno(sys_setgid(arg1));
11080 #endif
11081 #ifdef TARGET_NR_setfsuid32
11082     case TARGET_NR_setfsuid32:
11083         return get_errno(setfsuid(arg1));
11084 #endif
11085 #ifdef TARGET_NR_setfsgid32
11086     case TARGET_NR_setfsgid32:
11087         return get_errno(setfsgid(arg1));
11088 #endif
11089 #ifdef TARGET_NR_mincore
11090     case TARGET_NR_mincore:
11091         {
11092             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11093             if (!a) {
11094                 return -TARGET_ENOMEM;
11095             }
11096             p = lock_user_string(arg3);
11097             if (!p) {
11098                 ret = -TARGET_EFAULT;
11099             } else {
11100                 ret = get_errno(mincore(a, arg2, p));
11101                 unlock_user(p, arg3, ret);
11102             }
11103             unlock_user(a, arg1, 0);
11104         }
11105         return ret;
11106 #endif
11107 #ifdef TARGET_NR_arm_fadvise64_64
11108     case TARGET_NR_arm_fadvise64_64:
11109         /* arm_fadvise64_64 looks like fadvise64_64 but
11110          * with different argument order: fd, advice, offset, len
11111          * rather than the usual fd, offset, len, advice.
11112          * Note that offset and len are both 64-bit so appear as
11113          * pairs of 32-bit registers.
11114          */
11115         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11116                             target_offset64(arg5, arg6), arg2);
11117         return -host_to_target_errno(ret);
11118 #endif
11119 
11120 #if TARGET_ABI_BITS == 32
11121 
11122 #ifdef TARGET_NR_fadvise64_64
11123     case TARGET_NR_fadvise64_64:
11124 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11125         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11126         ret = arg2;
11127         arg2 = arg3;
11128         arg3 = arg4;
11129         arg4 = arg5;
11130         arg5 = arg6;
11131         arg6 = ret;
11132 #else
11133         /* 6 args: fd, offset (high, low), len (high, low), advice */
11134         if (regpairs_aligned(cpu_env, num)) {
11135             /* offset is in (3,4), len in (5,6) and advice in 7 */
11136             arg2 = arg3;
11137             arg3 = arg4;
11138             arg4 = arg5;
11139             arg5 = arg6;
11140             arg6 = arg7;
11141         }
11142 #endif
11143         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11144                             target_offset64(arg4, arg5), arg6);
11145         return -host_to_target_errno(ret);
11146 #endif
11147 
11148 #ifdef TARGET_NR_fadvise64
11149     case TARGET_NR_fadvise64:
11150         /* 5 args: fd, offset (high, low), len, advice */
11151         if (regpairs_aligned(cpu_env, num)) {
11152             /* offset is in (3,4), len in 5 and advice in 6 */
11153             arg2 = arg3;
11154             arg3 = arg4;
11155             arg4 = arg5;
11156             arg5 = arg6;
11157         }
11158         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11159         return -host_to_target_errno(ret);
11160 #endif
11161 
11162 #else /* not a 32-bit ABI */
11163 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11164 #ifdef TARGET_NR_fadvise64_64
11165     case TARGET_NR_fadvise64_64:
11166 #endif
11167 #ifdef TARGET_NR_fadvise64
11168     case TARGET_NR_fadvise64:
11169 #endif
11170 #ifdef TARGET_S390X
11171         switch (arg4) {
11172         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11173         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11174         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11175         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11176         default: break;
11177         }
11178 #endif
11179         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11180 #endif
11181 #endif /* end of 64-bit ABI fadvise handling */
11182 
11183 #ifdef TARGET_NR_madvise
11184     case TARGET_NR_madvise:
11185         /* A straight passthrough may not be safe because qemu sometimes
11186            turns private file-backed mappings into anonymous mappings.
11187            This will break MADV_DONTNEED.
11188            This is a hint, so ignoring and returning success is ok.  */
11189         return 0;
11190 #endif
11191 #if TARGET_ABI_BITS == 32
11192     case TARGET_NR_fcntl64:
11193     {
11194         int cmd;
11195         struct flock64 fl;
11196         from_flock64_fn *copyfrom = copy_from_user_flock64;
11197         to_flock64_fn *copyto = copy_to_user_flock64;
11198 
11199 #ifdef TARGET_ARM
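        /*
         * The old ARM OABI lays out struct flock64 without the 64-bit
         * alignment padding that EABI uses, so non-EABI tasks need
         * their own flock64 copy helpers.
         */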
11200         if (!((CPUARMState *)cpu_env)->eabi) {
11201             copyfrom = copy_from_user_oabi_flock64;
11202             copyto = copy_to_user_oabi_flock64;
11203         }
11204 #endif
11205 
11206         cmd = target_to_host_fcntl_cmd(arg2);
11207         if (cmd == -TARGET_EINVAL) {
11208             return cmd;
11209         }
11210 
11211         switch(arg2) {
11212         case TARGET_F_GETLK64:
11213             ret = copyfrom(&fl, arg3);
11214             if (ret) {
11215                 break;
11216             }
11217             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11218             if (ret == 0) {
11219                 ret = copyto(arg3, &fl);
11220             }
11221             break;
11222 
11223         case TARGET_F_SETLK64:
11224         case TARGET_F_SETLKW64:
11225             ret = copyfrom(&fl, arg3);
11226             if (ret) {
11227                 break;
11228             }
11229             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11230             break;
11231         default:
11232             ret = do_fcntl(arg1, arg2, arg3);
11233             break;
11234         }
11235         return ret;
11236     }
11237 #endif
11238 #ifdef TARGET_NR_cacheflush
11239     case TARGET_NR_cacheflush:
11240         /* self-modifying code is handled automatically, so nothing needed */
11241         return 0;
11242 #endif
11243 #ifdef TARGET_NR_getpagesize
11244     case TARGET_NR_getpagesize:
11245         return TARGET_PAGE_SIZE;
11246 #endif
11247     case TARGET_NR_gettid:
11248         return get_errno(sys_gettid());
11249 #ifdef TARGET_NR_readahead
11250     case TARGET_NR_readahead:
11251 #if TARGET_ABI_BITS == 32
11252         if (regpairs_aligned(cpu_env, num)) {
11253             arg2 = arg3;
11254             arg3 = arg4;
11255             arg4 = arg5;
11256         }
11257         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11258 #else
11259         ret = get_errno(readahead(arg1, arg2, arg3));
11260 #endif
11261         return ret;
11262 #endif
11263 #ifdef CONFIG_ATTR
11264 #ifdef TARGET_NR_setxattr
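    /*
     * For the xattr list/get calls a zero-length (NULL) buffer means
     * "just report the required size", so a NULL host buffer is passed
     * straight through to the host syscall.
     */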
11265     case TARGET_NR_listxattr:
11266     case TARGET_NR_llistxattr:
11267     {
11268         void *p, *b = 0;
11269         if (arg2) {
11270             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11271             if (!b) {
11272                 return -TARGET_EFAULT;
11273             }
11274         }
11275         p = lock_user_string(arg1);
11276         if (p) {
11277             if (num == TARGET_NR_listxattr) {
11278                 ret = get_errno(listxattr(p, b, arg3));
11279             } else {
11280                 ret = get_errno(llistxattr(p, b, arg3));
11281             }
11282         } else {
11283             ret = -TARGET_EFAULT;
11284         }
11285         unlock_user(p, arg1, 0);
11286         unlock_user(b, arg2, arg3);
11287         return ret;
11288     }
11289     case TARGET_NR_flistxattr:
11290     {
11291         void *b = 0;
11292         if (arg2) {
11293             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11294             if (!b) {
11295                 return -TARGET_EFAULT;
11296             }
11297         }
11298         ret = get_errno(flistxattr(arg1, b, arg3));
11299         unlock_user(b, arg2, arg3);
11300         return ret;
11301     }
11302     case TARGET_NR_setxattr:
11303     case TARGET_NR_lsetxattr:
11304         {
11305             void *p, *n, *v = 0;
11306             if (arg3) {
11307                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11308                 if (!v) {
11309                     return -TARGET_EFAULT;
11310                 }
11311             }
11312             p = lock_user_string(arg1);
11313             n = lock_user_string(arg2);
11314             if (p && n) {
11315                 if (num == TARGET_NR_setxattr) {
11316                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11317                 } else {
11318                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11319                 }
11320             } else {
11321                 ret = -TARGET_EFAULT;
11322             }
11323             unlock_user(p, arg1, 0);
11324             unlock_user(n, arg2, 0);
11325             unlock_user(v, arg3, 0);
11326         }
11327         return ret;
11328     case TARGET_NR_fsetxattr:
11329         {
11330             void *n, *v = 0;
11331             if (arg3) {
11332                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11333                 if (!v) {
11334                     return -TARGET_EFAULT;
11335                 }
11336             }
11337             n = lock_user_string(arg2);
11338             if (n) {
11339                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11340             } else {
11341                 ret = -TARGET_EFAULT;
11342             }
11343             unlock_user(n, arg2, 0);
11344             unlock_user(v, arg3, 0);
11345         }
11346         return ret;
11347     case TARGET_NR_getxattr:
11348     case TARGET_NR_lgetxattr:
11349         {
11350             void *p, *n, *v = 0;
11351             if (arg3) {
11352                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11353                 if (!v) {
11354                     return -TARGET_EFAULT;
11355                 }
11356             }
11357             p = lock_user_string(arg1);
11358             n = lock_user_string(arg2);
11359             if (p && n) {
11360                 if (num == TARGET_NR_getxattr) {
11361                     ret = get_errno(getxattr(p, n, v, arg4));
11362                 } else {
11363                     ret = get_errno(lgetxattr(p, n, v, arg4));
11364                 }
11365             } else {
11366                 ret = -TARGET_EFAULT;
11367             }
11368             unlock_user(p, arg1, 0);
11369             unlock_user(n, arg2, 0);
11370             unlock_user(v, arg3, arg4);
11371         }
11372         return ret;
11373     case TARGET_NR_fgetxattr:
11374         {
11375             void *n, *v = 0;
11376             if (arg3) {
11377                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11378                 if (!v) {
11379                     return -TARGET_EFAULT;
11380                 }
11381             }
11382             n = lock_user_string(arg2);
11383             if (n) {
11384                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11385             } else {
11386                 ret = -TARGET_EFAULT;
11387             }
11388             unlock_user(n, arg2, 0);
11389             unlock_user(v, arg3, arg4);
11390         }
11391         return ret;
11392     case TARGET_NR_removexattr:
11393     case TARGET_NR_lremovexattr:
11394         {
11395             void *p, *n;
11396             p = lock_user_string(arg1);
11397             n = lock_user_string(arg2);
11398             if (p && n) {
11399                 if (num == TARGET_NR_removexattr) {
11400                     ret = get_errno(removexattr(p, n));
11401                 } else {
11402                     ret = get_errno(lremovexattr(p, n));
11403                 }
11404             } else {
11405                 ret = -TARGET_EFAULT;
11406             }
11407             unlock_user(p, arg1, 0);
11408             unlock_user(n, arg2, 0);
11409         }
11410         return ret;
11411     case TARGET_NR_fremovexattr:
11412         {
11413             void *n;
11414             n = lock_user_string(arg2);
11415             if (n) {
11416                 ret = get_errno(fremovexattr(arg1, n));
11417             } else {
11418                 ret = -TARGET_EFAULT;
11419             }
11420             unlock_user(n, arg2, 0);
11421         }
11422         return ret;
11423 #endif
11424 #endif /* CONFIG_ATTR */
11425 #ifdef TARGET_NR_set_thread_area
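    /*
     * set_thread_area/get_thread_area are architecture specific: MIPS
     * and CRIS keep the value in a CPU register, m68k stores it in the
     * TaskState, and 32-bit x86 goes through do_set_thread_area().
     */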
11426     case TARGET_NR_set_thread_area:
11427 #if defined(TARGET_MIPS)
11428       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11429       return 0;
11430 #elif defined(TARGET_CRIS)
11431       if (arg1 & 0xff)
11432           ret = -TARGET_EINVAL;
11433       else {
11434           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11435           ret = 0;
11436       }
11437       return ret;
11438 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11439       return do_set_thread_area(cpu_env, arg1);
11440 #elif defined(TARGET_M68K)
11441       {
11442           TaskState *ts = cpu->opaque;
11443           ts->tp_value = arg1;
11444           return 0;
11445       }
11446 #else
11447       return -TARGET_ENOSYS;
11448 #endif
11449 #endif
11450 #ifdef TARGET_NR_get_thread_area
11451     case TARGET_NR_get_thread_area:
11452 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11453         return do_get_thread_area(cpu_env, arg1);
11454 #elif defined(TARGET_M68K)
11455         {
11456             TaskState *ts = cpu->opaque;
11457             return ts->tp_value;
11458         }
11459 #else
11460         return -TARGET_ENOSYS;
11461 #endif
11462 #endif
11463 #ifdef TARGET_NR_getdomainname
11464     case TARGET_NR_getdomainname:
11465         return -TARGET_ENOSYS;
11466 #endif
11467 
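    /*
     * The clock_* handlers below come in two flavours: the native ones
     * and the *64 (time64) variants used by 32-bit guests; the latter
     * differ only in using the 64-bit timespec conversion helpers.
     */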
11468 #ifdef TARGET_NR_clock_settime
11469     case TARGET_NR_clock_settime:
11470     {
11471         struct timespec ts;
11472 
11473         ret = target_to_host_timespec(&ts, arg2);
11474         if (!is_error(ret)) {
11475             ret = get_errno(clock_settime(arg1, &ts));
11476         }
11477         return ret;
11478     }
11479 #endif
11480 #ifdef TARGET_NR_clock_settime64
11481     case TARGET_NR_clock_settime64:
11482     {
11483         struct timespec ts;
11484 
11485         ret = target_to_host_timespec64(&ts, arg2);
11486         if (!is_error(ret)) {
11487             ret = get_errno(clock_settime(arg1, &ts));
11488         }
11489         return ret;
11490     }
11491 #endif
11492 #ifdef TARGET_NR_clock_gettime
11493     case TARGET_NR_clock_gettime:
11494     {
11495         struct timespec ts;
11496         ret = get_errno(clock_gettime(arg1, &ts));
11497         if (!is_error(ret)) {
11498             ret = host_to_target_timespec(arg2, &ts);
11499         }
11500         return ret;
11501     }
11502 #endif
11503 #ifdef TARGET_NR_clock_gettime64
11504     case TARGET_NR_clock_gettime64:
11505     {
11506         struct timespec ts;
11507         ret = get_errno(clock_gettime(arg1, &ts));
11508         if (!is_error(ret)) {
11509             ret = host_to_target_timespec64(arg2, &ts);
11510         }
11511         return ret;
11512     }
11513 #endif
11514 #ifdef TARGET_NR_clock_getres
11515     case TARGET_NR_clock_getres:
11516     {
11517         struct timespec ts;
11518         ret = get_errno(clock_getres(arg1, &ts));
11519         if (!is_error(ret)) {
11520             host_to_target_timespec(arg2, &ts);
11521         }
11522         return ret;
11523     }
11524 #endif
11525 #ifdef TARGET_NR_clock_nanosleep
11526     case TARGET_NR_clock_nanosleep:
11527     {
11528         struct timespec ts;
11529         target_to_host_timespec(&ts, arg3);
11530         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11531                                              &ts, arg4 ? &ts : NULL));
11532         if (arg4)
11533             host_to_target_timespec(arg4, &ts);
11534 
11535 #if defined(TARGET_PPC)
11536         /* clock_nanosleep is odd in that it returns positive errno values.
11537          * On PPC, CR0 bit 3 should be set in such a situation. */
11538         if (ret && ret != -TARGET_ERESTARTSYS) {
11539             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11540         }
11541 #endif
11542         return ret;
11543     }
11544 #endif
11545 
11546 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11547     case TARGET_NR_set_tid_address:
11548         return get_errno(set_tid_address((int *)g2h(arg1)));
11549 #endif
11550 
11551     case TARGET_NR_tkill:
11552         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11553 
11554     case TARGET_NR_tgkill:
11555         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11556                          target_to_host_signal(arg3)));
11557 
11558 #ifdef TARGET_NR_set_robust_list
11559     case TARGET_NR_set_robust_list:
11560     case TARGET_NR_get_robust_list:
11561         /* The ABI for supporting robust futexes has userspace pass
11562          * the kernel a pointer to a linked list which is updated by
11563          * userspace after the syscall; the list is walked by the kernel
11564          * when the thread exits. Since the linked list in QEMU guest
11565          * memory isn't a valid linked list for the host and we have
11566          * no way to reliably intercept the thread-death event, we can't
11567          * support these. Silently return ENOSYS so that guest userspace
11568          * falls back to a non-robust futex implementation (which should
11569          * be OK except in the corner case of the guest crashing while
11570          * holding a mutex that is shared with another process via
11571          * shared memory).
11572          */
11573         return -TARGET_ENOSYS;
11574 #endif
11575 
11576 #if defined(TARGET_NR_utimensat)
11577     case TARGET_NR_utimensat:
11578         {
11579             struct timespec *tsp, ts[2];
11580             if (!arg3) {
11581                 tsp = NULL;
11582             } else {
11583                 target_to_host_timespec(ts, arg3);
11584                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11585                 tsp = ts;
11586             }
11587             if (!arg2)
11588                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11589             else {
11590                 if (!(p = lock_user_string(arg2))) {
11591                     return -TARGET_EFAULT;
11592                 }
11593                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11594                 unlock_user(p, arg2, 0);
11595             }
11596         }
11597         return ret;
11598 #endif
11599 #ifdef TARGET_NR_futex
11600     case TARGET_NR_futex:
11601         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11602 #endif
11603 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11604     case TARGET_NR_inotify_init:
11605         ret = get_errno(sys_inotify_init());
11606         if (ret >= 0) {
11607             fd_trans_register(ret, &target_inotify_trans);
11608         }
11609         return ret;
11610 #endif
11611 #ifdef CONFIG_INOTIFY1
11612 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11613     case TARGET_NR_inotify_init1:
11614         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11615                                           fcntl_flags_tbl)));
11616         if (ret >= 0) {
11617             fd_trans_register(ret, &target_inotify_trans);
11618         }
11619         return ret;
11620 #endif
11621 #endif
11622 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11623     case TARGET_NR_inotify_add_watch:
11624         p = lock_user_string(arg2);
11625         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11626         unlock_user(p, arg2, 0);
11627         return ret;
11628 #endif
11629 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11630     case TARGET_NR_inotify_rm_watch:
11631         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11632 #endif
11633 
11634 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11635     case TARGET_NR_mq_open:
11636         {
11637             struct mq_attr posix_mq_attr;
11638             struct mq_attr *pposix_mq_attr;
11639             int host_flags;
11640 
11641             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11642             pposix_mq_attr = NULL;
11643             if (arg4) {
11644                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11645                     return -TARGET_EFAULT;
11646                 }
11647                 pposix_mq_attr = &posix_mq_attr;
11648             }
11649             p = lock_user_string(arg1 - 1);
11650             if (!p) {
11651                 return -TARGET_EFAULT;
11652             }
11653             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11654             unlock_user (p, arg1, 0);
11655         }
11656         return ret;
11657 
11658     case TARGET_NR_mq_unlink:
11659         p = lock_user_string(arg1 - 1);
11660         if (!p) {
11661             return -TARGET_EFAULT;
11662         }
11663         ret = get_errno(mq_unlink(p));
11664         unlock_user (p, arg1, 0);
11665         return ret;
11666 
11667 #ifdef TARGET_NR_mq_timedsend
11668     case TARGET_NR_mq_timedsend:
11669         {
11670             struct timespec ts;
11671 
11672             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11673             if (arg5 != 0) {
11674                 target_to_host_timespec(&ts, arg5);
11675                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11676                 host_to_target_timespec(arg5, &ts);
11677             } else {
11678                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11679             }
11680             unlock_user (p, arg2, arg3);
11681         }
11682         return ret;
11683 #endif
11684 
11685 #ifdef TARGET_NR_mq_timedreceive
11686     case TARGET_NR_mq_timedreceive:
11687         {
11688             struct timespec ts;
11689             unsigned int prio;
11690 
11691             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11692             if (arg5 != 0) {
11693                 target_to_host_timespec(&ts, arg5);
11694                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11695                                                      &prio, &ts));
11696                 host_to_target_timespec(arg5, &ts);
11697             } else {
11698                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11699                                                      &prio, NULL));
11700             }
11701             unlock_user (p, arg2, arg3);
11702             if (arg4 != 0)
11703                 put_user_u32(prio, arg4);
11704         }
11705         return ret;
11706 #endif
11707 
11708     /* Not implemented for now... */
11709 /*     case TARGET_NR_mq_notify: */
11710 /*         break; */
11711 
11712     case TARGET_NR_mq_getsetattr:
11713         {
11714             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11715             ret = 0;
11716             if (arg2 != 0) {
11717                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11718                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11719                                            &posix_mq_attr_out));
11720             } else if (arg3 != 0) {
11721                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11722             }
11723             if (ret == 0 && arg3 != 0) {
11724                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11725             }
11726         }
11727         return ret;
11728 #endif
11729 
11730 #ifdef CONFIG_SPLICE
11731 #ifdef TARGET_NR_tee
11732     case TARGET_NR_tee:
11733         {
11734             ret = get_errno(tee(arg1, arg2, arg3, arg4));
11735         }
11736         return ret;
11737 #endif
11738 #ifdef TARGET_NR_splice
11739     case TARGET_NR_splice:
11740         {
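            /*
             * The optional in/out offsets are loff_t values in guest
             * memory: copy them in, hand pointers to the host splice(),
             * and write the updated offsets back afterwards.
             */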
11741             loff_t loff_in, loff_out;
11742             loff_t *ploff_in = NULL, *ploff_out = NULL;
11743             if (arg2) {
11744                 if (get_user_u64(loff_in, arg2)) {
11745                     return -TARGET_EFAULT;
11746                 }
11747                 ploff_in = &loff_in;
11748             }
11749             if (arg4) {
11750                 if (get_user_u64(loff_out, arg4)) {
11751                     return -TARGET_EFAULT;
11752                 }
11753                 ploff_out = &loff_out;
11754             }
11755             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11756             if (arg2) {
11757                 if (put_user_u64(loff_in, arg2)) {
11758                     return -TARGET_EFAULT;
11759                 }
11760             }
11761             if (arg4) {
11762                 if (put_user_u64(loff_out, arg4)) {
11763                     return -TARGET_EFAULT;
11764                 }
11765             }
11766         }
11767         return ret;
11768 #endif
11769 #ifdef TARGET_NR_vmsplice
11770     case TARGET_NR_vmsplice:
11771         {
11772             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11773             if (vec != NULL) {
11774                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11775                 unlock_iovec(vec, arg2, arg3, 0);
11776             } else {
11777                 ret = -host_to_target_errno(errno);
11778             }
11779         }
11780         return ret;
11781 #endif
11782 #endif /* CONFIG_SPLICE */
11783 #ifdef CONFIG_EVENTFD
11784 #if defined(TARGET_NR_eventfd)
11785     case TARGET_NR_eventfd:
11786         ret = get_errno(eventfd(arg1, 0));
11787         if (ret >= 0) {
11788             fd_trans_register(ret, &target_eventfd_trans);
11789         }
11790         return ret;
11791 #endif
11792 #if defined(TARGET_NR_eventfd2)
11793     case TARGET_NR_eventfd2:
11794     {
11795         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11796         if (arg2 & TARGET_O_NONBLOCK) {
11797             host_flags |= O_NONBLOCK;
11798         }
11799         if (arg2 & TARGET_O_CLOEXEC) {
11800             host_flags |= O_CLOEXEC;
11801         }
11802         ret = get_errno(eventfd(arg1, host_flags));
11803         if (ret >= 0) {
11804             fd_trans_register(ret, &target_eventfd_trans);
11805         }
11806         return ret;
11807     }
11808 #endif
11809 #endif /* CONFIG_EVENTFD  */
11810 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11811     case TARGET_NR_fallocate:
11812 #if TARGET_ABI_BITS == 32
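        /* 32-bit ABIs pass the 64-bit offset and length as register pairs. */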
11813         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11814                                   target_offset64(arg5, arg6)));
11815 #else
11816         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11817 #endif
11818         return ret;
11819 #endif
11820 #if defined(CONFIG_SYNC_FILE_RANGE)
11821 #if defined(TARGET_NR_sync_file_range)
11822     case TARGET_NR_sync_file_range:
11823 #if TARGET_ABI_BITS == 32
11824 #if defined(TARGET_MIPS)
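        /*
         * MIPS aligns 64-bit arguments to even register pairs, so the
         * offset/len pairs start at arg3 and the flags arrive in arg7.
         */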
11825         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11826                                         target_offset64(arg5, arg6), arg7));
11827 #else
11828         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11829                                         target_offset64(arg4, arg5), arg6));
11830 #endif /* !TARGET_MIPS */
11831 #else
11832         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11833 #endif
11834         return ret;
11835 #endif
11836 #if defined(TARGET_NR_sync_file_range2) || \
11837     defined(TARGET_NR_arm_sync_file_range)
11838 #if defined(TARGET_NR_sync_file_range2)
11839     case TARGET_NR_sync_file_range2:
11840 #endif
11841 #if defined(TARGET_NR_arm_sync_file_range)
11842     case TARGET_NR_arm_sync_file_range:
11843 #endif
11844         /* This is like sync_file_range but the arguments are reordered */
11845 #if TARGET_ABI_BITS == 32
11846         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11847                                         target_offset64(arg5, arg6), arg2));
11848 #else
11849         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11850 #endif
11851         return ret;
11852 #endif
11853 #endif
11854 #if defined(TARGET_NR_signalfd4)
11855     case TARGET_NR_signalfd4:
11856         return do_signalfd4(arg1, arg2, arg4);
11857 #endif
11858 #if defined(TARGET_NR_signalfd)
11859     case TARGET_NR_signalfd:
11860         return do_signalfd4(arg1, arg2, 0);
11861 #endif
11862 #if defined(CONFIG_EPOLL)
11863 #if defined(TARGET_NR_epoll_create)
11864     case TARGET_NR_epoll_create:
11865         return get_errno(epoll_create(arg1));
11866 #endif
11867 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11868     case TARGET_NR_epoll_create1:
11869         return get_errno(epoll_create1(arg1));
11870 #endif
11871 #if defined(TARGET_NR_epoll_ctl)
11872     case TARGET_NR_epoll_ctl:
11873     {
11874         struct epoll_event ep;
11875         struct epoll_event *epp = 0;
11876         if (arg4) {
11877             struct target_epoll_event *target_ep;
11878             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11879                 return -TARGET_EFAULT;
11880             }
11881             ep.events = tswap32(target_ep->events);
11882             /* The epoll_data_t union is just opaque data to the kernel,
11883              * so we transfer all 64 bits across and need not worry what
11884              * actual data type it is.
11885              */
11886             ep.data.u64 = tswap64(target_ep->data.u64);
11887             unlock_user_struct(target_ep, arg4, 0);
11888             epp = &ep;
11889         }
11890         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11891     }
11892 #endif
11893 
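    /*
     * epoll_wait and epoll_pwait share one implementation: both end up
     * in safe_epoll_pwait(), with a NULL signal mask for plain
     * epoll_wait.
     */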
11894 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11895 #if defined(TARGET_NR_epoll_wait)
11896     case TARGET_NR_epoll_wait:
11897 #endif
11898 #if defined(TARGET_NR_epoll_pwait)
11899     case TARGET_NR_epoll_pwait:
11900 #endif
11901     {
11902         struct target_epoll_event *target_ep;
11903         struct epoll_event *ep;
11904         int epfd = arg1;
11905         int maxevents = arg3;
11906         int timeout = arg4;
11907 
11908         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11909             return -TARGET_EINVAL;
11910         }
11911 
11912         target_ep = lock_user(VERIFY_WRITE, arg2,
11913                               maxevents * sizeof(struct target_epoll_event), 1);
11914         if (!target_ep) {
11915             return -TARGET_EFAULT;
11916         }
11917 
11918         ep = g_try_new(struct epoll_event, maxevents);
11919         if (!ep) {
11920             unlock_user(target_ep, arg2, 0);
11921             return -TARGET_ENOMEM;
11922         }
11923 
11924         switch (num) {
11925 #if defined(TARGET_NR_epoll_pwait)
11926         case TARGET_NR_epoll_pwait:
11927         {
11928             target_sigset_t *target_set;
11929             sigset_t _set, *set = &_set;
11930 
11931             if (arg5) {
11932                 if (arg6 != sizeof(target_sigset_t)) {
11933                     ret = -TARGET_EINVAL;
11934                     break;
11935                 }
11936 
11937                 target_set = lock_user(VERIFY_READ, arg5,
11938                                        sizeof(target_sigset_t), 1);
11939                 if (!target_set) {
11940                     ret = -TARGET_EFAULT;
11941                     break;
11942                 }
11943                 target_to_host_sigset(set, target_set);
11944                 unlock_user(target_set, arg5, 0);
11945             } else {
11946                 set = NULL;
11947             }
11948 
11949             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11950                                              set, SIGSET_T_SIZE));
11951             break;
11952         }
11953 #endif
11954 #if defined(TARGET_NR_epoll_wait)
11955         case TARGET_NR_epoll_wait:
11956             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11957                                              NULL, 0));
11958             break;
11959 #endif
11960         default:
11961             ret = -TARGET_ENOSYS;
11962         }
11963         if (!is_error(ret)) {
11964             int i;
11965             for (i = 0; i < ret; i++) {
11966                 target_ep[i].events = tswap32(ep[i].events);
11967                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11968             }
11969             unlock_user(target_ep, arg2,
11970                         ret * sizeof(struct target_epoll_event));
11971         } else {
11972             unlock_user(target_ep, arg2, 0);
11973         }
11974         g_free(ep);
11975         return ret;
11976     }
11977 #endif
11978 #endif
11979 #ifdef TARGET_NR_prlimit64
11980     case TARGET_NR_prlimit64:
11981     {
11982         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11983         struct target_rlimit64 *target_rnew, *target_rold;
11984         struct host_rlimit64 rnew, rold, *rnewp = 0;
11985         int resource = target_to_host_resource(arg2);
11986 
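        /*
         * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are
         * deliberately not forwarded to the host, since they would also
         * constrain QEMU itself; the old values can still be read back.
         */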
11987         if (arg3 && (resource != RLIMIT_AS &&
11988                      resource != RLIMIT_DATA &&
11989                      resource != RLIMIT_STACK)) {
11990             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11991                 return -TARGET_EFAULT;
11992             }
11993             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11994             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11995             unlock_user_struct(target_rnew, arg3, 0);
11996             rnewp = &rnew;
11997         }
11998 
11999         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12000         if (!is_error(ret) && arg4) {
12001             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12002                 return -TARGET_EFAULT;
12003             }
12004             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12005             target_rold->rlim_max = tswap64(rold.rlim_max);
12006             unlock_user_struct(target_rold, arg4, 1);
12007         }
12008         return ret;
12009     }
12010 #endif
12011 #ifdef TARGET_NR_gethostname
12012     case TARGET_NR_gethostname:
12013     {
12014         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12015         if (name) {
12016             ret = get_errno(gethostname(name, arg2));
12017             unlock_user(name, arg1, arg2);
12018         } else {
12019             ret = -TARGET_EFAULT;
12020         }
12021         return ret;
12022     }
12023 #endif
12024 #ifdef TARGET_NR_atomic_cmpxchg_32
12025     case TARGET_NR_atomic_cmpxchg_32:
12026     {
12027         /* should use start_exclusive from main.c */
12028         abi_ulong mem_value;
12029         if (get_user_u32(mem_value, arg6)) {
12030             target_siginfo_t info;
12031             info.si_signo = SIGSEGV;
12032             info.si_errno = 0;
12033             info.si_code = TARGET_SEGV_MAPERR;
12034             info._sifields._sigfault._addr = arg6;
12035             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12036                          QEMU_SI_FAULT, &info);
12037             ret = 0xdeadbeef;
12038 
12039         }
12040         if (mem_value == arg2)
12041             put_user_u32(arg1, arg6);
12042         return mem_value;
12043     }
12044 #endif
12045 #ifdef TARGET_NR_atomic_barrier
12046     case TARGET_NR_atomic_barrier:
12047         /* Like the kernel implementation and the
12048            qemu arm barrier, treat this as a no-op.  */
12049         return 0;
12050 #endif
12051 
12052 #ifdef TARGET_NR_timer_create
12053     case TARGET_NR_timer_create:
12054     {
12055         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12056 
12057         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12058 
12059         int clkid = arg1;
12060         int timer_index = next_free_host_timer();
12061 
12062         if (timer_index < 0) {
12063             ret = -TARGET_EAGAIN;
12064         } else {
12065             timer_t *phtimer = g_posix_timers + timer_index;
12066 
12067             if (arg2) {
12068                 phost_sevp = &host_sevp;
12069                 ret = target_to_host_sigevent(phost_sevp, arg2);
12070                 if (ret != 0) {
12071                     return ret;
12072                 }
12073             }
12074 
12075             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12076             if (ret) {
12077                 phtimer = NULL;
12078             } else {
12079                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12080                     return -TARGET_EFAULT;
12081                 }
12082             }
12083         }
12084         return ret;
12085     }
12086 #endif
12087 
12088 #ifdef TARGET_NR_timer_settime
12089     case TARGET_NR_timer_settime:
12090     {
12091         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12092          * struct itimerspec * old_value */
12093         target_timer_t timerid = get_timer_id(arg1);
12094 
12095         if (timerid < 0) {
12096             ret = timerid;
12097         } else if (arg3 == 0) {
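            /* The kernel rejects a NULL new_value with EINVAL; mirror that. */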
12098             ret = -TARGET_EINVAL;
12099         } else {
12100             timer_t htimer = g_posix_timers[timerid];
12101             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12102 
12103             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12104                 return -TARGET_EFAULT;
12105             }
12106             ret = get_errno(
12107                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12108             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12109                 return -TARGET_EFAULT;
12110             }
12111         }
12112         return ret;
12113     }
12114 #endif
12115 
12116 #ifdef TARGET_NR_timer_gettime
12117     case TARGET_NR_timer_gettime:
12118     {
12119         /* args: timer_t timerid, struct itimerspec *curr_value */
12120         target_timer_t timerid = get_timer_id(arg1);
12121 
12122         if (timerid < 0) {
12123             ret = timerid;
12124         } else if (!arg2) {
12125             ret = -TARGET_EFAULT;
12126         } else {
12127             timer_t htimer = g_posix_timers[timerid];
12128             struct itimerspec hspec;
12129             ret = get_errno(timer_gettime(htimer, &hspec));
12130 
12131             if (host_to_target_itimerspec(arg2, &hspec)) {
12132                 ret = -TARGET_EFAULT;
12133             }
12134         }
12135         return ret;
12136     }
12137 #endif
12138 
12139 #ifdef TARGET_NR_timer_getoverrun
12140     case TARGET_NR_timer_getoverrun:
12141     {
12142         /* args: timer_t timerid */
12143         target_timer_t timerid = get_timer_id(arg1);
12144 
12145         if (timerid < 0) {
12146             ret = timerid;
12147         } else {
12148             timer_t htimer = g_posix_timers[timerid];
12149             ret = get_errno(timer_getoverrun(htimer));
12150         }
12151         return ret;
12152     }
12153 #endif
12154 
12155 #ifdef TARGET_NR_timer_delete
12156     case TARGET_NR_timer_delete:
12157     {
12158         /* args: timer_t timerid */
12159         target_timer_t timerid = get_timer_id(arg1);
12160 
12161         if (timerid < 0) {
12162             ret = timerid;
12163         } else {
12164             timer_t htimer = g_posix_timers[timerid];
12165             ret = get_errno(timer_delete(htimer));
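            /* Clear the slot so next_free_host_timer() can reuse this index. */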
12166             g_posix_timers[timerid] = 0;
12167         }
12168         return ret;
12169     }
12170 #endif
12171 
12172 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12173     case TARGET_NR_timerfd_create:
12174         return get_errno(timerfd_create(arg1,
12175                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12176 #endif
12177 
12178 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12179     case TARGET_NR_timerfd_gettime:
12180         {
12181             struct itimerspec its_curr;
12182 
12183             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12184 
12185             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12186                 return -TARGET_EFAULT;
12187             }
12188         }
12189         return ret;
12190 #endif
12191 
12192 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12193     case TARGET_NR_timerfd_settime:
12194         {
12195             struct itimerspec its_new, its_old, *p_new;
12196 
12197             if (arg3) {
12198                 if (target_to_host_itimerspec(&its_new, arg3)) {
12199                     return -TARGET_EFAULT;
12200                 }
12201                 p_new = &its_new;
12202             } else {
12203                 p_new = NULL;
12204             }
12205 
12206             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12207 
12208             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12209                 return -TARGET_EFAULT;
12210             }
12211         }
12212         return ret;
12213 #endif
12214 
12215 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12216     case TARGET_NR_ioprio_get:
12217         return get_errno(ioprio_get(arg1, arg2));
12218 #endif
12219 
12220 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12221     case TARGET_NR_ioprio_set:
12222         return get_errno(ioprio_set(arg1, arg2, arg3));
12223 #endif
12224 
12225 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12226     case TARGET_NR_setns:
12227         return get_errno(setns(arg1, arg2));
12228 #endif
12229 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12230     case TARGET_NR_unshare:
12231         return get_errno(unshare(arg1));
12232 #endif
12233 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12234     case TARGET_NR_kcmp:
12235         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12236 #endif
12237 #ifdef TARGET_NR_swapcontext
12238     case TARGET_NR_swapcontext:
12239         /* PowerPC specific.  */
12240         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12241 #endif
12242 #ifdef TARGET_NR_memfd_create
12243     case TARGET_NR_memfd_create:
12244         p = lock_user_string(arg1);
12245         if (!p) {
12246             return -TARGET_EFAULT;
12247         }
12248         ret = get_errno(memfd_create(p, arg2));
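        /*
         * Assumption: drop any fd translator still registered for this fd
         * number from a previously-closed descriptor, so the new memfd is
         * not mis-translated.
         */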
12249         fd_trans_unregister(ret);
12250         unlock_user(p, arg1, 0);
12251         return ret;
12252 #endif
12253 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
12254     case TARGET_NR_membarrier:
12255         return get_errno(membarrier(arg1, arg2));
12256 #endif
12257 
12258     default:
12259         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12260         return -TARGET_ENOSYS;
12261     }
12262     return ret;
12263 }
12264 
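/*
 * Guest syscall entry point: record the call via the syscall trace hooks,
 * print it when strace logging (-strace) is enabled, dispatch to
 * do_syscall1(), then log and record the return value.
 */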
12265 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12266                     abi_long arg2, abi_long arg3, abi_long arg4,
12267                     abi_long arg5, abi_long arg6, abi_long arg7,
12268                     abi_long arg8)
12269 {
12270     CPUState *cpu = env_cpu(cpu_env);
12271     abi_long ret;
12272 
12273 #ifdef DEBUG_ERESTARTSYS
12274     /* Debug-only code for exercising the syscall-restart code paths
12275      * in the per-architecture cpu main loops: restart every syscall
12276      * the guest makes once before letting it through.
12277      */
12278     {
12279         static bool flag;
12280         flag = !flag;
12281         if (flag) {
12282             return -TARGET_ERESTARTSYS;
12283         }
12284     }
12285 #endif
12286 
12287     record_syscall_start(cpu, num, arg1,
12288                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12289 
12290     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12291         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12292     }
12293 
12294     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12295                       arg5, arg6, arg7, arg8);
12296 
12297     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12298         print_syscall_ret(num, ret);
12299     }
12300 
12301     record_syscall_return(cpu, num, ret);
12302     return ret;
12303 }
12304