xref: /openbmc/qemu/linux-user/syscall.c (revision cfe68ae0)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #include "linux_loop.h"
116 #include "uname.h"
117 
118 #include "qemu.h"
119 #include "qemu/guest-random.h"
120 #include "user/syscall-trace.h"
121 #include "qapi/error.h"
122 #include "fd-trans.h"
123 #include "tcg/tcg.h"
124 
125 #ifndef CLONE_IO
126 #define CLONE_IO                0x80000000      /* Clone io context */
127 #endif
128 
129 /* We can't directly call the host clone syscall, because this will
130  * badly confuse libc (breaking mutexes, for example). So we must
131  * divide clone flags into:
132  *  * flag combinations that look like pthread_create()
133  *  * flag combinations that look like fork()
134  *  * flags we can implement within QEMU itself
135  *  * flags we can't support and will return an error for
136  */
137 /* For thread creation, all these flags must be present; for
138  * fork, none must be present.
139  */
140 #define CLONE_THREAD_FLAGS                              \
141     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
142      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
143 
144 /* These flags are ignored:
145  * CLONE_DETACHED is now ignored by the kernel;
146  * CLONE_IO is just an optimisation hint to the I/O scheduler
147  */
148 #define CLONE_IGNORED_FLAGS                     \
149     (CLONE_DETACHED | CLONE_IO)
150 
151 /* Flags for fork which we can implement within QEMU itself */
152 #define CLONE_OPTIONAL_FORK_FLAGS               \
153     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
154      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
155 
156 /* Flags for thread creation which we can implement within QEMU itself */
157 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
158     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
159      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
160 
161 #define CLONE_INVALID_FORK_FLAGS                                        \
162     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
163 
164 #define CLONE_INVALID_THREAD_FLAGS                                      \
165     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
166        CLONE_IGNORED_FLAGS))
167 
168 /* CLONE_VFORK is special-cased early in do_fork(). The other flag bits
169  * have almost all been allocated. We cannot support any of
170  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
171  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
172  * The checks against the invalid thread masks above will catch these.
173  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
174  */
175 
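/* For orientation (not guaranteed for every libc version): glibc's
 * pthread_create() typically passes
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 * i.e. all of CLONE_THREAD_FLAGS plus only bits from
 * CLONE_OPTIONAL_THREAD_FLAGS, so it is accepted by the masks above.
 */
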
176 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
177  * once. This exercises the codepaths for restart.
178  */
179 //#define DEBUG_ERESTARTSYS
180 
181 //#include <linux/msdos_fs.h>
182 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
183 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
184 
185 #undef _syscall0
186 #undef _syscall1
187 #undef _syscall2
188 #undef _syscall3
189 #undef _syscall4
190 #undef _syscall5
191 #undef _syscall6
192 
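/* The _syscall{0..6} macros below generate small static wrappers that
 * invoke the raw host syscall(2) directly, bypassing the host libc.
 * They are used where QEMU needs the kernel's behaviour for a call the
 * libc may not wrap (or may wrap with different semantics).
 */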
193 #define _syscall0(type,name)		\
194 static type name (void)			\
195 {					\
196 	return syscall(__NR_##name);	\
197 }
198 
199 #define _syscall1(type,name,type1,arg1)		\
200 static type name (type1 arg1)			\
201 {						\
202 	return syscall(__NR_##name, arg1);	\
203 }
204 
205 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
206 static type name (type1 arg1,type2 arg2)		\
207 {							\
208 	return syscall(__NR_##name, arg1, arg2);	\
209 }
210 
211 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
212 static type name (type1 arg1,type2 arg2,type3 arg3)		\
213 {								\
214 	return syscall(__NR_##name, arg1, arg2, arg3);		\
215 }
216 
217 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
218 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
219 {										\
220 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
221 }
222 
223 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
224 		  type5,arg5)							\
225 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
226 {										\
227 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
228 }
229 
230 
231 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
232 		  type5,arg5,type6,arg6)					\
233 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
234                   type6 arg6)							\
235 {										\
236 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
237 }
238 
239 
240 #define __NR_sys_uname __NR_uname
241 #define __NR_sys_getcwd1 __NR_getcwd
242 #define __NR_sys_getdents __NR_getdents
243 #define __NR_sys_getdents64 __NR_getdents64
244 #define __NR_sys_getpriority __NR_getpriority
245 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
246 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
247 #define __NR_sys_syslog __NR_syslog
248 #define __NR_sys_futex __NR_futex
249 #define __NR_sys_inotify_init __NR_inotify_init
250 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
251 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
252 #define __NR_sys_statx __NR_statx
253 
254 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
255 #define __NR__llseek __NR_lseek
256 #endif
257 
258 /* Newer kernel ports have llseek() instead of _llseek() */
259 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
260 #define TARGET_NR__llseek TARGET_NR_llseek
261 #endif
262 
263 #define __NR_sys_gettid __NR_gettid
264 _syscall0(int, sys_gettid)
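/* As a sketch (macro expansion, not literal source), the two lines above
 * produce roughly:
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);   (i.e. __NR_gettid)
 *     }
 *
 * giving QEMU a gettid() wrapper that does not depend on the host libc
 * providing one.
 */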
265 
266 /* For the 64-bit guest on 32-bit host case we must emulate
267  * getdents using getdents64, because otherwise the host
268  * might hand us back more dirent records than we can fit
269  * into the guest buffer after structure format conversion.
270  * Otherwise we emulate the guest getdents using the host getdents, if the host has it.
271  */
272 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
273 #define EMULATE_GETDENTS_WITH_GETDENTS
274 #endif
275 
276 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
277 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
278 #endif
279 #if (defined(TARGET_NR_getdents) && \
280       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
281     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
282 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
283 #endif
284 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
285 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
286           loff_t *, res, uint, wh);
287 #endif
288 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
289 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
290           siginfo_t *, uinfo)
291 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
292 #ifdef __NR_exit_group
293 _syscall1(int,exit_group,int,error_code)
294 #endif
295 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
296 _syscall1(int,set_tid_address,int *,tidptr)
297 #endif
298 #if (defined(TARGET_NR_futex) || defined(TARGET_NR_exit)) && defined(__NR_futex)
299 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
300           const struct timespec *,timeout,int *,uaddr2,int,val3)
301 #endif
302 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
303 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
304           unsigned long *, user_mask_ptr);
305 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
306 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
307           unsigned long *, user_mask_ptr);
308 #define __NR_sys_getcpu __NR_getcpu
309 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
310 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
311           void *, arg);
312 _syscall2(int, capget, struct __user_cap_header_struct *, header,
313           struct __user_cap_data_struct *, data);
314 _syscall2(int, capset, struct __user_cap_header_struct *, header,
315           struct __user_cap_data_struct *, data);
316 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
317 _syscall2(int, ioprio_get, int, which, int, who)
318 #endif
319 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
320 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
321 #endif
322 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
323 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
324 #endif
325 
326 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
327 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
328           unsigned long, idx1, unsigned long, idx2)
329 #endif
330 
331 /*
332  * It is assumed that struct statx is architecture independent.
333  */
334 #if defined(TARGET_NR_statx) && defined(__NR_statx)
335 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
336           unsigned int, mask, struct target_statx *, statxbuf)
337 #endif
338 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
339 _syscall2(int, membarrier, int, cmd, int, flags)
340 #endif
341 
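/* Each row is { target_mask, target_bits, host_mask, host_bits }: the
 * bitmask translation helpers (target_to_host_bitmask() and friends)
 * walk this table to convert open(2)/fcntl(2) flag words between guest
 * and host encodings, which need not use the same bit values.
 */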
342 static bitmask_transtbl fcntl_flags_tbl[] = {
343   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
344   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
345   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
346   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
347   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
348   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
349   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
350   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
351   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
352   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
353   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
354   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
355   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
356 #if defined(O_DIRECT)
357   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
358 #endif
359 #if defined(O_NOATIME)
360   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
361 #endif
362 #if defined(O_CLOEXEC)
363   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
364 #endif
365 #if defined(O_PATH)
366   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
367 #endif
368 #if defined(O_TMPFILE)
369   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
370 #endif
371   /* Don't terminate the list prematurely on 64-bit host+guest.  */
372 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
373   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
374 #endif
375   { 0, 0, 0, 0 }
376 };
377 
378 static int sys_getcwd1(char *buf, size_t size)
379 {
380   if (getcwd(buf, size) == NULL) {
381       /* getcwd() sets errno */
382       return (-1);
383   }
384   return strlen(buf)+1;
385 }
386 
387 #ifdef TARGET_NR_utimensat
388 #if defined(__NR_utimensat)
389 #define __NR_sys_utimensat __NR_utimensat
390 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
391           const struct timespec *,tsp,int,flags)
392 #else
393 static int sys_utimensat(int dirfd, const char *pathname,
394                          const struct timespec times[2], int flags)
395 {
396     errno = ENOSYS;
397     return -1;
398 }
399 #endif
400 #endif /* TARGET_NR_utimensat */
401 
402 #ifdef TARGET_NR_renameat2
403 #if defined(__NR_renameat2)
404 #define __NR_sys_renameat2 __NR_renameat2
405 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
406           const char *, new, unsigned int, flags)
407 #else
408 static int sys_renameat2(int oldfd, const char *old,
409                          int newfd, const char *new, int flags)
410 {
411     if (flags == 0) {
412         return renameat(oldfd, old, newfd, new);
413     }
414     errno = ENOSYS;
415     return -1;
416 }
417 #endif
418 #endif /* TARGET_NR_renameat2 */
419 
420 #ifdef CONFIG_INOTIFY
421 #include <sys/inotify.h>
422 
423 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
424 static int sys_inotify_init(void)
425 {
426   return (inotify_init());
427 }
428 #endif
429 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
430 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
431 {
432   return (inotify_add_watch(fd, pathname, mask));
433 }
434 #endif
435 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
436 static int sys_inotify_rm_watch(int fd, int32_t wd)
437 {
438   return (inotify_rm_watch(fd, wd));
439 }
440 #endif
441 #ifdef CONFIG_INOTIFY1
442 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
443 static int sys_inotify_init1(int flags)
444 {
445   return (inotify_init1(flags));
446 }
447 #endif
448 #endif
449 #else
450 /* Userspace can usually survive runtime without inotify */
451 #undef TARGET_NR_inotify_init
452 #undef TARGET_NR_inotify_init1
453 #undef TARGET_NR_inotify_add_watch
454 #undef TARGET_NR_inotify_rm_watch
455 #endif /* CONFIG_INOTIFY  */
456 
457 #if defined(TARGET_NR_prlimit64)
458 #ifndef __NR_prlimit64
459 # define __NR_prlimit64 -1
460 #endif
461 #define __NR_sys_prlimit64 __NR_prlimit64
462 /* The glibc rlimit structure may not match the one used by the underlying syscall */
463 struct host_rlimit64 {
464     uint64_t rlim_cur;
465     uint64_t rlim_max;
466 };
467 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
468           const struct host_rlimit64 *, new_limit,
469           struct host_rlimit64 *, old_limit)
470 #endif
471 
472 
473 #if defined(TARGET_NR_timer_create)
474 /* Maximum of 32 active POSIX timers allowed at any one time. */
475 static timer_t g_posix_timers[32] = { 0, } ;
476 
477 static inline int next_free_host_timer(void)
478 {
479     int k ;
480     /* FIXME: Does finding the next free slot require a lock? */
481     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
482         if (g_posix_timers[k] == 0) {
483             g_posix_timers[k] = (timer_t) 1;
484             return k;
485         }
486     }
487     return -1;
488 }
489 #endif
490 
491 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
492 #ifdef TARGET_ARM
493 static inline int regpairs_aligned(void *cpu_env, int num)
494 {
495     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
496 }
497 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
498 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
499 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
500 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
501  * of registers which translates to the same as ARM/MIPS, because we start with
502  * r3 as arg1 */
503 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
504 #elif defined(TARGET_SH4)
505 /* SH4 doesn't align register pairs, except for p{read,write}64 */
506 static inline int regpairs_aligned(void *cpu_env, int num)
507 {
508     switch (num) {
509     case TARGET_NR_pread64:
510     case TARGET_NR_pwrite64:
511         return 1;
512 
513     default:
514         return 0;
515     }
516 }
517 #elif defined(TARGET_XTENSA)
518 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
519 #else
520 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
521 #endif
522 
523 #define ERRNO_TABLE_SIZE 1200
524 
525 /* target_to_host_errno_table[] is initialized from
526  * host_to_target_errno_table[] in syscall_init(). */
527 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
528 };
529 
530 /*
531  * This list is the union of errno values overridden in asm-<arch>/errno.h
532  * minus the errnos that are not actually generic to all archs.
533  */
534 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
535     [EAGAIN]		= TARGET_EAGAIN,
536     [EIDRM]		= TARGET_EIDRM,
537     [ECHRNG]		= TARGET_ECHRNG,
538     [EL2NSYNC]		= TARGET_EL2NSYNC,
539     [EL3HLT]		= TARGET_EL3HLT,
540     [EL3RST]		= TARGET_EL3RST,
541     [ELNRNG]		= TARGET_ELNRNG,
542     [EUNATCH]		= TARGET_EUNATCH,
543     [ENOCSI]		= TARGET_ENOCSI,
544     [EL2HLT]		= TARGET_EL2HLT,
545     [EDEADLK]		= TARGET_EDEADLK,
546     [ENOLCK]		= TARGET_ENOLCK,
547     [EBADE]		= TARGET_EBADE,
548     [EBADR]		= TARGET_EBADR,
549     [EXFULL]		= TARGET_EXFULL,
550     [ENOANO]		= TARGET_ENOANO,
551     [EBADRQC]		= TARGET_EBADRQC,
552     [EBADSLT]		= TARGET_EBADSLT,
553     [EBFONT]		= TARGET_EBFONT,
554     [ENOSTR]		= TARGET_ENOSTR,
555     [ENODATA]		= TARGET_ENODATA,
556     [ETIME]		= TARGET_ETIME,
557     [ENOSR]		= TARGET_ENOSR,
558     [ENONET]		= TARGET_ENONET,
559     [ENOPKG]		= TARGET_ENOPKG,
560     [EREMOTE]		= TARGET_EREMOTE,
561     [ENOLINK]		= TARGET_ENOLINK,
562     [EADV]		= TARGET_EADV,
563     [ESRMNT]		= TARGET_ESRMNT,
564     [ECOMM]		= TARGET_ECOMM,
565     [EPROTO]		= TARGET_EPROTO,
566     [EDOTDOT]		= TARGET_EDOTDOT,
567     [EMULTIHOP]		= TARGET_EMULTIHOP,
568     [EBADMSG]		= TARGET_EBADMSG,
569     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
570     [EOVERFLOW]		= TARGET_EOVERFLOW,
571     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
572     [EBADFD]		= TARGET_EBADFD,
573     [EREMCHG]		= TARGET_EREMCHG,
574     [ELIBACC]		= TARGET_ELIBACC,
575     [ELIBBAD]		= TARGET_ELIBBAD,
576     [ELIBSCN]		= TARGET_ELIBSCN,
577     [ELIBMAX]		= TARGET_ELIBMAX,
578     [ELIBEXEC]		= TARGET_ELIBEXEC,
579     [EILSEQ]		= TARGET_EILSEQ,
580     [ENOSYS]		= TARGET_ENOSYS,
581     [ELOOP]		= TARGET_ELOOP,
582     [ERESTART]		= TARGET_ERESTART,
583     [ESTRPIPE]		= TARGET_ESTRPIPE,
584     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
585     [EUSERS]		= TARGET_EUSERS,
586     [ENOTSOCK]		= TARGET_ENOTSOCK,
587     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
588     [EMSGSIZE]		= TARGET_EMSGSIZE,
589     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
590     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
591     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
592     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
593     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
594     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
595     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
596     [EADDRINUSE]	= TARGET_EADDRINUSE,
597     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
598     [ENETDOWN]		= TARGET_ENETDOWN,
599     [ENETUNREACH]	= TARGET_ENETUNREACH,
600     [ENETRESET]		= TARGET_ENETRESET,
601     [ECONNABORTED]	= TARGET_ECONNABORTED,
602     [ECONNRESET]	= TARGET_ECONNRESET,
603     [ENOBUFS]		= TARGET_ENOBUFS,
604     [EISCONN]		= TARGET_EISCONN,
605     [ENOTCONN]		= TARGET_ENOTCONN,
606     [EUCLEAN]		= TARGET_EUCLEAN,
607     [ENOTNAM]		= TARGET_ENOTNAM,
608     [ENAVAIL]		= TARGET_ENAVAIL,
609     [EISNAM]		= TARGET_EISNAM,
610     [EREMOTEIO]		= TARGET_EREMOTEIO,
611     [EDQUOT]            = TARGET_EDQUOT,
612     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
613     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
614     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
615     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
616     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
617     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
618     [EALREADY]		= TARGET_EALREADY,
619     [EINPROGRESS]	= TARGET_EINPROGRESS,
620     [ESTALE]		= TARGET_ESTALE,
621     [ECANCELED]		= TARGET_ECANCELED,
622     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
623     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
624 #ifdef ENOKEY
625     [ENOKEY]		= TARGET_ENOKEY,
626 #endif
627 #ifdef EKEYEXPIRED
628     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
629 #endif
630 #ifdef EKEYREVOKED
631     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
632 #endif
633 #ifdef EKEYREJECTED
634     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
635 #endif
636 #ifdef EOWNERDEAD
637     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
638 #endif
639 #ifdef ENOTRECOVERABLE
640     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
641 #endif
642 #ifdef ENOMSG
643     [ENOMSG]            = TARGET_ENOMSG,
644 #endif
645 #ifdef ERKFILL
646     [ERFKILL]           = TARGET_ERFKILL,
647 #endif
648 #ifdef EHWPOISON
649     [EHWPOISON]         = TARGET_EHWPOISON,
650 #endif
651 };
652 
653 static inline int host_to_target_errno(int err)
654 {
655     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
656         host_to_target_errno_table[err]) {
657         return host_to_target_errno_table[err];
658     }
659     return err;
660 }
661 
662 static inline int target_to_host_errno(int err)
663 {
664     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
665         target_to_host_errno_table[err]) {
666         return target_to_host_errno_table[err];
667     }
668     return err;
669 }
670 
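/* Fold the host convention (-1 with errno set on failure) into QEMU's
 * internal convention of returning the negated *target* errno, which is
 * what eventually gets handed back to the guest.
 */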
671 static inline abi_long get_errno(abi_long ret)
672 {
673     if (ret == -1)
674         return -host_to_target_errno(errno);
675     else
676         return ret;
677 }
678 
679 const char *target_strerror(int err)
680 {
681     if (err == TARGET_ERESTARTSYS) {
682         return "To be restarted";
683     }
684     if (err == TARGET_QEMU_ESIGRETURN) {
685         return "Successful exit from sigreturn";
686     }
687 
688     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
689         return NULL;
690     }
691     return strerror(target_to_host_errno(err));
692 }
693 
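/* The safe_syscall* wrappers are used for host syscalls that may block.
 * Roughly speaking, safe_syscall() behaves like syscall() except that a
 * guest signal arriving before the host syscall has really started makes
 * it fail with errno set to TARGET_ERESTARTSYS, so the caller can unwind,
 * deliver the signal to the guest and then restart the syscall.
 */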
694 #define safe_syscall0(type, name) \
695 static type safe_##name(void) \
696 { \
697     return safe_syscall(__NR_##name); \
698 }
699 
700 #define safe_syscall1(type, name, type1, arg1) \
701 static type safe_##name(type1 arg1) \
702 { \
703     return safe_syscall(__NR_##name, arg1); \
704 }
705 
706 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
707 static type safe_##name(type1 arg1, type2 arg2) \
708 { \
709     return safe_syscall(__NR_##name, arg1, arg2); \
710 }
711 
712 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
713 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
714 { \
715     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
716 }
717 
718 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
719     type4, arg4) \
720 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
721 { \
722     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
723 }
724 
725 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
726     type4, arg4, type5, arg5) \
727 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
728     type5 arg5) \
729 { \
730     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
731 }
732 
733 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
734     type4, arg4, type5, arg5, type6, arg6) \
735 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
736     type5 arg5, type6 arg6) \
737 { \
738     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
739 }
740 
741 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
742 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
743 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
744               int, flags, mode_t, mode)
745 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
746 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
747               struct rusage *, rusage)
748 #endif
749 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
750               int, options, struct rusage *, rusage)
751 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
752 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
753     defined(TARGET_NR_pselect6)
754 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
755               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
756 #endif
757 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
758 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
759               struct timespec *, tsp, const sigset_t *, sigmask,
760               size_t, sigsetsize)
761 #endif
762 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
763               int, maxevents, int, timeout, const sigset_t *, sigmask,
764               size_t, sigsetsize)
765 #ifdef TARGET_NR_futex
766 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
767               const struct timespec *,timeout,int *,uaddr2,int,val3)
768 #endif
769 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
770 safe_syscall2(int, kill, pid_t, pid, int, sig)
771 safe_syscall2(int, tkill, int, tid, int, sig)
772 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
773 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
774 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
775 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
776               unsigned long, pos_l, unsigned long, pos_h)
777 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
778               unsigned long, pos_l, unsigned long, pos_h)
779 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
780               socklen_t, addrlen)
781 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
782               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
783 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
784               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
785 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
786 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
787 safe_syscall2(int, flock, int, fd, int, operation)
788 #ifdef TARGET_NR_rt_sigtimedwait
789 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
790               const struct timespec *, uts, size_t, sigsetsize)
791 #endif
792 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
793               int, flags)
794 #if defined(TARGET_NR_nanosleep)
795 safe_syscall2(int, nanosleep, const struct timespec *, req,
796               struct timespec *, rem)
797 #endif
798 #ifdef TARGET_NR_clock_nanosleep
799 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
800               const struct timespec *, req, struct timespec *, rem)
801 #endif
802 #ifdef __NR_ipc
803 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
804               void *, ptr, long, fifth)
805 #endif
806 #ifdef __NR_msgsnd
807 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
808               int, flags)
809 #endif
810 #ifdef __NR_msgrcv
811 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
812               long, msgtype, int, flags)
813 #endif
814 #ifdef __NR_semtimedop
815 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
816               unsigned, nsops, const struct timespec *, timeout)
817 #endif
818 #ifdef TARGET_NR_mq_timedsend
819 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
820               size_t, len, unsigned, prio, const struct timespec *, timeout)
821 #endif
822 #ifdef TARGET_NR_mq_timedreceive
823 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
824               size_t, len, unsigned *, prio, const struct timespec *, timeout)
825 #endif
826 /* We do ioctl like this rather than via safe_syscall3 to preserve the
827  * "third argument might be integer or pointer or not present" behaviour of
828  * the libc function.
829  */
830 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
831 /* Similarly for fcntl. Note that callers must always:
832  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
833  *  use the flock64 struct rather than unsuffixed flock
834  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
835  */
836 #ifdef __NR_fcntl64
837 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
838 #else
839 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
840 #endif
841 
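/* Translate a host socket type (plus SOCK_CLOEXEC/SOCK_NONBLOCK flags)
 * back into the guest's encoding; the numeric values of SOCK_DGRAM and
 * SOCK_STREAM are not the same on every architecture.
 */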
842 static inline int host_to_target_sock_type(int host_type)
843 {
844     int target_type;
845 
846     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
847     case SOCK_DGRAM:
848         target_type = TARGET_SOCK_DGRAM;
849         break;
850     case SOCK_STREAM:
851         target_type = TARGET_SOCK_STREAM;
852         break;
853     default:
854         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
855         break;
856     }
857 
858 #if defined(SOCK_CLOEXEC)
859     if (host_type & SOCK_CLOEXEC) {
860         target_type |= TARGET_SOCK_CLOEXEC;
861     }
862 #endif
863 
864 #if defined(SOCK_NONBLOCK)
865     if (host_type & SOCK_NONBLOCK) {
866         target_type |= TARGET_SOCK_NONBLOCK;
867     }
868 #endif
869 
870     return target_type;
871 }
872 
873 static abi_ulong target_brk;
874 static abi_ulong target_original_brk;
875 static abi_ulong brk_page;
876 
877 void target_set_brk(abi_ulong new_brk)
878 {
879     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
880     brk_page = HOST_PAGE_ALIGN(target_brk);
881 }
882 
883 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
884 #define DEBUGF_BRK(message, args...)
885 
886 /* do_brk() must return target values and target errnos. */
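/* target_original_brk is the break set up by the loader via
 * target_set_brk(), target_brk is the guest's current break, and
 * brk_page is the end of the host pages actually backing the heap;
 * growing past brk_page maps more anonymous memory with target_mmap().
 */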
887 abi_long do_brk(abi_ulong new_brk)
888 {
889     abi_long mapped_addr;
890     abi_ulong new_alloc_size;
891 
892     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
893 
894     if (!new_brk) {
895         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
896         return target_brk;
897     }
898     if (new_brk < target_original_brk) {
899         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
900                    target_brk);
901         return target_brk;
902     }
903 
904     /* If the new brk is less than the highest page reserved to the
905      * target heap allocation, set it and we're almost done...  */
906     if (new_brk <= brk_page) {
907         /* Heap contents are initialized to zero, as for anonymous
908          * mapped pages.  */
909         if (new_brk > target_brk) {
910             memset(g2h(target_brk), 0, new_brk - target_brk);
911         }
912 	target_brk = new_brk;
913         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
914 	return target_brk;
915     }
916 
917     /* We need to allocate more memory after the brk... Note that
918      * we don't use MAP_FIXED because that will map over the top of
919      * any existing mapping (like the one with the host libc or qemu
920      * itself); instead we treat "mapped but at wrong address" as
921      * a failure and unmap again.
922      */
923     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
924     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
925                                         PROT_READ|PROT_WRITE,
926                                         MAP_ANON|MAP_PRIVATE, 0, 0));
927 
928     if (mapped_addr == brk_page) {
929         /* Heap contents are initialized to zero, as for anonymous
930          * mapped pages.  Technically the new pages are already
931          * initialized to zero since they *are* anonymous mapped
932          * pages, however we have to take care with the contents that
933          * come from the remaining part of the previous page: it may
934          * contain garbage data due to a previous heap usage (grown
935          * then shrunk).  */
936         memset(g2h(target_brk), 0, brk_page - target_brk);
937 
938         target_brk = new_brk;
939         brk_page = HOST_PAGE_ALIGN(target_brk);
940         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
941             target_brk);
942         return target_brk;
943     } else if (mapped_addr != -1) {
944         /* Mapped but at wrong address, meaning there wasn't actually
945          * enough space for this brk.
946          */
947         target_munmap(mapped_addr, new_alloc_size);
948         mapped_addr = -1;
949         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
950     }
951     else {
952         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
953     }
954 
955 #if defined(TARGET_ALPHA)
956     /* We (partially) emulate OSF/1 on Alpha, which requires we
957        return a proper errno, not an unchanged brk value.  */
958     return -TARGET_ENOMEM;
959 #endif
960     /* For everything else, return the previous break. */
961     return target_brk;
962 }
963 
964 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
965     defined(TARGET_NR_pselect6)
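/* Guest fd_set bitmaps are arrays of abi_ulong words in guest byte
 * order; rebuild the host fd_set one bit at a time so that differences
 * in word size and endianness between guest and host do not matter.
 */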
966 static inline abi_long copy_from_user_fdset(fd_set *fds,
967                                             abi_ulong target_fds_addr,
968                                             int n)
969 {
970     int i, nw, j, k;
971     abi_ulong b, *target_fds;
972 
973     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
974     if (!(target_fds = lock_user(VERIFY_READ,
975                                  target_fds_addr,
976                                  sizeof(abi_ulong) * nw,
977                                  1)))
978         return -TARGET_EFAULT;
979 
980     FD_ZERO(fds);
981     k = 0;
982     for (i = 0; i < nw; i++) {
983         /* grab the abi_ulong */
984         __get_user(b, &target_fds[i]);
985         for (j = 0; j < TARGET_ABI_BITS; j++) {
986             /* check the bit inside the abi_ulong */
987             if ((b >> j) & 1)
988                 FD_SET(k, fds);
989             k++;
990         }
991     }
992 
993     unlock_user(target_fds, target_fds_addr, 0);
994 
995     return 0;
996 }
997 
998 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
999                                                  abi_ulong target_fds_addr,
1000                                                  int n)
1001 {
1002     if (target_fds_addr) {
1003         if (copy_from_user_fdset(fds, target_fds_addr, n))
1004             return -TARGET_EFAULT;
1005         *fds_ptr = fds;
1006     } else {
1007         *fds_ptr = NULL;
1008     }
1009     return 0;
1010 }
1011 
1012 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1013                                           const fd_set *fds,
1014                                           int n)
1015 {
1016     int i, nw, j, k;
1017     abi_long v;
1018     abi_ulong *target_fds;
1019 
1020     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1021     if (!(target_fds = lock_user(VERIFY_WRITE,
1022                                  target_fds_addr,
1023                                  sizeof(abi_ulong) * nw,
1024                                  0)))
1025         return -TARGET_EFAULT;
1026 
1027     k = 0;
1028     for (i = 0; i < nw; i++) {
1029         v = 0;
1030         for (j = 0; j < TARGET_ABI_BITS; j++) {
1031             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1032             k++;
1033         }
1034         __put_user(v, &target_fds[i]);
1035     }
1036 
1037     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1038 
1039     return 0;
1040 }
1041 #endif
1042 
1043 #if defined(__alpha__)
1044 #define HOST_HZ 1024
1045 #else
1046 #define HOST_HZ 100
1047 #endif
1048 
1049 static inline abi_long host_to_target_clock_t(long ticks)
1050 {
1051 #if HOST_HZ == TARGET_HZ
1052     return ticks;
1053 #else
1054     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1055 #endif
1056 }
1057 
1058 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1059                                              const struct rusage *rusage)
1060 {
1061     struct target_rusage *target_rusage;
1062 
1063     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1064         return -TARGET_EFAULT;
1065     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1066     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1067     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1068     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1069     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1070     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1071     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1072     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1073     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1074     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1075     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1076     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1077     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1078     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1079     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1080     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1081     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1082     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1083     unlock_user_struct(target_rusage, target_addr, 1);
1084 
1085     return 0;
1086 }
1087 
1088 #ifdef TARGET_NR_setrlimit
1089 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1090 {
1091     abi_ulong target_rlim_swap;
1092     rlim_t result;
1093 
1094     target_rlim_swap = tswapal(target_rlim);
1095     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1096         return RLIM_INFINITY;
1097 
1098     result = target_rlim_swap;
1099     if (target_rlim_swap != (rlim_t)result)
1100         return RLIM_INFINITY;
1101 
1102     return result;
1103 }
1104 #endif
1105 
1106 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1107 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1108 {
1109     abi_ulong target_rlim_swap;
1110     abi_ulong result;
1111 
1112     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1113         target_rlim_swap = TARGET_RLIM_INFINITY;
1114     else
1115         target_rlim_swap = rlim;
1116     result = tswapal(target_rlim_swap);
1117 
1118     return result;
1119 }
1120 #endif
1121 
1122 static inline int target_to_host_resource(int code)
1123 {
1124     switch (code) {
1125     case TARGET_RLIMIT_AS:
1126         return RLIMIT_AS;
1127     case TARGET_RLIMIT_CORE:
1128         return RLIMIT_CORE;
1129     case TARGET_RLIMIT_CPU:
1130         return RLIMIT_CPU;
1131     case TARGET_RLIMIT_DATA:
1132         return RLIMIT_DATA;
1133     case TARGET_RLIMIT_FSIZE:
1134         return RLIMIT_FSIZE;
1135     case TARGET_RLIMIT_LOCKS:
1136         return RLIMIT_LOCKS;
1137     case TARGET_RLIMIT_MEMLOCK:
1138         return RLIMIT_MEMLOCK;
1139     case TARGET_RLIMIT_MSGQUEUE:
1140         return RLIMIT_MSGQUEUE;
1141     case TARGET_RLIMIT_NICE:
1142         return RLIMIT_NICE;
1143     case TARGET_RLIMIT_NOFILE:
1144         return RLIMIT_NOFILE;
1145     case TARGET_RLIMIT_NPROC:
1146         return RLIMIT_NPROC;
1147     case TARGET_RLIMIT_RSS:
1148         return RLIMIT_RSS;
1149     case TARGET_RLIMIT_RTPRIO:
1150         return RLIMIT_RTPRIO;
1151     case TARGET_RLIMIT_SIGPENDING:
1152         return RLIMIT_SIGPENDING;
1153     case TARGET_RLIMIT_STACK:
1154         return RLIMIT_STACK;
1155     default:
1156         return code;
1157     }
1158 }
1159 
1160 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1161                                               abi_ulong target_tv_addr)
1162 {
1163     struct target_timeval *target_tv;
1164 
1165     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1166         return -TARGET_EFAULT;
1167     }
1168 
1169     __get_user(tv->tv_sec, &target_tv->tv_sec);
1170     __get_user(tv->tv_usec, &target_tv->tv_usec);
1171 
1172     unlock_user_struct(target_tv, target_tv_addr, 0);
1173 
1174     return 0;
1175 }
1176 
1177 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1178                                             const struct timeval *tv)
1179 {
1180     struct target_timeval *target_tv;
1181 
1182     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1183         return -TARGET_EFAULT;
1184     }
1185 
1186     __put_user(tv->tv_sec, &target_tv->tv_sec);
1187     __put_user(tv->tv_usec, &target_tv->tv_usec);
1188 
1189     unlock_user_struct(target_tv, target_tv_addr, 1);
1190 
1191     return 0;
1192 }
1193 
1194 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1195                                              const struct timeval *tv)
1196 {
1197     struct target__kernel_sock_timeval *target_tv;
1198 
1199     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1200         return -TARGET_EFAULT;
1201     }
1202 
1203     __put_user(tv->tv_sec, &target_tv->tv_sec);
1204     __put_user(tv->tv_usec, &target_tv->tv_usec);
1205 
1206     unlock_user_struct(target_tv, target_tv_addr, 1);
1207 
1208     return 0;
1209 }
1210 
1211 #if defined(TARGET_NR_futex) || \
1212     defined(TARGET_NR_rt_sigtimedwait) || \
1213     defined(TARGET_NR_pselect6) || \
1214     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1215     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1216     defined(TARGET_NR_mq_timedreceive)
1217 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1218                                                abi_ulong target_addr)
1219 {
1220     struct target_timespec *target_ts;
1221 
1222     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1223         return -TARGET_EFAULT;
1224     }
1225     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1226     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1227     unlock_user_struct(target_ts, target_addr, 0);
1228     return 0;
1229 }
1230 #endif
1231 
1232 #if defined(TARGET_NR_clock_settime64)
1233 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1234                                                  abi_ulong target_addr)
1235 {
1236     struct target__kernel_timespec *target_ts;
1237 
1238     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1239         return -TARGET_EFAULT;
1240     }
1241     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1242     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1243     unlock_user_struct(target_ts, target_addr, 0);
1244     return 0;
1245 }
1246 #endif
1247 
1248 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1249                                                struct timespec *host_ts)
1250 {
1251     struct target_timespec *target_ts;
1252 
1253     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1254         return -TARGET_EFAULT;
1255     }
1256     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1257     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1258     unlock_user_struct(target_ts, target_addr, 1);
1259     return 0;
1260 }
1261 
1262 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1263                                                  struct timespec *host_ts)
1264 {
1265     struct target__kernel_timespec *target_ts;
1266 
1267     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1268         return -TARGET_EFAULT;
1269     }
1270     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1271     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1272     unlock_user_struct(target_ts, target_addr, 1);
1273     return 0;
1274 }
1275 
1276 #if defined(TARGET_NR_gettimeofday)
1277 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1278                                              struct timezone *tz)
1279 {
1280     struct target_timezone *target_tz;
1281 
1282     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1283         return -TARGET_EFAULT;
1284     }
1285 
1286     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1287     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1288 
1289     unlock_user_struct(target_tz, target_tz_addr, 1);
1290 
1291     return 0;
1292 }
1293 #endif
1294 
1295 #if defined(TARGET_NR_settimeofday)
1296 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1297                                                abi_ulong target_tz_addr)
1298 {
1299     struct target_timezone *target_tz;
1300 
1301     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1302         return -TARGET_EFAULT;
1303     }
1304 
1305     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1306     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1307 
1308     unlock_user_struct(target_tz, target_tz_addr, 0);
1309 
1310     return 0;
1311 }
1312 #endif
1313 
1314 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1315 #include <mqueue.h>
1316 
1317 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1318                                               abi_ulong target_mq_attr_addr)
1319 {
1320     struct target_mq_attr *target_mq_attr;
1321 
1322     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1323                           target_mq_attr_addr, 1))
1324         return -TARGET_EFAULT;
1325 
1326     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1327     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1328     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1329     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1330 
1331     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1332 
1333     return 0;
1334 }
1335 
1336 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1337                                             const struct mq_attr *attr)
1338 {
1339     struct target_mq_attr *target_mq_attr;
1340 
1341     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1342                           target_mq_attr_addr, 0))
1343         return -TARGET_EFAULT;
1344 
1345     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1346     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1347     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1348     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1349 
1350     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1351 
1352     return 0;
1353 }
1354 #endif
1355 
1356 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1357 /* do_select() must return target values and target errnos. */
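/* The guest timeval (if any) is converted to a host timespec and the
 * wait is issued through safe_pselect6() with a NULL sigmask, so the
 * call can be interrupted and restarted when a guest signal arrives.
 */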
1358 static abi_long do_select(int n,
1359                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1360                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1361 {
1362     fd_set rfds, wfds, efds;
1363     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1364     struct timeval tv;
1365     struct timespec ts, *ts_ptr;
1366     abi_long ret;
1367 
1368     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1369     if (ret) {
1370         return ret;
1371     }
1372     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1373     if (ret) {
1374         return ret;
1375     }
1376     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1377     if (ret) {
1378         return ret;
1379     }
1380 
1381     if (target_tv_addr) {
1382         if (copy_from_user_timeval(&tv, target_tv_addr))
1383             return -TARGET_EFAULT;
1384         ts.tv_sec = tv.tv_sec;
1385         ts.tv_nsec = tv.tv_usec * 1000;
1386         ts_ptr = &ts;
1387     } else {
1388         ts_ptr = NULL;
1389     }
1390 
1391     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1392                                   ts_ptr, NULL));
1393 
1394     if (!is_error(ret)) {
1395         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1396             return -TARGET_EFAULT;
1397         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1398             return -TARGET_EFAULT;
1399         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1400             return -TARGET_EFAULT;
1401 
1402         if (target_tv_addr) {
1403             tv.tv_sec = ts.tv_sec;
1404             tv.tv_usec = ts.tv_nsec / 1000;
1405             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1406                 return -TARGET_EFAULT;
1407             }
1408         }
1409     }
1410 
1411     return ret;
1412 }
1413 
1414 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1415 static abi_long do_old_select(abi_ulong arg1)
1416 {
1417     struct target_sel_arg_struct *sel;
1418     abi_ulong inp, outp, exp, tvp;
1419     long nsel;
1420 
1421     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1422         return -TARGET_EFAULT;
1423     }
1424 
1425     nsel = tswapal(sel->n);
1426     inp = tswapal(sel->inp);
1427     outp = tswapal(sel->outp);
1428     exp = tswapal(sel->exp);
1429     tvp = tswapal(sel->tvp);
1430 
1431     unlock_user_struct(sel, arg1, 0);
1432 
1433     return do_select(nsel, inp, outp, exp, tvp);
1434 }
1435 #endif
1436 #endif
1437 
1438 static abi_long do_pipe2(int host_pipe[], int flags)
1439 {
1440 #ifdef CONFIG_PIPE2
1441     return pipe2(host_pipe, flags);
1442 #else
1443     return -ENOSYS;
1444 #endif
1445 }
1446 
1447 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1448                         int flags, int is_pipe2)
1449 {
1450     int host_pipe[2];
1451     abi_long ret;
1452     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1453 
1454     if (is_error(ret))
1455         return get_errno(ret);
1456 
1457     /* Several targets have special calling conventions for the original
1458        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
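    /* On these targets the second descriptor is returned in a second
     * result register rather than being stored through pipedes.
     */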
1459     if (!is_pipe2) {
1460 #if defined(TARGET_ALPHA)
1461         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1462         return host_pipe[0];
1463 #elif defined(TARGET_MIPS)
1464         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1465         return host_pipe[0];
1466 #elif defined(TARGET_SH4)
1467         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1468         return host_pipe[0];
1469 #elif defined(TARGET_SPARC)
1470         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1471         return host_pipe[0];
1472 #endif
1473     }
1474 
1475     if (put_user_s32(host_pipe[0], pipedes)
1476         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1477         return -TARGET_EFAULT;
1478     return get_errno(ret);
1479 }
1480 
1481 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1482                                               abi_ulong target_addr,
1483                                               socklen_t len)
1484 {
1485     struct target_ip_mreqn *target_smreqn;
1486 
1487     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1488     if (!target_smreqn)
1489         return -TARGET_EFAULT;
1490     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1491     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1492     if (len == sizeof(struct target_ip_mreqn))
1493         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1494     unlock_user(target_smreqn, target_addr, 0);
1495 
1496     return 0;
1497 }
1498 
1499 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1500                                                abi_ulong target_addr,
1501                                                socklen_t len)
1502 {
1503     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1504     sa_family_t sa_family;
1505     struct target_sockaddr *target_saddr;
1506 
1507     if (fd_trans_target_to_host_addr(fd)) {
1508         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1509     }
1510 
1511     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1512     if (!target_saddr)
1513         return -TARGET_EFAULT;
1514 
1515     sa_family = tswap16(target_saddr->sa_family);
1516 
1517     /* Oops. The caller might send an incomplete sun_path; sun_path
1518      * must be terminated by \0 (see the manual page), but
1519      * unfortunately it is quite common to specify sockaddr_un
1520      * length as "strlen(x->sun_path)" while it should be
1521      * "strlen(...) + 1". We'll fix that here if needed.
1522      * The Linux kernel has a similar feature.
1523      */
1524 
1525     if (sa_family == AF_UNIX) {
1526         if (len < unix_maxlen && len > 0) {
1527             char *cp = (char*)target_saddr;
1528 
1529             if ( cp[len-1] && !cp[len] )
1530                 len++;
1531         }
1532         if (len > unix_maxlen)
1533             len = unix_maxlen;
1534     }
1535 
1536     memcpy(addr, target_saddr, len);
1537     addr->sa_family = sa_family;
1538     if (sa_family == AF_NETLINK) {
1539         struct sockaddr_nl *nladdr;
1540 
1541         nladdr = (struct sockaddr_nl *)addr;
1542         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1543         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1544     } else if (sa_family == AF_PACKET) {
1545         struct target_sockaddr_ll *lladdr;
1546 
1547         lladdr = (struct target_sockaddr_ll *)addr;
1548         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1549         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1550     }
1551     unlock_user(target_saddr, target_addr, 0);
1552 
1553     return 0;
1554 }
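
/*
 * Minimal usage sketch for the helper above (illustrative only): callers
 * such as do_bind() allocate a host-sized buffer and convert into it:
 *
 *     void *addr = alloca(addrlen + 1);
 *     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
 *     if (!ret) {
 *         ret = get_errno(bind(sockfd, addr, addrlen));
 *     }
 *
 * Only sa_family and a few family-specific fields are byte-swapped; the
 * rest of the structure is copied verbatim.
 */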
1555 
1556 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1557                                                struct sockaddr *addr,
1558                                                socklen_t len)
1559 {
1560     struct target_sockaddr *target_saddr;
1561 
1562     if (len == 0) {
1563         return 0;
1564     }
1565     assert(addr);
1566 
1567     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1568     if (!target_saddr)
1569         return -TARGET_EFAULT;
1570     memcpy(target_saddr, addr, len);
1571     if (len >= offsetof(struct target_sockaddr, sa_family) +
1572         sizeof(target_saddr->sa_family)) {
1573         target_saddr->sa_family = tswap16(addr->sa_family);
1574     }
1575     if (addr->sa_family == AF_NETLINK &&
1576         len >= sizeof(struct target_sockaddr_nl)) {
1577         struct target_sockaddr_nl *target_nl =
1578                (struct target_sockaddr_nl *)target_saddr;
1579         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1580         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1581     } else if (addr->sa_family == AF_PACKET) {
1582         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1583         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1584         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1585     } else if (addr->sa_family == AF_INET6 &&
1586                len >= sizeof(struct target_sockaddr_in6)) {
1587         struct target_sockaddr_in6 *target_in6 =
1588                (struct target_sockaddr_in6 *)target_saddr;
1589         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1590     }
1591     unlock_user(target_saddr, target_addr, len);
1592 
1593     return 0;
1594 }
1595 
1596 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1597                                            struct target_msghdr *target_msgh)
1598 {
1599     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1600     abi_long msg_controllen;
1601     abi_ulong target_cmsg_addr;
1602     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1603     socklen_t space = 0;
1604 
1605     msg_controllen = tswapal(target_msgh->msg_controllen);
1606     if (msg_controllen < sizeof (struct target_cmsghdr))
1607         goto the_end;
1608     target_cmsg_addr = tswapal(target_msgh->msg_control);
1609     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1610     target_cmsg_start = target_cmsg;
1611     if (!target_cmsg)
1612         return -TARGET_EFAULT;
1613 
1614     while (cmsg && target_cmsg) {
1615         void *data = CMSG_DATA(cmsg);
1616         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1617 
1618         int len = tswapal(target_cmsg->cmsg_len)
1619             - sizeof(struct target_cmsghdr);
1620 
1621         space += CMSG_SPACE(len);
1622         if (space > msgh->msg_controllen) {
1623             space -= CMSG_SPACE(len);
1624             /* This is a QEMU bug, since we allocated the payload
1625              * area ourselves (unlike overflow in host-to-target
1626              * conversion, which is just the guest giving us a buffer
1627              * that's too small). It can't happen for the payload types
1628              * we currently support; if it becomes an issue in future
1629              * we would need to improve our allocation strategy to
1630              * something more intelligent than "twice the size of the
1631              * target buffer we're reading from".
1632              */
1633             qemu_log_mask(LOG_UNIMP,
1634                           "Unsupported ancillary data %d/%d: "
1635                           "unhandled msg size\n",
1636                           tswap32(target_cmsg->cmsg_level),
1637                           tswap32(target_cmsg->cmsg_type));
1638             break;
1639         }
1640 
1641         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1642             cmsg->cmsg_level = SOL_SOCKET;
1643         } else {
1644             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1645         }
1646         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1647         cmsg->cmsg_len = CMSG_LEN(len);
1648 
1649         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1650             int *fd = (int *)data;
1651             int *target_fd = (int *)target_data;
1652             int i, numfds = len / sizeof(int);
1653 
1654             for (i = 0; i < numfds; i++) {
1655                 __get_user(fd[i], target_fd + i);
1656             }
1657         } else if (cmsg->cmsg_level == SOL_SOCKET
1658                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1659             struct ucred *cred = (struct ucred *)data;
1660             struct target_ucred *target_cred =
1661                 (struct target_ucred *)target_data;
1662 
1663             __get_user(cred->pid, &target_cred->pid);
1664             __get_user(cred->uid, &target_cred->uid);
1665             __get_user(cred->gid, &target_cred->gid);
1666         } else {
1667             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1668                           cmsg->cmsg_level, cmsg->cmsg_type);
1669             memcpy(data, target_data, len);
1670         }
1671 
1672         cmsg = CMSG_NXTHDR(msgh, cmsg);
1673         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1674                                          target_cmsg_start);
1675     }
1676     unlock_user(target_cmsg, target_cmsg_addr, 0);
1677  the_end:
1678     msgh->msg_controllen = space;
1679     return 0;
1680 }
1681 
1682 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1683                                            struct msghdr *msgh)
1684 {
1685     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1686     abi_long msg_controllen;
1687     abi_ulong target_cmsg_addr;
1688     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1689     socklen_t space = 0;
1690 
1691     msg_controllen = tswapal(target_msgh->msg_controllen);
1692     if (msg_controllen < sizeof (struct target_cmsghdr))
1693         goto the_end;
1694     target_cmsg_addr = tswapal(target_msgh->msg_control);
1695     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1696     target_cmsg_start = target_cmsg;
1697     if (!target_cmsg)
1698         return -TARGET_EFAULT;
1699 
1700     while (cmsg && target_cmsg) {
1701         void *data = CMSG_DATA(cmsg);
1702         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1703 
1704         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1705         int tgt_len, tgt_space;
1706 
1707         /* We never copy a half-header but may copy half-data;
1708          * this is Linux's behaviour in put_cmsg(). Note that
1709          * truncation here is a guest problem (which we report
1710          * to the guest via the CTRUNC bit), unlike truncation
1711          * in target_to_host_cmsg, which is a QEMU bug.
1712          */
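        /*
         * Illustrative example (assumed sizes): if the guest supplied a
         * control buffer large enough for only one descriptor but the host
         * returned an SCM_RIGHTS message carrying four, just the part that
         * fits is copied and MSG_CTRUNC is reported, as a native recvmsg()
         * would do.
         */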
1713         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1714             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1715             break;
1716         }
1717 
1718         if (cmsg->cmsg_level == SOL_SOCKET) {
1719             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1720         } else {
1721             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1722         }
1723         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1724 
1725         /* Payload types which need a different size of payload on
1726          * the target must adjust tgt_len here.
1727          */
1728         tgt_len = len;
1729         switch (cmsg->cmsg_level) {
1730         case SOL_SOCKET:
1731             switch (cmsg->cmsg_type) {
1732             case SO_TIMESTAMP:
1733                 tgt_len = sizeof(struct target_timeval);
1734                 break;
1735             default:
1736                 break;
1737             }
1738             break;
1739         default:
1740             break;
1741         }
1742 
1743         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1744             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1745             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1746         }
1747 
1748         /* We must now copy-and-convert len bytes of payload
1749          * into tgt_len bytes of destination space. Bear in mind
1750          * that in both source and destination we may be dealing
1751          * with a truncated value!
1752          */
1753         switch (cmsg->cmsg_level) {
1754         case SOL_SOCKET:
1755             switch (cmsg->cmsg_type) {
1756             case SCM_RIGHTS:
1757             {
1758                 int *fd = (int *)data;
1759                 int *target_fd = (int *)target_data;
1760                 int i, numfds = tgt_len / sizeof(int);
1761 
1762                 for (i = 0; i < numfds; i++) {
1763                     __put_user(fd[i], target_fd + i);
1764                 }
1765                 break;
1766             }
1767             case SO_TIMESTAMP:
1768             {
1769                 struct timeval *tv = (struct timeval *)data;
1770                 struct target_timeval *target_tv =
1771                     (struct target_timeval *)target_data;
1772 
1773                 if (len != sizeof(struct timeval) ||
1774                     tgt_len != sizeof(struct target_timeval)) {
1775                     goto unimplemented;
1776                 }
1777 
1778                 /* copy struct timeval to target */
1779                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1780                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1781                 break;
1782             }
1783             case SCM_CREDENTIALS:
1784             {
1785                 struct ucred *cred = (struct ucred *)data;
1786                 struct target_ucred *target_cred =
1787                     (struct target_ucred *)target_data;
1788 
1789                 __put_user(cred->pid, &target_cred->pid);
1790                 __put_user(cred->uid, &target_cred->uid);
1791                 __put_user(cred->gid, &target_cred->gid);
1792                 break;
1793             }
1794             default:
1795                 goto unimplemented;
1796             }
1797             break;
1798 
1799         case SOL_IP:
1800             switch (cmsg->cmsg_type) {
1801             case IP_TTL:
1802             {
1803                 uint32_t *v = (uint32_t *)data;
1804                 uint32_t *t_int = (uint32_t *)target_data;
1805 
1806                 if (len != sizeof(uint32_t) ||
1807                     tgt_len != sizeof(uint32_t)) {
1808                     goto unimplemented;
1809                 }
1810                 __put_user(*v, t_int);
1811                 break;
1812             }
1813             case IP_RECVERR:
1814             {
1815                 struct errhdr_t {
1816                    struct sock_extended_err ee;
1817                    struct sockaddr_in offender;
1818                 };
1819                 struct errhdr_t *errh = (struct errhdr_t *)data;
1820                 struct errhdr_t *target_errh =
1821                     (struct errhdr_t *)target_data;
1822 
1823                 if (len != sizeof(struct errhdr_t) ||
1824                     tgt_len != sizeof(struct errhdr_t)) {
1825                     goto unimplemented;
1826                 }
1827                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1828                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1829                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1830                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1831                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1832                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1833                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1834                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1835                     (void *) &errh->offender, sizeof(errh->offender));
1836                 break;
1837             }
1838             default:
1839                 goto unimplemented;
1840             }
1841             break;
1842 
1843         case SOL_IPV6:
1844             switch (cmsg->cmsg_type) {
1845             case IPV6_HOPLIMIT:
1846             {
1847                 uint32_t *v = (uint32_t *)data;
1848                 uint32_t *t_int = (uint32_t *)target_data;
1849 
1850                 if (len != sizeof(uint32_t) ||
1851                     tgt_len != sizeof(uint32_t)) {
1852                     goto unimplemented;
1853                 }
1854                 __put_user(*v, t_int);
1855                 break;
1856             }
1857             case IPV6_RECVERR:
1858             {
1859                 struct errhdr6_t {
1860                    struct sock_extended_err ee;
1861                    struct sockaddr_in6 offender;
1862                 };
1863                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1864                 struct errhdr6_t *target_errh =
1865                     (struct errhdr6_t *)target_data;
1866 
1867                 if (len != sizeof(struct errhdr6_t) ||
1868                     tgt_len != sizeof(struct errhdr6_t)) {
1869                     goto unimplemented;
1870                 }
1871                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1872                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1873                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1874                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1875                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1876                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1877                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1878                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1879                     (void *) &errh->offender, sizeof(errh->offender));
1880                 break;
1881             }
1882             default:
1883                 goto unimplemented;
1884             }
1885             break;
1886 
1887         default:
1888         unimplemented:
1889             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1890                           cmsg->cmsg_level, cmsg->cmsg_type);
1891             memcpy(target_data, data, MIN(len, tgt_len));
1892             if (tgt_len > len) {
1893                 memset(target_data + len, 0, tgt_len - len);
1894             }
1895         }
1896 
1897         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1898         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1899         if (msg_controllen < tgt_space) {
1900             tgt_space = msg_controllen;
1901         }
1902         msg_controllen -= tgt_space;
1903         space += tgt_space;
1904         cmsg = CMSG_NXTHDR(msgh, cmsg);
1905         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1906                                          target_cmsg_start);
1907     }
1908     unlock_user(target_cmsg, target_cmsg_addr, space);
1909  the_end:
1910     target_msgh->msg_controllen = tswapal(space);
1911     return 0;
1912 }
1913 
1914 /* do_setsockopt() Must return target values and target errnos. */
1915 static abi_long do_setsockopt(int sockfd, int level, int optname,
1916                               abi_ulong optval_addr, socklen_t optlen)
1917 {
1918     abi_long ret;
1919     int val;
1920     struct ip_mreqn *ip_mreq;
1921     struct ip_mreq_source *ip_mreq_source;
1922 
1923     switch(level) {
1924     case SOL_TCP:
1925         /* TCP options all take an 'int' value.  */
1926         if (optlen < sizeof(uint32_t))
1927             return -TARGET_EINVAL;
1928 
1929         if (get_user_u32(val, optval_addr))
1930             return -TARGET_EFAULT;
1931         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1932         break;
1933     case SOL_IP:
1934         switch(optname) {
1935         case IP_TOS:
1936         case IP_TTL:
1937         case IP_HDRINCL:
1938         case IP_ROUTER_ALERT:
1939         case IP_RECVOPTS:
1940         case IP_RETOPTS:
1941         case IP_PKTINFO:
1942         case IP_MTU_DISCOVER:
1943         case IP_RECVERR:
1944         case IP_RECVTTL:
1945         case IP_RECVTOS:
1946 #ifdef IP_FREEBIND
1947         case IP_FREEBIND:
1948 #endif
1949         case IP_MULTICAST_TTL:
1950         case IP_MULTICAST_LOOP:
1951             val = 0;
1952             if (optlen >= sizeof(uint32_t)) {
1953                 if (get_user_u32(val, optval_addr))
1954                     return -TARGET_EFAULT;
1955             } else if (optlen >= 1) {
1956                 if (get_user_u8(val, optval_addr))
1957                     return -TARGET_EFAULT;
1958             }
1959             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1960             break;
1961         case IP_ADD_MEMBERSHIP:
1962         case IP_DROP_MEMBERSHIP:
1963             if (optlen < sizeof (struct target_ip_mreq) ||
1964                 optlen > sizeof (struct target_ip_mreqn))
1965                 return -TARGET_EINVAL;
1966 
1967             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1968             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1969             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1970             break;
1971 
1972         case IP_BLOCK_SOURCE:
1973         case IP_UNBLOCK_SOURCE:
1974         case IP_ADD_SOURCE_MEMBERSHIP:
1975         case IP_DROP_SOURCE_MEMBERSHIP:
1976             if (optlen != sizeof (struct target_ip_mreq_source))
1977                 return -TARGET_EINVAL;
1978 
1979             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1980             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1981             unlock_user(ip_mreq_source, optval_addr, 0);
1982             break;
1983 
1984         default:
1985             goto unimplemented;
1986         }
1987         break;
1988     case SOL_IPV6:
1989         switch (optname) {
1990         case IPV6_MTU_DISCOVER:
1991         case IPV6_MTU:
1992         case IPV6_V6ONLY:
1993         case IPV6_RECVPKTINFO:
1994         case IPV6_UNICAST_HOPS:
1995         case IPV6_MULTICAST_HOPS:
1996         case IPV6_MULTICAST_LOOP:
1997         case IPV6_RECVERR:
1998         case IPV6_RECVHOPLIMIT:
1999         case IPV6_2292HOPLIMIT:
2000         case IPV6_CHECKSUM:
2001         case IPV6_ADDRFORM:
2002         case IPV6_2292PKTINFO:
2003         case IPV6_RECVTCLASS:
2004         case IPV6_RECVRTHDR:
2005         case IPV6_2292RTHDR:
2006         case IPV6_RECVHOPOPTS:
2007         case IPV6_2292HOPOPTS:
2008         case IPV6_RECVDSTOPTS:
2009         case IPV6_2292DSTOPTS:
2010         case IPV6_TCLASS:
2011 #ifdef IPV6_RECVPATHMTU
2012         case IPV6_RECVPATHMTU:
2013 #endif
2014 #ifdef IPV6_TRANSPARENT
2015         case IPV6_TRANSPARENT:
2016 #endif
2017 #ifdef IPV6_FREEBIND
2018         case IPV6_FREEBIND:
2019 #endif
2020 #ifdef IPV6_RECVORIGDSTADDR
2021         case IPV6_RECVORIGDSTADDR:
2022 #endif
2023             val = 0;
2024             if (optlen < sizeof(uint32_t)) {
2025                 return -TARGET_EINVAL;
2026             }
2027             if (get_user_u32(val, optval_addr)) {
2028                 return -TARGET_EFAULT;
2029             }
2030             ret = get_errno(setsockopt(sockfd, level, optname,
2031                                        &val, sizeof(val)));
2032             break;
2033         case IPV6_PKTINFO:
2034         {
2035             struct in6_pktinfo pki;
2036 
2037             if (optlen < sizeof(pki)) {
2038                 return -TARGET_EINVAL;
2039             }
2040 
2041             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2042                 return -TARGET_EFAULT;
2043             }
2044 
2045             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2046 
2047             ret = get_errno(setsockopt(sockfd, level, optname,
2048                                        &pki, sizeof(pki)));
2049             break;
2050         }
2051         case IPV6_ADD_MEMBERSHIP:
2052         case IPV6_DROP_MEMBERSHIP:
2053         {
2054             struct ipv6_mreq ipv6mreq;
2055 
2056             if (optlen < sizeof(ipv6mreq)) {
2057                 return -TARGET_EINVAL;
2058             }
2059 
2060             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2061                 return -TARGET_EFAULT;
2062             }
2063 
2064             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2065 
2066             ret = get_errno(setsockopt(sockfd, level, optname,
2067                                        &ipv6mreq, sizeof(ipv6mreq)));
2068             break;
2069         }
2070         default:
2071             goto unimplemented;
2072         }
2073         break;
2074     case SOL_ICMPV6:
2075         switch (optname) {
2076         case ICMPV6_FILTER:
2077         {
2078             struct icmp6_filter icmp6f;
2079 
2080             if (optlen > sizeof(icmp6f)) {
2081                 optlen = sizeof(icmp6f);
2082             }
2083 
2084             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2085                 return -TARGET_EFAULT;
2086             }
2087 
2088             for (val = 0; val < 8; val++) {
2089                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2090             }
2091 
2092             ret = get_errno(setsockopt(sockfd, level, optname,
2093                                        &icmp6f, optlen));
2094             break;
2095         }
2096         default:
2097             goto unimplemented;
2098         }
2099         break;
2100     case SOL_RAW:
2101         switch (optname) {
2102         case ICMP_FILTER:
2103         case IPV6_CHECKSUM:
2104             /* these take a u32 value */
2105             if (optlen < sizeof(uint32_t)) {
2106                 return -TARGET_EINVAL;
2107             }
2108 
2109             if (get_user_u32(val, optval_addr)) {
2110                 return -TARGET_EFAULT;
2111             }
2112             ret = get_errno(setsockopt(sockfd, level, optname,
2113                                        &val, sizeof(val)));
2114             break;
2115 
2116         default:
2117             goto unimplemented;
2118         }
2119         break;
2120 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2121     case SOL_ALG:
2122         switch (optname) {
2123         case ALG_SET_KEY:
2124         {
2125             char *alg_key = g_malloc(optlen);
2126 
2127             if (!alg_key) {
2128                 return -TARGET_ENOMEM;
2129             }
2130             if (copy_from_user(alg_key, optval_addr, optlen)) {
2131                 g_free(alg_key);
2132                 return -TARGET_EFAULT;
2133             }
2134             ret = get_errno(setsockopt(sockfd, level, optname,
2135                                        alg_key, optlen));
2136             g_free(alg_key);
2137             break;
2138         }
2139         case ALG_SET_AEAD_AUTHSIZE:
2140         {
2141             ret = get_errno(setsockopt(sockfd, level, optname,
2142                                        NULL, optlen));
2143             break;
2144         }
2145         default:
2146             goto unimplemented;
2147         }
2148         break;
2149 #endif
2150     case TARGET_SOL_SOCKET:
2151         switch (optname) {
2152         case TARGET_SO_RCVTIMEO:
2153         {
2154                 struct timeval tv;
2155 
2156                 optname = SO_RCVTIMEO;
2157 
2158 set_timeout:
2159                 if (optlen != sizeof(struct target_timeval)) {
2160                     return -TARGET_EINVAL;
2161                 }
2162 
2163                 if (copy_from_user_timeval(&tv, optval_addr)) {
2164                     return -TARGET_EFAULT;
2165                 }
2166 
2167                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2168                                 &tv, sizeof(tv)));
2169                 return ret;
2170         }
2171         case TARGET_SO_SNDTIMEO:
2172                 optname = SO_SNDTIMEO;
2173                 goto set_timeout;
2174         case TARGET_SO_ATTACH_FILTER:
2175         {
2176                 struct target_sock_fprog *tfprog;
2177                 struct target_sock_filter *tfilter;
2178                 struct sock_fprog fprog;
2179                 struct sock_filter *filter;
2180                 int i;
2181 
2182                 if (optlen != sizeof(*tfprog)) {
2183                     return -TARGET_EINVAL;
2184                 }
2185                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2186                     return -TARGET_EFAULT;
2187                 }
2188                 if (!lock_user_struct(VERIFY_READ, tfilter,
2189                                       tswapal(tfprog->filter), 0)) {
2190                     unlock_user_struct(tfprog, optval_addr, 1);
2191                     return -TARGET_EFAULT;
2192                 }
2193 
2194                 fprog.len = tswap16(tfprog->len);
2195                 filter = g_try_new(struct sock_filter, fprog.len);
2196                 if (filter == NULL) {
2197                     unlock_user_struct(tfilter, tfprog->filter, 1);
2198                     unlock_user_struct(tfprog, optval_addr, 1);
2199                     return -TARGET_ENOMEM;
2200                 }
2201                 for (i = 0; i < fprog.len; i++) {
2202                     filter[i].code = tswap16(tfilter[i].code);
2203                     filter[i].jt = tfilter[i].jt;
2204                     filter[i].jf = tfilter[i].jf;
2205                     filter[i].k = tswap32(tfilter[i].k);
2206                 }
2207                 fprog.filter = filter;
2208 
2209                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2210                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2211                 g_free(filter);
2212 
2213                 unlock_user_struct(tfilter, tfprog->filter, 1);
2214                 unlock_user_struct(tfprog, optval_addr, 1);
2215                 return ret;
2216         }
2217         case TARGET_SO_BINDTODEVICE:
2218         {
2219                 char *dev_ifname, *addr_ifname;
2220 
2221                 if (optlen > IFNAMSIZ - 1) {
2222                     optlen = IFNAMSIZ - 1;
2223                 }
2224                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2225                 if (!dev_ifname) {
2226                     return -TARGET_EFAULT;
2227                 }
2228                 optname = SO_BINDTODEVICE;
2229                 addr_ifname = alloca(IFNAMSIZ);
2230                 memcpy(addr_ifname, dev_ifname, optlen);
2231                 addr_ifname[optlen] = 0;
2232                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2233                                            addr_ifname, optlen));
2234                 unlock_user(dev_ifname, optval_addr, 0);
2235                 return ret;
2236         }
2237         case TARGET_SO_LINGER:
2238         {
2239                 struct linger lg;
2240                 struct target_linger *tlg;
2241 
2242                 if (optlen != sizeof(struct target_linger)) {
2243                     return -TARGET_EINVAL;
2244                 }
2245                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2246                     return -TARGET_EFAULT;
2247                 }
2248                 __get_user(lg.l_onoff, &tlg->l_onoff);
2249                 __get_user(lg.l_linger, &tlg->l_linger);
2250                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2251                                 &lg, sizeof(lg)));
2252                 unlock_user_struct(tlg, optval_addr, 0);
2253                 return ret;
2254         }
2255             /* Options with 'int' argument.  */
2256         case TARGET_SO_DEBUG:
2257                 optname = SO_DEBUG;
2258                 break;
2259         case TARGET_SO_REUSEADDR:
2260                 optname = SO_REUSEADDR;
2261                 break;
2262 #ifdef SO_REUSEPORT
2263         case TARGET_SO_REUSEPORT:
2264                 optname = SO_REUSEPORT;
2265                 break;
2266 #endif
2267         case TARGET_SO_TYPE:
2268                 optname = SO_TYPE;
2269                 break;
2270         case TARGET_SO_ERROR:
2271                 optname = SO_ERROR;
2272                 break;
2273         case TARGET_SO_DONTROUTE:
2274                 optname = SO_DONTROUTE;
2275                 break;
2276         case TARGET_SO_BROADCAST:
2277                 optname = SO_BROADCAST;
2278                 break;
2279         case TARGET_SO_SNDBUF:
2280                 optname = SO_SNDBUF;
2281                 break;
2282         case TARGET_SO_SNDBUFFORCE:
2283                 optname = SO_SNDBUFFORCE;
2284                 break;
2285         case TARGET_SO_RCVBUF:
2286                 optname = SO_RCVBUF;
2287                 break;
2288         case TARGET_SO_RCVBUFFORCE:
2289                 optname = SO_RCVBUFFORCE;
2290                 break;
2291         case TARGET_SO_KEEPALIVE:
2292                 optname = SO_KEEPALIVE;
2293                 break;
2294         case TARGET_SO_OOBINLINE:
2295                 optname = SO_OOBINLINE;
2296                 break;
2297         case TARGET_SO_NO_CHECK:
2298                 optname = SO_NO_CHECK;
2299                 break;
2300         case TARGET_SO_PRIORITY:
2301                 optname = SO_PRIORITY;
2302                 break;
2303 #ifdef SO_BSDCOMPAT
2304         case TARGET_SO_BSDCOMPAT:
2305                 optname = SO_BSDCOMPAT;
2306                 break;
2307 #endif
2308         case TARGET_SO_PASSCRED:
2309                 optname = SO_PASSCRED;
2310                 break;
2311         case TARGET_SO_PASSSEC:
2312                 optname = SO_PASSSEC;
2313                 break;
2314         case TARGET_SO_TIMESTAMP:
2315                 optname = SO_TIMESTAMP;
2316                 break;
2317         case TARGET_SO_RCVLOWAT:
2318                 optname = SO_RCVLOWAT;
2319                 break;
2320         default:
2321             goto unimplemented;
2322         }
2323         if (optlen < sizeof(uint32_t))
2324             return -TARGET_EINVAL;
2325 
2326         if (get_user_u32(val, optval_addr))
2327             return -TARGET_EFAULT;
2328         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2329         break;
2330 #ifdef SOL_NETLINK
2331     case SOL_NETLINK:
2332         switch (optname) {
2333         case NETLINK_PKTINFO:
2334         case NETLINK_ADD_MEMBERSHIP:
2335         case NETLINK_DROP_MEMBERSHIP:
2336         case NETLINK_BROADCAST_ERROR:
2337         case NETLINK_NO_ENOBUFS:
2338 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2339         case NETLINK_LISTEN_ALL_NSID:
2340         case NETLINK_CAP_ACK:
2341 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2342 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2343         case NETLINK_EXT_ACK:
2344 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2345 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2346         case NETLINK_GET_STRICT_CHK:
2347 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2348             break;
2349         default:
2350             goto unimplemented;
2351         }
2352         val = 0;
2353         if (optlen < sizeof(uint32_t)) {
2354             return -TARGET_EINVAL;
2355         }
2356         if (get_user_u32(val, optval_addr)) {
2357             return -TARGET_EFAULT;
2358         }
2359         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2360                                    sizeof(val)));
2361         break;
2362 #endif /* SOL_NETLINK */
2363     default:
2364     unimplemented:
2365         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2366                       level, optname);
2367         ret = -TARGET_ENOPROTOOPT;
2368     }
2369     return ret;
2370 }
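
/*
 * Illustrative guest-side view of the translation above (not part of the
 * build): a guest that calls
 *
 *     struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *     setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * lands in the TARGET_SO_RCVTIMEO branch, which converts the target
 * timeval layout with copy_from_user_timeval() before issuing the host
 * setsockopt(SOL_SOCKET, SO_RCVTIMEO).
 */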
2371 
2372 /* do_getsockopt() Must return target values and target errnos. */
2373 static abi_long do_getsockopt(int sockfd, int level, int optname,
2374                               abi_ulong optval_addr, abi_ulong optlen)
2375 {
2376     abi_long ret;
2377     int len, val;
2378     socklen_t lv;
2379 
2380     switch(level) {
2381     case TARGET_SOL_SOCKET:
2382         level = SOL_SOCKET;
2383         switch (optname) {
2384         /* These don't just return a single integer */
2385         case TARGET_SO_PEERNAME:
2386             goto unimplemented;
2387         case TARGET_SO_RCVTIMEO: {
2388             struct timeval tv;
2389             socklen_t tvlen;
2390 
2391             optname = SO_RCVTIMEO;
2392 
2393 get_timeout:
2394             if (get_user_u32(len, optlen)) {
2395                 return -TARGET_EFAULT;
2396             }
2397             if (len < 0) {
2398                 return -TARGET_EINVAL;
2399             }
2400 
2401             tvlen = sizeof(tv);
2402             ret = get_errno(getsockopt(sockfd, level, optname,
2403                                        &tv, &tvlen));
2404             if (ret < 0) {
2405                 return ret;
2406             }
2407             if (len > sizeof(struct target_timeval)) {
2408                 len = sizeof(struct target_timeval);
2409             }
2410             if (copy_to_user_timeval(optval_addr, &tv)) {
2411                 return -TARGET_EFAULT;
2412             }
2413             if (put_user_u32(len, optlen)) {
2414                 return -TARGET_EFAULT;
2415             }
2416             break;
2417         }
2418         case TARGET_SO_SNDTIMEO:
2419             optname = SO_SNDTIMEO;
2420             goto get_timeout;
2421         case TARGET_SO_PEERCRED: {
2422             struct ucred cr;
2423             socklen_t crlen;
2424             struct target_ucred *tcr;
2425 
2426             if (get_user_u32(len, optlen)) {
2427                 return -TARGET_EFAULT;
2428             }
2429             if (len < 0) {
2430                 return -TARGET_EINVAL;
2431             }
2432 
2433             crlen = sizeof(cr);
2434             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2435                                        &cr, &crlen));
2436             if (ret < 0) {
2437                 return ret;
2438             }
2439             if (len > crlen) {
2440                 len = crlen;
2441             }
2442             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2443                 return -TARGET_EFAULT;
2444             }
2445             __put_user(cr.pid, &tcr->pid);
2446             __put_user(cr.uid, &tcr->uid);
2447             __put_user(cr.gid, &tcr->gid);
2448             unlock_user_struct(tcr, optval_addr, 1);
2449             if (put_user_u32(len, optlen)) {
2450                 return -TARGET_EFAULT;
2451             }
2452             break;
2453         }
2454         case TARGET_SO_PEERSEC: {
2455             char *name;
2456 
2457             if (get_user_u32(len, optlen)) {
2458                 return -TARGET_EFAULT;
2459             }
2460             if (len < 0) {
2461                 return -TARGET_EINVAL;
2462             }
2463             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2464             if (!name) {
2465                 return -TARGET_EFAULT;
2466             }
2467             lv = len;
2468             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2469                                        name, &lv));
2470             if (put_user_u32(lv, optlen)) {
2471                 ret = -TARGET_EFAULT;
2472             }
2473             unlock_user(name, optval_addr, lv);
2474             break;
2475         }
2476         case TARGET_SO_LINGER:
2477         {
2478             struct linger lg;
2479             socklen_t lglen;
2480             struct target_linger *tlg;
2481 
2482             if (get_user_u32(len, optlen)) {
2483                 return -TARGET_EFAULT;
2484             }
2485             if (len < 0) {
2486                 return -TARGET_EINVAL;
2487             }
2488 
2489             lglen = sizeof(lg);
2490             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2491                                        &lg, &lglen));
2492             if (ret < 0) {
2493                 return ret;
2494             }
2495             if (len > lglen) {
2496                 len = lglen;
2497             }
2498             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2499                 return -TARGET_EFAULT;
2500             }
2501             __put_user(lg.l_onoff, &tlg->l_onoff);
2502             __put_user(lg.l_linger, &tlg->l_linger);
2503             unlock_user_struct(tlg, optval_addr, 1);
2504             if (put_user_u32(len, optlen)) {
2505                 return -TARGET_EFAULT;
2506             }
2507             break;
2508         }
2509         /* Options with 'int' argument.  */
2510         case TARGET_SO_DEBUG:
2511             optname = SO_DEBUG;
2512             goto int_case;
2513         case TARGET_SO_REUSEADDR:
2514             optname = SO_REUSEADDR;
2515             goto int_case;
2516 #ifdef SO_REUSEPORT
2517         case TARGET_SO_REUSEPORT:
2518             optname = SO_REUSEPORT;
2519             goto int_case;
2520 #endif
2521         case TARGET_SO_TYPE:
2522             optname = SO_TYPE;
2523             goto int_case;
2524         case TARGET_SO_ERROR:
2525             optname = SO_ERROR;
2526             goto int_case;
2527         case TARGET_SO_DONTROUTE:
2528             optname = SO_DONTROUTE;
2529             goto int_case;
2530         case TARGET_SO_BROADCAST:
2531             optname = SO_BROADCAST;
2532             goto int_case;
2533         case TARGET_SO_SNDBUF:
2534             optname = SO_SNDBUF;
2535             goto int_case;
2536         case TARGET_SO_RCVBUF:
2537             optname = SO_RCVBUF;
2538             goto int_case;
2539         case TARGET_SO_KEEPALIVE:
2540             optname = SO_KEEPALIVE;
2541             goto int_case;
2542         case TARGET_SO_OOBINLINE:
2543             optname = SO_OOBINLINE;
2544             goto int_case;
2545         case TARGET_SO_NO_CHECK:
2546             optname = SO_NO_CHECK;
2547             goto int_case;
2548         case TARGET_SO_PRIORITY:
2549             optname = SO_PRIORITY;
2550             goto int_case;
2551 #ifdef SO_BSDCOMPAT
2552         case TARGET_SO_BSDCOMPAT:
2553             optname = SO_BSDCOMPAT;
2554             goto int_case;
2555 #endif
2556         case TARGET_SO_PASSCRED:
2557             optname = SO_PASSCRED;
2558             goto int_case;
2559         case TARGET_SO_TIMESTAMP:
2560             optname = SO_TIMESTAMP;
2561             goto int_case;
2562         case TARGET_SO_RCVLOWAT:
2563             optname = SO_RCVLOWAT;
2564             goto int_case;
2565         case TARGET_SO_ACCEPTCONN:
2566             optname = SO_ACCEPTCONN;
2567             goto int_case;
2568         default:
2569             goto int_case;
2570         }
2571         break;
2572     case SOL_TCP:
2573         /* TCP options all take an 'int' value.  */
2574     int_case:
2575         if (get_user_u32(len, optlen))
2576             return -TARGET_EFAULT;
2577         if (len < 0)
2578             return -TARGET_EINVAL;
2579         lv = sizeof(lv);
2580         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2581         if (ret < 0)
2582             return ret;
2583         if (optname == SO_TYPE) {
2584             val = host_to_target_sock_type(val);
2585         }
2586         if (len > lv)
2587             len = lv;
2588         if (len == 4) {
2589             if (put_user_u32(val, optval_addr))
2590                 return -TARGET_EFAULT;
2591         } else {
2592             if (put_user_u8(val, optval_addr))
2593                 return -TARGET_EFAULT;
2594         }
2595         if (put_user_u32(len, optlen))
2596             return -TARGET_EFAULT;
2597         break;
2598     case SOL_IP:
2599         switch(optname) {
2600         case IP_TOS:
2601         case IP_TTL:
2602         case IP_HDRINCL:
2603         case IP_ROUTER_ALERT:
2604         case IP_RECVOPTS:
2605         case IP_RETOPTS:
2606         case IP_PKTINFO:
2607         case IP_MTU_DISCOVER:
2608         case IP_RECVERR:
2609         case IP_RECVTOS:
2610 #ifdef IP_FREEBIND
2611         case IP_FREEBIND:
2612 #endif
2613         case IP_MULTICAST_TTL:
2614         case IP_MULTICAST_LOOP:
2615             if (get_user_u32(len, optlen))
2616                 return -TARGET_EFAULT;
2617             if (len < 0)
2618                 return -TARGET_EINVAL;
2619             lv = sizeof(lv);
2620             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2621             if (ret < 0)
2622                 return ret;
2623             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2624                 len = 1;
2625                 if (put_user_u32(len, optlen)
2626                     || put_user_u8(val, optval_addr))
2627                     return -TARGET_EFAULT;
2628             } else {
2629                 if (len > sizeof(int))
2630                     len = sizeof(int);
2631                 if (put_user_u32(len, optlen)
2632                     || put_user_u32(val, optval_addr))
2633                     return -TARGET_EFAULT;
2634             }
2635             break;
2636         default:
2637             ret = -TARGET_ENOPROTOOPT;
2638             break;
2639         }
2640         break;
2641     case SOL_IPV6:
2642         switch (optname) {
2643         case IPV6_MTU_DISCOVER:
2644         case IPV6_MTU:
2645         case IPV6_V6ONLY:
2646         case IPV6_RECVPKTINFO:
2647         case IPV6_UNICAST_HOPS:
2648         case IPV6_MULTICAST_HOPS:
2649         case IPV6_MULTICAST_LOOP:
2650         case IPV6_RECVERR:
2651         case IPV6_RECVHOPLIMIT:
2652         case IPV6_2292HOPLIMIT:
2653         case IPV6_CHECKSUM:
2654         case IPV6_ADDRFORM:
2655         case IPV6_2292PKTINFO:
2656         case IPV6_RECVTCLASS:
2657         case IPV6_RECVRTHDR:
2658         case IPV6_2292RTHDR:
2659         case IPV6_RECVHOPOPTS:
2660         case IPV6_2292HOPOPTS:
2661         case IPV6_RECVDSTOPTS:
2662         case IPV6_2292DSTOPTS:
2663         case IPV6_TCLASS:
2664 #ifdef IPV6_RECVPATHMTU
2665         case IPV6_RECVPATHMTU:
2666 #endif
2667 #ifdef IPV6_TRANSPARENT
2668         case IPV6_TRANSPARENT:
2669 #endif
2670 #ifdef IPV6_FREEBIND
2671         case IPV6_FREEBIND:
2672 #endif
2673 #ifdef IPV6_RECVORIGDSTADDR
2674         case IPV6_RECVORIGDSTADDR:
2675 #endif
2676             if (get_user_u32(len, optlen))
2677                 return -TARGET_EFAULT;
2678             if (len < 0)
2679                 return -TARGET_EINVAL;
2680             lv = sizeof(lv);
2681             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2682             if (ret < 0)
2683                 return ret;
2684             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2685                 len = 1;
2686                 if (put_user_u32(len, optlen)
2687                     || put_user_u8(val, optval_addr))
2688                     return -TARGET_EFAULT;
2689             } else {
2690                 if (len > sizeof(int))
2691                     len = sizeof(int);
2692                 if (put_user_u32(len, optlen)
2693                     || put_user_u32(val, optval_addr))
2694                     return -TARGET_EFAULT;
2695             }
2696             break;
2697         default:
2698             ret = -TARGET_ENOPROTOOPT;
2699             break;
2700         }
2701         break;
2702 #ifdef SOL_NETLINK
2703     case SOL_NETLINK:
2704         switch (optname) {
2705         case NETLINK_PKTINFO:
2706         case NETLINK_BROADCAST_ERROR:
2707         case NETLINK_NO_ENOBUFS:
2708 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2709         case NETLINK_LISTEN_ALL_NSID:
2710         case NETLINK_CAP_ACK:
2711 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2712 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2713         case NETLINK_EXT_ACK:
2714 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2715 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2716         case NETLINK_GET_STRICT_CHK:
2717 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2718             if (get_user_u32(len, optlen)) {
2719                 return -TARGET_EFAULT;
2720             }
2721             if (len != sizeof(val)) {
2722                 return -TARGET_EINVAL;
2723             }
2724             lv = len;
2725             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2726             if (ret < 0) {
2727                 return ret;
2728             }
2729             if (put_user_u32(lv, optlen)
2730                 || put_user_u32(val, optval_addr)) {
2731                 return -TARGET_EFAULT;
2732             }
2733             break;
2734 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2735         case NETLINK_LIST_MEMBERSHIPS:
2736         {
2737             uint32_t *results;
2738             int i;
2739             if (get_user_u32(len, optlen)) {
2740                 return -TARGET_EFAULT;
2741             }
2742             if (len < 0) {
2743                 return -TARGET_EINVAL;
2744             }
2745             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2746             if (!results) {
2747                 return -TARGET_EFAULT;
2748             }
2749             lv = len;
2750             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2751             if (ret < 0) {
2752                 unlock_user(results, optval_addr, 0);
2753                 return ret;
2754             }
2755             /* swap host endianness to target endianness. */
2756             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2757                 results[i] = tswap32(results[i]);
2758             }
2759             if (put_user_u32(lv, optlen)) {
2760                 return -TARGET_EFAULT;
2761             }
2762             unlock_user(results, optval_addr, 0);
2763             break;
2764         }
2765 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2766         default:
2767             goto unimplemented;
2768         }
2769         break;
2770 #endif /* SOL_NETLINK */
2771     default:
2772     unimplemented:
2773         qemu_log_mask(LOG_UNIMP,
2774                       "getsockopt level=%d optname=%d not yet supported\n",
2775                       level, optname);
2776         ret = -TARGET_EOPNOTSUPP;
2777         break;
2778     }
2779     return ret;
2780 }
2781 
2782 /* Convert target low/high pair representing file offset into the host
2783  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2784  * as the kernel doesn't handle them either.
2785  */
2786 static void target_to_host_low_high(abi_ulong tlow,
2787                                     abi_ulong thigh,
2788                                     unsigned long *hlow,
2789                                     unsigned long *hhigh)
2790 {
2791     uint64_t off = tlow |
2792         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2793         TARGET_LONG_BITS / 2;
2794 
2795     *hlow = off;
2796     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2797 }
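
/*
 * Worked example for the conversion above (illustrative, assuming a 32-bit
 * target on a 64-bit host): tlow = 0x89abcdef and thigh = 0x01234567
 * describe the 64-bit offset 0x0123456789abcdef.  The shift is performed
 * in two halves of TARGET_LONG_BITS / 2 because a single shift by the full
 * TARGET_LONG_BITS would be undefined behaviour for 64-bit targets; the
 * same trick splits the result with HOST_LONG_BITS, so on a 64-bit host
 * *hlow holds the whole offset and *hhigh ends up 0.
 */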
2798 
2799 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2800                                 abi_ulong count, int copy)
2801 {
2802     struct target_iovec *target_vec;
2803     struct iovec *vec;
2804     abi_ulong total_len, max_len;
2805     int i;
2806     int err = 0;
2807     bool bad_address = false;
2808 
2809     if (count == 0) {
2810         errno = 0;
2811         return NULL;
2812     }
2813     if (count > IOV_MAX) {
2814         errno = EINVAL;
2815         return NULL;
2816     }
2817 
2818     vec = g_try_new0(struct iovec, count);
2819     if (vec == NULL) {
2820         errno = ENOMEM;
2821         return NULL;
2822     }
2823 
2824     target_vec = lock_user(VERIFY_READ, target_addr,
2825                            count * sizeof(struct target_iovec), 1);
2826     if (target_vec == NULL) {
2827         err = EFAULT;
2828         goto fail2;
2829     }
2830 
2831     /* ??? If host page size > target page size, this will result in a
2832        value larger than what we can actually support.  */
2833     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2834     total_len = 0;
2835 
2836     for (i = 0; i < count; i++) {
2837         abi_ulong base = tswapal(target_vec[i].iov_base);
2838         abi_long len = tswapal(target_vec[i].iov_len);
2839 
2840         if (len < 0) {
2841             err = EINVAL;
2842             goto fail;
2843         } else if (len == 0) {
2844             /* Zero length pointer is ignored.  */
2845             vec[i].iov_base = 0;
2846         } else {
2847             vec[i].iov_base = lock_user(type, base, len, copy);
2848             /* If the first buffer pointer is bad, this is a fault.  But
2849              * subsequent bad buffers will result in a partial write; this
2850              * is realized by filling the vector with null pointers and
2851              * zero lengths. */
2852             if (!vec[i].iov_base) {
2853                 if (i == 0) {
2854                     err = EFAULT;
2855                     goto fail;
2856                 } else {
2857                     bad_address = true;
2858                 }
2859             }
2860             if (bad_address) {
2861                 len = 0;
2862             }
2863             if (len > max_len - total_len) {
2864                 len = max_len - total_len;
2865             }
2866         }
2867         vec[i].iov_len = len;
2868         total_len += len;
2869     }
2870 
2871     unlock_user(target_vec, target_addr, 0);
2872     return vec;
2873 
2874  fail:
2875     while (--i >= 0) {
2876         if (tswapal(target_vec[i].iov_len) > 0) {
2877             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2878         }
2879     }
2880     unlock_user(target_vec, target_addr, 0);
2881  fail2:
2882     g_free(vec);
2883     errno = err;
2884     return NULL;
2885 }
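
/*
 * Simplified usage sketch (illustrative only): lock_iovec() pairs with
 * unlock_iovec() below, e.g. for a guest writev:
 *
 *     struct iovec *vec = lock_iovec(VERIFY_READ, target_addr, count, 1);
 *     if (vec != NULL) {
 *         ret = get_errno(writev(fd, vec, count));
 *         unlock_iovec(vec, target_addr, count, 0);
 *     } else {
 *         ret = -host_to_target_errno(errno);
 *     }
 *
 * A bad buffer after the first one yields a zero-length entry rather than
 * an error, matching the kernel's partial-write semantics.
 */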
2886 
2887 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2888                          abi_ulong count, int copy)
2889 {
2890     struct target_iovec *target_vec;
2891     int i;
2892 
2893     target_vec = lock_user(VERIFY_READ, target_addr,
2894                            count * sizeof(struct target_iovec), 1);
2895     if (target_vec) {
2896         for (i = 0; i < count; i++) {
2897             abi_ulong base = tswapal(target_vec[i].iov_base);
2898             abi_long len = tswapal(target_vec[i].iov_len);
2899             if (len < 0) {
2900                 break;
2901             }
2902             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2903         }
2904         unlock_user(target_vec, target_addr, 0);
2905     }
2906 
2907     g_free(vec);
2908 }
2909 
2910 static inline int target_to_host_sock_type(int *type)
2911 {
2912     int host_type = 0;
2913     int target_type = *type;
2914 
2915     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2916     case TARGET_SOCK_DGRAM:
2917         host_type = SOCK_DGRAM;
2918         break;
2919     case TARGET_SOCK_STREAM:
2920         host_type = SOCK_STREAM;
2921         break;
2922     default:
2923         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2924         break;
2925     }
2926     if (target_type & TARGET_SOCK_CLOEXEC) {
2927 #if defined(SOCK_CLOEXEC)
2928         host_type |= SOCK_CLOEXEC;
2929 #else
2930         return -TARGET_EINVAL;
2931 #endif
2932     }
2933     if (target_type & TARGET_SOCK_NONBLOCK) {
2934 #if defined(SOCK_NONBLOCK)
2935         host_type |= SOCK_NONBLOCK;
2936 #elif !defined(O_NONBLOCK)
2937         return -TARGET_EINVAL;
2938 #endif
2939     }
2940     *type = host_type;
2941     return 0;
2942 }
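
/*
 * Illustrative example (not part of the build): for a guest that requests
 *
 *     socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0);
 *
 * target_to_host_sock_type() rewrites the target flag bits into the host
 * SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK encoding; when the host lacks
 * SOCK_NONBLOCK, sock_flags_fixup() below falls back to
 * fcntl(F_SETFL, O_NONBLOCK) after the socket has been created.
 */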
2943 
2944 /* Try to emulate socket type flags after socket creation.  */
2945 static int sock_flags_fixup(int fd, int target_type)
2946 {
2947 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2948     if (target_type & TARGET_SOCK_NONBLOCK) {
2949         int flags = fcntl(fd, F_GETFL);
2950         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2951             close(fd);
2952             return -TARGET_EINVAL;
2953         }
2954     }
2955 #endif
2956     return fd;
2957 }
2958 
2959 /* do_socket() Must return target values and target errnos. */
2960 static abi_long do_socket(int domain, int type, int protocol)
2961 {
2962     int target_type = type;
2963     int ret;
2964 
2965     ret = target_to_host_sock_type(&type);
2966     if (ret) {
2967         return ret;
2968     }
2969 
2970     if (domain == PF_NETLINK && !(
2971 #ifdef CONFIG_RTNETLINK
2972          protocol == NETLINK_ROUTE ||
2973 #endif
2974          protocol == NETLINK_KOBJECT_UEVENT ||
2975          protocol == NETLINK_AUDIT)) {
2976         return -EPFNOSUPPORT;
2977     }
2978 
2979     if (domain == AF_PACKET ||
2980         (domain == AF_INET && type == SOCK_PACKET)) {
2981         protocol = tswap16(protocol);
2982     }
2983 
2984     ret = get_errno(socket(domain, type, protocol));
2985     if (ret >= 0) {
2986         ret = sock_flags_fixup(ret, target_type);
2987         if (type == SOCK_PACKET) {
2988             /* Handle an obsolete case:
2989              * if the socket type is SOCK_PACKET, bind by name.
2990              */
2991             fd_trans_register(ret, &target_packet_trans);
2992         } else if (domain == PF_NETLINK) {
2993             switch (protocol) {
2994 #ifdef CONFIG_RTNETLINK
2995             case NETLINK_ROUTE:
2996                 fd_trans_register(ret, &target_netlink_route_trans);
2997                 break;
2998 #endif
2999             case NETLINK_KOBJECT_UEVENT:
3000                 /* nothing to do: messages are strings */
3001                 break;
3002             case NETLINK_AUDIT:
3003                 fd_trans_register(ret, &target_netlink_audit_trans);
3004                 break;
3005             default:
3006                 g_assert_not_reached();
3007             }
3008         }
3009     }
3010     return ret;
3011 }
3012 
3013 /* do_bind() Must return target values and target errnos. */
3014 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3015                         socklen_t addrlen)
3016 {
3017     void *addr;
3018     abi_long ret;
3019 
3020     if ((int)addrlen < 0) {
3021         return -TARGET_EINVAL;
3022     }
3023 
3024     addr = alloca(addrlen+1);
3025 
3026     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3027     if (ret)
3028         return ret;
3029 
3030     return get_errno(bind(sockfd, addr, addrlen));
3031 }
3032 
3033 /* do_connect() Must return target values and target errnos. */
3034 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3035                            socklen_t addrlen)
3036 {
3037     void *addr;
3038     abi_long ret;
3039 
3040     if ((int)addrlen < 0) {
3041         return -TARGET_EINVAL;
3042     }
3043 
3044     addr = alloca(addrlen+1);
3045 
3046     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3047     if (ret)
3048         return ret;
3049 
3050     return get_errno(safe_connect(sockfd, addr, addrlen));
3051 }
3052 
3053 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3054 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3055                                       int flags, int send)
3056 {
3057     abi_long ret, len;
3058     struct msghdr msg;
3059     abi_ulong count;
3060     struct iovec *vec;
3061     abi_ulong target_vec;
3062 
3063     if (msgp->msg_name) {
3064         msg.msg_namelen = tswap32(msgp->msg_namelen);
3065         msg.msg_name = alloca(msg.msg_namelen+1);
3066         ret = target_to_host_sockaddr(fd, msg.msg_name,
3067                                       tswapal(msgp->msg_name),
3068                                       msg.msg_namelen);
3069         if (ret == -TARGET_EFAULT) {
3070             /* For connected sockets msg_name and msg_namelen must
3071              * be ignored, so returning EFAULT immediately is wrong.
3072              * Instead, pass a bad msg_name to the host kernel, and
3073              * let it decide whether to return EFAULT or not.
3074              */
3075             msg.msg_name = (void *)-1;
3076         } else if (ret) {
3077             goto out2;
3078         }
3079     } else {
3080         msg.msg_name = NULL;
3081         msg.msg_namelen = 0;
3082     }
3083     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3084     msg.msg_control = alloca(msg.msg_controllen);
3085     memset(msg.msg_control, 0, msg.msg_controllen);
3086 
3087     msg.msg_flags = tswap32(msgp->msg_flags);
3088 
3089     count = tswapal(msgp->msg_iovlen);
3090     target_vec = tswapal(msgp->msg_iov);
3091 
3092     if (count > IOV_MAX) {
3093         /* sendmsg/recvmsg return a different errno for this condition than
3094          * readv/writev, so we must catch it here before lock_iovec() does.
3095          */
3096         ret = -TARGET_EMSGSIZE;
3097         goto out2;
3098     }
3099 
3100     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3101                      target_vec, count, send);
3102     if (vec == NULL) {
3103         ret = -host_to_target_errno(errno);
3104         goto out2;
3105     }
3106     msg.msg_iovlen = count;
3107     msg.msg_iov = vec;
3108 
3109     if (send) {
3110         if (fd_trans_target_to_host_data(fd)) {
3111             void *host_msg;
3112 
3113             host_msg = g_malloc(msg.msg_iov->iov_len);
3114             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3115             ret = fd_trans_target_to_host_data(fd)(host_msg,
3116                                                    msg.msg_iov->iov_len);
3117             if (ret >= 0) {
3118                 msg.msg_iov->iov_base = host_msg;
3119                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3120             }
3121             g_free(host_msg);
3122         } else {
3123             ret = target_to_host_cmsg(&msg, msgp);
3124             if (ret == 0) {
3125                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3126             }
3127         }
3128     } else {
3129         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3130         if (!is_error(ret)) {
3131             len = ret;
3132             if (fd_trans_host_to_target_data(fd)) {
3133                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3134                                                MIN(msg.msg_iov->iov_len, len));
3135             } else {
3136                 ret = host_to_target_cmsg(msgp, &msg);
3137             }
3138             if (!is_error(ret)) {
3139                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3140                 msgp->msg_flags = tswap32(msg.msg_flags);
3141                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3142                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3143                                     msg.msg_name, msg.msg_namelen);
3144                     if (ret) {
3145                         goto out;
3146                     }
3147                 }
3148 
3149                 ret = len;
3150             }
3151         }
3152     }
3153 
3154 out:
3155     unlock_iovec(vec, target_vec, count, !send);
3156 out2:
3157     return ret;
3158 }
3159 
3160 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3161                                int flags, int send)
3162 {
3163     abi_long ret;
3164     struct target_msghdr *msgp;
3165 
3166     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3167                           msgp,
3168                           target_msg,
3169                           send ? 1 : 0)) {
3170         return -TARGET_EFAULT;
3171     }
3172     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3173     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3174     return ret;
3175 }
3176 
3177 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3178  * so it might not have this *mmsg-specific flag either.
3179  */
3180 #ifndef MSG_WAITFORONE
3181 #define MSG_WAITFORONE 0x10000
3182 #endif
3183 
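/* do_sendrecvmmsg() must return target values and target errnos.
 * It loops over the message vector calling do_sendrecvmsg_locked() for
 * each entry and returns the number of datagrams processed if any
 * succeeded, otherwise the error from the first failing message.
 */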
3184 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3185                                 unsigned int vlen, unsigned int flags,
3186                                 int send)
3187 {
3188     struct target_mmsghdr *mmsgp;
3189     abi_long ret = 0;
3190     int i;
3191 
3192     if (vlen > UIO_MAXIOV) {
3193         vlen = UIO_MAXIOV;
3194     }
3195 
3196     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3197     if (!mmsgp) {
3198         return -TARGET_EFAULT;
3199     }
3200 
3201     for (i = 0; i < vlen; i++) {
3202         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3203         if (is_error(ret)) {
3204             break;
3205         }
3206         mmsgp[i].msg_len = tswap32(ret);
3207         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3208         if (flags & MSG_WAITFORONE) {
3209             flags |= MSG_DONTWAIT;
3210         }
3211     }
3212 
3213     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3214 
3215     /* Return the number of datagrams sent or received if we processed
3216      * any at all; otherwise return the error.
3217      */
3218     if (i) {
3219         return i;
3220     }
3221     return ret;
3222 }
3223 
3224 /* do_accept4() must return target values and target errnos. */
3225 static abi_long do_accept4(int fd, abi_ulong target_addr,
3226                            abi_ulong target_addrlen_addr, int flags)
3227 {
3228     socklen_t addrlen, ret_addrlen;
3229     void *addr;
3230     abi_long ret;
3231     int host_flags;
3232 
3233     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3234 
3235     if (target_addr == 0) {
3236         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3237     }
3238 
3239     /* Linux returns EINVAL if the addrlen pointer is invalid */
3240     if (get_user_u32(addrlen, target_addrlen_addr))
3241         return -TARGET_EINVAL;
3242 
3243     if ((int)addrlen < 0) {
3244         return -TARGET_EINVAL;
3245     }
3246 
3247     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3248         return -TARGET_EINVAL;
3249 
3250     addr = alloca(addrlen);
3251 
3252     ret_addrlen = addrlen;
3253     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3254     if (!is_error(ret)) {
3255         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3256         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3257             ret = -TARGET_EFAULT;
3258         }
3259     }
3260     return ret;
3261 }
3262 
3263 /* do_getpeername() must return target values and target errnos. */
3264 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3265                                abi_ulong target_addrlen_addr)
3266 {
3267     socklen_t addrlen, ret_addrlen;
3268     void *addr;
3269     abi_long ret;
3270 
3271     if (get_user_u32(addrlen, target_addrlen_addr))
3272         return -TARGET_EFAULT;
3273 
3274     if ((int)addrlen < 0) {
3275         return -TARGET_EINVAL;
3276     }
3277 
3278     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3279         return -TARGET_EFAULT;
3280 
3281     addr = alloca(addrlen);
3282 
3283     ret_addrlen = addrlen;
3284     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3285     if (!is_error(ret)) {
3286         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3287         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3288             ret = -TARGET_EFAULT;
3289         }
3290     }
3291     return ret;
3292 }
3293 
3294 /* do_getsockname() must return target values and target errnos. */
3295 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3296                                abi_ulong target_addrlen_addr)
3297 {
3298     socklen_t addrlen, ret_addrlen;
3299     void *addr;
3300     abi_long ret;
3301 
3302     if (get_user_u32(addrlen, target_addrlen_addr))
3303         return -TARGET_EFAULT;
3304 
3305     if ((int)addrlen < 0) {
3306         return -TARGET_EINVAL;
3307     }
3308 
3309     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3310         return -TARGET_EFAULT;
3311 
3312     addr = alloca(addrlen);
3313 
3314     ret_addrlen = addrlen;
3315     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3316     if (!is_error(ret)) {
3317         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3318         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3319             ret = -TARGET_EFAULT;
3320         }
3321     }
3322     return ret;
3323 }
3324 
3325 /* do_socketpair() must return target values and target errnos. */
3326 static abi_long do_socketpair(int domain, int type, int protocol,
3327                               abi_ulong target_tab_addr)
3328 {
3329     int tab[2];
3330     abi_long ret;
3331 
3332     target_to_host_sock_type(&type);
3333 
3334     ret = get_errno(socketpair(domain, type, protocol, tab));
3335     if (!is_error(ret)) {
3336         if (put_user_s32(tab[0], target_tab_addr)
3337             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3338             ret = -TARGET_EFAULT;
3339     }
3340     return ret;
3341 }
3342 
3343 /* do_sendto() must return target values and target errnos. */
3344 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3345                           abi_ulong target_addr, socklen_t addrlen)
3346 {
3347     void *addr;
3348     void *host_msg;
3349     void *copy_msg = NULL;
3350     abi_long ret;
3351 
3352     if ((int)addrlen < 0) {
3353         return -TARGET_EINVAL;
3354     }
3355 
3356     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3357     if (!host_msg)
3358         return -TARGET_EFAULT;
3359     if (fd_trans_target_to_host_data(fd)) {
3360         copy_msg = host_msg;
3361         host_msg = g_malloc(len);
3362         memcpy(host_msg, copy_msg, len);
3363         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3364         if (ret < 0) {
3365             goto fail;
3366         }
3367     }
3368     if (target_addr) {
3369         addr = alloca(addrlen+1);
3370         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3371         if (ret) {
3372             goto fail;
3373         }
3374         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3375     } else {
3376         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3377     }
3378 fail:
3379     if (copy_msg) {
3380         g_free(host_msg);
3381         host_msg = copy_msg;
3382     }
3383     unlock_user(host_msg, msg, 0);
3384     return ret;
3385 }
3386 
3387 /* do_recvfrom() must return target values and target errnos. */
3388 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3389                             abi_ulong target_addr,
3390                             abi_ulong target_addrlen)
3391 {
3392     socklen_t addrlen, ret_addrlen;
3393     void *addr;
3394     void *host_msg;
3395     abi_long ret;
3396 
3397     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3398     if (!host_msg)
3399         return -TARGET_EFAULT;
3400     if (target_addr) {
3401         if (get_user_u32(addrlen, target_addrlen)) {
3402             ret = -TARGET_EFAULT;
3403             goto fail;
3404         }
3405         if ((int)addrlen < 0) {
3406             ret = -TARGET_EINVAL;
3407             goto fail;
3408         }
3409         addr = alloca(addrlen);
3410         ret_addrlen = addrlen;
3411         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3412                                       addr, &ret_addrlen));
3413     } else {
3414         addr = NULL; /* To keep compiler quiet.  */
3415         addrlen = 0; /* To keep compiler quiet.  */
3416         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3417     }
3418     if (!is_error(ret)) {
3419         if (fd_trans_host_to_target_data(fd)) {
3420             abi_long trans;
3421             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3422             if (is_error(trans)) {
3423                 ret = trans;
3424                 goto fail;
3425             }
3426         }
3427         if (target_addr) {
3428             host_to_target_sockaddr(target_addr, addr,
3429                                     MIN(addrlen, ret_addrlen));
3430             if (put_user_u32(ret_addrlen, target_addrlen)) {
3431                 ret = -TARGET_EFAULT;
3432                 goto fail;
3433             }
3434         }
3435         unlock_user(host_msg, msg, len);
3436     } else {
3437 fail:
3438         unlock_user(host_msg, msg, 0);
3439     }
3440     return ret;
3441 }
3442 
3443 #ifdef TARGET_NR_socketcall
3444 /* do_socketcall() must return target values and target errnos. */
3445 static abi_long do_socketcall(int num, abi_ulong vptr)
3446 {
3447     static const unsigned nargs[] = { /* number of arguments per operation */
3448         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3449         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3450         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3451         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3452         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3453         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3454         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3455         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3456         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3457         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3458         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3459         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3460         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3461         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3462         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3463         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3464         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3465         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3466         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3467         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3468     };
3469     abi_long a[6]; /* max 6 args */
3470     unsigned i;
3471 
3472     /* check the range of the first argument num */
3473     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3474     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3475         return -TARGET_EINVAL;
3476     }
3477     /* ensure we have space for args */
3478     if (nargs[num] > ARRAY_SIZE(a)) {
3479         return -TARGET_EINVAL;
3480     }
3481     /* collect the arguments in a[] according to nargs[] */
3482     for (i = 0; i < nargs[num]; ++i) {
3483         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3484             return -TARGET_EFAULT;
3485         }
3486     }
3487     /* now when we have the args, invoke the appropriate underlying function */
3488     switch (num) {
3489     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3490         return do_socket(a[0], a[1], a[2]);
3491     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3492         return do_bind(a[0], a[1], a[2]);
3493     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3494         return do_connect(a[0], a[1], a[2]);
3495     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3496         return get_errno(listen(a[0], a[1]));
3497     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3498         return do_accept4(a[0], a[1], a[2], 0);
3499     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3500         return do_getsockname(a[0], a[1], a[2]);
3501     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3502         return do_getpeername(a[0], a[1], a[2]);
3503     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3504         return do_socketpair(a[0], a[1], a[2], a[3]);
3505     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3506         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3507     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3508         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3509     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3510         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3511     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3512         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3513     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3514         return get_errno(shutdown(a[0], a[1]));
3515     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3516         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3517     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3518         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3519     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3520         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3521     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3522         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3523     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3524         return do_accept4(a[0], a[1], a[2], a[3]);
3525     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3526         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3527     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3528         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3529     default:
3530         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3531         return -TARGET_EINVAL;
3532     }
3533 }
3534 #endif
3535 
3536 #define N_SHM_REGIONS	32
3537 
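/* Track the guest's shmat() attachments so that do_shmdt() can find the
 * size of the region being detached and clear its page flags.
 */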
3538 static struct shm_region {
3539     abi_ulong start;
3540     abi_ulong size;
3541     bool in_use;
3542 } shm_regions[N_SHM_REGIONS];
3543 
3544 #ifndef TARGET_SEMID64_DS
3545 /* asm-generic version of this struct */
3546 struct target_semid64_ds
3547 {
3548   struct target_ipc_perm sem_perm;
3549   abi_ulong sem_otime;
3550 #if TARGET_ABI_BITS == 32
3551   abi_ulong __unused1;
3552 #endif
3553   abi_ulong sem_ctime;
3554 #if TARGET_ABI_BITS == 32
3555   abi_ulong __unused2;
3556 #endif
3557   abi_ulong sem_nsems;
3558   abi_ulong __unused3;
3559   abi_ulong __unused4;
3560 };
3561 #endif
3562 
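/* Convert a target struct ipc_perm (the first member of the semid/msqid/
 * shmid structures) between target and host layouts.  The mode and __seq
 * fields are 16 bits wide on most targets but 32 bits on Alpha, MIPS and
 * PPC, hence the per-target swaps below.
 */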
3563 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3564                                                abi_ulong target_addr)
3565 {
3566     struct target_ipc_perm *target_ip;
3567     struct target_semid64_ds *target_sd;
3568 
3569     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3570         return -TARGET_EFAULT;
3571     target_ip = &(target_sd->sem_perm);
3572     host_ip->__key = tswap32(target_ip->__key);
3573     host_ip->uid = tswap32(target_ip->uid);
3574     host_ip->gid = tswap32(target_ip->gid);
3575     host_ip->cuid = tswap32(target_ip->cuid);
3576     host_ip->cgid = tswap32(target_ip->cgid);
3577 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3578     host_ip->mode = tswap32(target_ip->mode);
3579 #else
3580     host_ip->mode = tswap16(target_ip->mode);
3581 #endif
3582 #if defined(TARGET_PPC)
3583     host_ip->__seq = tswap32(target_ip->__seq);
3584 #else
3585     host_ip->__seq = tswap16(target_ip->__seq);
3586 #endif
3587     unlock_user_struct(target_sd, target_addr, 0);
3588     return 0;
3589 }
3590 
3591 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3592                                                struct ipc_perm *host_ip)
3593 {
3594     struct target_ipc_perm *target_ip;
3595     struct target_semid64_ds *target_sd;
3596 
3597     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3598         return -TARGET_EFAULT;
3599     target_ip = &(target_sd->sem_perm);
3600     target_ip->__key = tswap32(host_ip->__key);
3601     target_ip->uid = tswap32(host_ip->uid);
3602     target_ip->gid = tswap32(host_ip->gid);
3603     target_ip->cuid = tswap32(host_ip->cuid);
3604     target_ip->cgid = tswap32(host_ip->cgid);
3605 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3606     target_ip->mode = tswap32(host_ip->mode);
3607 #else
3608     target_ip->mode = tswap16(host_ip->mode);
3609 #endif
3610 #if defined(TARGET_PPC)
3611     target_ip->__seq = tswap32(host_ip->__seq);
3612 #else
3613     target_ip->__seq = tswap16(host_ip->__seq);
3614 #endif
3615     unlock_user_struct(target_sd, target_addr, 1);
3616     return 0;
3617 }
3618 
3619 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3620                                                abi_ulong target_addr)
3621 {
3622     struct target_semid64_ds *target_sd;
3623 
3624     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3625         return -TARGET_EFAULT;
3626     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3627         return -TARGET_EFAULT;
3628     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3629     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3630     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3631     unlock_user_struct(target_sd, target_addr, 0);
3632     return 0;
3633 }
3634 
3635 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3636                                                struct semid_ds *host_sd)
3637 {
3638     struct target_semid64_ds *target_sd;
3639 
3640     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3641         return -TARGET_EFAULT;
3642     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3643         return -TARGET_EFAULT;
3644     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3645     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3646     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3647     unlock_user_struct(target_sd, target_addr, 1);
3648     return 0;
3649 }
3650 
3651 struct target_seminfo {
3652     int semmap;
3653     int semmni;
3654     int semmns;
3655     int semmnu;
3656     int semmsl;
3657     int semopm;
3658     int semume;
3659     int semusz;
3660     int semvmx;
3661     int semaem;
3662 };
3663 
3664 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3665                                               struct seminfo *host_seminfo)
3666 {
3667     struct target_seminfo *target_seminfo;
3668     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3669         return -TARGET_EFAULT;
3670     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3671     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3672     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3673     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3674     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3675     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3676     __put_user(host_seminfo->semume, &target_seminfo->semume);
3677     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3678     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3679     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3680     unlock_user_struct(target_seminfo, target_addr, 1);
3681     return 0;
3682 }
3683 
3684 union semun {
3685     int val;
3686     struct semid_ds *buf;
3687     unsigned short *array;
3688     struct seminfo *__buf;
3689 };
3690 
3691 union target_semun {
3692     int val;
3693     abi_ulong buf;
3694     abi_ulong array;
3695     abi_ulong __buf;
3696 };
3697 
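/* Helpers for GETALL/SETALL: query the semaphore set with IPC_STAT to
 * learn sem_nsems, then copy that many unsigned shorts between the guest
 * array and a newly allocated host array.
 */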
3698 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3699                                                abi_ulong target_addr)
3700 {
3701     int nsems;
3702     unsigned short *array;
3703     union semun semun;
3704     struct semid_ds semid_ds;
3705     int i, ret;
3706 
3707     semun.buf = &semid_ds;
3708 
3709     ret = semctl(semid, 0, IPC_STAT, semun);
3710     if (ret == -1)
3711         return get_errno(ret);
3712 
3713     nsems = semid_ds.sem_nsems;
3714 
3715     *host_array = g_try_new(unsigned short, nsems);
3716     if (!*host_array) {
3717         return -TARGET_ENOMEM;
3718     }
3719     array = lock_user(VERIFY_READ, target_addr,
3720                       nsems*sizeof(unsigned short), 1);
3721     if (!array) {
3722         g_free(*host_array);
3723         return -TARGET_EFAULT;
3724     }
3725 
3726     for (i = 0; i < nsems; i++) {
3727         __get_user((*host_array)[i], &array[i]);
3728     }
3729     unlock_user(array, target_addr, 0);
3730 
3731     return 0;
3732 }
3733 
3734 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3735                                                unsigned short **host_array)
3736 {
3737     int nsems;
3738     unsigned short *array;
3739     union semun semun;
3740     struct semid_ds semid_ds;
3741     int i, ret;
3742 
3743     semun.buf = &semid_ds;
3744 
3745     ret = semctl(semid, 0, IPC_STAT, semun);
3746     if (ret == -1)
3747         return get_errno(ret);
3748 
3749     nsems = semid_ds.sem_nsems;
3750 
3751     array = lock_user(VERIFY_WRITE, target_addr,
3752                       nsems*sizeof(unsigned short), 0);
3753     if (!array)
3754         return -TARGET_EFAULT;
3755 
3756     for (i = 0; i < nsems; i++) {
3757         __put_user((*host_array)[i], &array[i]);
3758     }
3759     g_free(*host_array);
3760     unlock_user(array, target_addr, 1);
3761 
3762     return 0;
3763 }
3764 
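/* do_semctl() must return target values and target errnos.  The guest's
 * semun argument arrives as a raw abi_ulong; only the union member that
 * the given command actually uses is converted.
 */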
3765 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3766                                  abi_ulong target_arg)
3767 {
3768     union target_semun target_su = { .buf = target_arg };
3769     union semun arg;
3770     struct semid_ds dsarg;
3771     unsigned short *array = NULL;
3772     struct seminfo seminfo;
3773     abi_long ret = -TARGET_EINVAL;
3774     abi_long err;
3775     cmd &= 0xff;
3776 
3777     switch (cmd) {
3778     case GETVAL:
3779     case SETVAL:
3780         /* In 64 bit cross-endian situations, we will erroneously pick up
3781          * the wrong half of the union for the "val" element.  To rectify
3782          * this, the entire 8-byte structure is byteswapped, followed by
3783          * a swap of the 4 byte val field. In other cases, the data is
3784          * already in proper host byte order. */
3785         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3786             target_su.buf = tswapal(target_su.buf);
3787             arg.val = tswap32(target_su.val);
3788         } else {
3789             arg.val = target_su.val;
3790         }
3791         ret = get_errno(semctl(semid, semnum, cmd, arg));
3792         break;
3793     case GETALL:
3794     case SETALL:
3795         err = target_to_host_semarray(semid, &array, target_su.array);
3796         if (err)
3797             return err;
3798         arg.array = array;
3799         ret = get_errno(semctl(semid, semnum, cmd, arg));
3800         err = host_to_target_semarray(semid, target_su.array, &array);
3801         if (err)
3802             return err;
3803         break;
3804     case IPC_STAT:
3805     case IPC_SET:
3806     case SEM_STAT:
3807         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3808         if (err)
3809             return err;
3810         arg.buf = &dsarg;
3811         ret = get_errno(semctl(semid, semnum, cmd, arg));
3812         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3813         if (err)
3814             return err;
3815         break;
3816     case IPC_INFO:
3817     case SEM_INFO:
3818         arg.__buf = &seminfo;
3819         ret = get_errno(semctl(semid, semnum, cmd, arg));
3820         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3821         if (err)
3822             return err;
3823         break;
3824     case IPC_RMID:
3825     case GETPID:
3826     case GETNCNT:
3827     case GETZCNT:
3828         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3829         break;
3830     }
3831 
3832     return ret;
3833 }
3834 
3835 struct target_sembuf {
3836     unsigned short sem_num;
3837     short sem_op;
3838     short sem_flg;
3839 };
3840 
3841 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3842                                              abi_ulong target_addr,
3843                                              unsigned nsops)
3844 {
3845     struct target_sembuf *target_sembuf;
3846     int i;
3847 
3848     target_sembuf = lock_user(VERIFY_READ, target_addr,
3849                               nsops*sizeof(struct target_sembuf), 1);
3850     if (!target_sembuf)
3851         return -TARGET_EFAULT;
3852 
3853     for (i = 0; i < nsops; i++) {
3854         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3855         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3856         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3857     }
3858 
3859     unlock_user(target_sembuf, target_addr, 0);
3860 
3861     return 0;
3862 }
3863 
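/* do_semop() must return target values and target errnos.  The guest
 * sembuf array is converted and the operation is issued via semtimedop()
 * with no timeout, falling back to the ipc() multiplexer where the
 * direct syscall is not available.
 */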
3864 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3865 {
3866     struct sembuf sops[nsops];
3867     abi_long ret;
3868 
3869     if (target_to_host_sembuf(sops, ptr, nsops))
3870         return -TARGET_EFAULT;
3871 
3872     ret = -TARGET_ENOSYS;
3873 #ifdef __NR_semtimedop
3874     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3875 #endif
3876 #ifdef __NR_ipc
3877     if (ret == -TARGET_ENOSYS) {
3878         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3879     }
3880 #endif
3881     return ret;
3882 }
3883 
3884 struct target_msqid_ds
3885 {
3886     struct target_ipc_perm msg_perm;
3887     abi_ulong msg_stime;
3888 #if TARGET_ABI_BITS == 32
3889     abi_ulong __unused1;
3890 #endif
3891     abi_ulong msg_rtime;
3892 #if TARGET_ABI_BITS == 32
3893     abi_ulong __unused2;
3894 #endif
3895     abi_ulong msg_ctime;
3896 #if TARGET_ABI_BITS == 32
3897     abi_ulong __unused3;
3898 #endif
3899     abi_ulong __msg_cbytes;
3900     abi_ulong msg_qnum;
3901     abi_ulong msg_qbytes;
3902     abi_ulong msg_lspid;
3903     abi_ulong msg_lrpid;
3904     abi_ulong __unused4;
3905     abi_ulong __unused5;
3906 };
3907 
3908 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3909                                                abi_ulong target_addr)
3910 {
3911     struct target_msqid_ds *target_md;
3912 
3913     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3914         return -TARGET_EFAULT;
3915     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3916         return -TARGET_EFAULT;
3917     host_md->msg_stime = tswapal(target_md->msg_stime);
3918     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3919     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3920     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3921     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3922     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3923     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3924     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3925     unlock_user_struct(target_md, target_addr, 0);
3926     return 0;
3927 }
3928 
3929 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3930                                                struct msqid_ds *host_md)
3931 {
3932     struct target_msqid_ds *target_md;
3933 
3934     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3935         return -TARGET_EFAULT;
3936     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3937         return -TARGET_EFAULT;
3938     target_md->msg_stime = tswapal(host_md->msg_stime);
3939     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3940     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3941     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3942     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3943     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3944     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3945     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3946     unlock_user_struct(target_md, target_addr, 1);
3947     return 0;
3948 }
3949 
3950 struct target_msginfo {
3951     int msgpool;
3952     int msgmap;
3953     int msgmax;
3954     int msgmnb;
3955     int msgmni;
3956     int msgssz;
3957     int msgtql;
3958     unsigned short int msgseg;
3959 };
3960 
3961 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3962                                               struct msginfo *host_msginfo)
3963 {
3964     struct target_msginfo *target_msginfo;
3965     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3966         return -TARGET_EFAULT;
3967     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3968     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3969     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3970     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3971     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3972     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3973     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3974     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3975     unlock_user_struct(target_msginfo, target_addr, 1);
3976     return 0;
3977 }
3978 
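/* do_msgctl() must return target values and target errnos. */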
3979 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3980 {
3981     struct msqid_ds dsarg;
3982     struct msginfo msginfo;
3983     abi_long ret = -TARGET_EINVAL;
3984 
3985     cmd &= 0xff;
3986 
3987     switch (cmd) {
3988     case IPC_STAT:
3989     case IPC_SET:
3990     case MSG_STAT:
3991         if (target_to_host_msqid_ds(&dsarg,ptr))
3992             return -TARGET_EFAULT;
3993         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3994         if (host_to_target_msqid_ds(ptr,&dsarg))
3995             return -TARGET_EFAULT;
3996         break;
3997     case IPC_RMID:
3998         ret = get_errno(msgctl(msgid, cmd, NULL));
3999         break;
4000     case IPC_INFO:
4001     case MSG_INFO:
4002         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4003         if (host_to_target_msginfo(ptr, &msginfo))
4004             return -TARGET_EFAULT;
4005         break;
4006     }
4007 
4008     return ret;
4009 }
4010 
4011 struct target_msgbuf {
4012     abi_long mtype;
4013     char mtext[1];
4014 };
4015 
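/* do_msgsnd() must return target values and target errnos.  The guest
 * message (mtype plus msgsz bytes of mtext) is copied into a host msgbuf
 * before trying the direct msgsnd syscall and then the ipc() multiplexer
 * as a fallback.
 */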
4016 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4017                                  ssize_t msgsz, int msgflg)
4018 {
4019     struct target_msgbuf *target_mb;
4020     struct msgbuf *host_mb;
4021     abi_long ret = 0;
4022 
4023     if (msgsz < 0) {
4024         return -TARGET_EINVAL;
4025     }
4026 
4027     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4028         return -TARGET_EFAULT;
4029     host_mb = g_try_malloc(msgsz + sizeof(long));
4030     if (!host_mb) {
4031         unlock_user_struct(target_mb, msgp, 0);
4032         return -TARGET_ENOMEM;
4033     }
4034     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4035     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4036     ret = -TARGET_ENOSYS;
4037 #ifdef __NR_msgsnd
4038     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4039 #endif
4040 #ifdef __NR_ipc
4041     if (ret == -TARGET_ENOSYS) {
4042         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4043                                  host_mb, 0));
4044     }
4045 #endif
4046     g_free(host_mb);
4047     unlock_user_struct(target_mb, msgp, 0);
4048 
4049     return ret;
4050 }
4051 
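/* do_msgrcv() must return target values and target errnos.  The message
 * is received into a host buffer; on success the mtext bytes are copied
 * back to the guest and mtype is byte-swapped into place.
 */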
4052 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4053                                  ssize_t msgsz, abi_long msgtyp,
4054                                  int msgflg)
4055 {
4056     struct target_msgbuf *target_mb;
4057     char *target_mtext;
4058     struct msgbuf *host_mb;
4059     abi_long ret = 0;
4060 
4061     if (msgsz < 0) {
4062         return -TARGET_EINVAL;
4063     }
4064 
4065     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4066         return -TARGET_EFAULT;
4067 
4068     host_mb = g_try_malloc(msgsz + sizeof(long));
4069     if (!host_mb) {
4070         ret = -TARGET_ENOMEM;
4071         goto end;
4072     }
4073     ret = -TARGET_ENOSYS;
4074 #ifdef __NR_msgrcv
4075     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4076 #endif
4077 #ifdef __NR_ipc
4078     if (ret == -TARGET_ENOSYS) {
4079         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4080                         msgflg, host_mb, msgtyp));
4081     }
4082 #endif
4083 
4084     if (ret > 0) {
4085         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4086         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4087         if (!target_mtext) {
4088             ret = -TARGET_EFAULT;
4089             goto end;
4090         }
4091         memcpy(target_mb->mtext, host_mb->mtext, ret);
4092         unlock_user(target_mtext, target_mtext_addr, ret);
4093     }
4094 
4095     target_mb->mtype = tswapal(host_mb->mtype);
4096 
4097 end:
4098     if (target_mb)
4099         unlock_user_struct(target_mb, msgp, 1);
4100     g_free(host_mb);
4101     return ret;
4102 }
4103 
4104 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4105                                                abi_ulong target_addr)
4106 {
4107     struct target_shmid_ds *target_sd;
4108 
4109     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4110         return -TARGET_EFAULT;
4111     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4112         return -TARGET_EFAULT;
4113     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4114     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4115     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4116     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4117     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4118     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4119     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4120     unlock_user_struct(target_sd, target_addr, 0);
4121     return 0;
4122 }
4123 
4124 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4125                                                struct shmid_ds *host_sd)
4126 {
4127     struct target_shmid_ds *target_sd;
4128 
4129     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4130         return -TARGET_EFAULT;
4131     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4132         return -TARGET_EFAULT;
4133     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4134     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4135     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4136     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4137     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4138     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4139     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4140     unlock_user_struct(target_sd, target_addr, 1);
4141     return 0;
4142 }
4143 
4144 struct target_shminfo {
4145     abi_ulong shmmax;
4146     abi_ulong shmmin;
4147     abi_ulong shmmni;
4148     abi_ulong shmseg;
4149     abi_ulong shmall;
4150 };
4151 
4152 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4153                                               struct shminfo *host_shminfo)
4154 {
4155     struct target_shminfo *target_shminfo;
4156     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4157         return -TARGET_EFAULT;
4158     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4159     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4160     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4161     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4162     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4163     unlock_user_struct(target_shminfo, target_addr, 1);
4164     return 0;
4165 }
4166 
4167 struct target_shm_info {
4168     int used_ids;
4169     abi_ulong shm_tot;
4170     abi_ulong shm_rss;
4171     abi_ulong shm_swp;
4172     abi_ulong swap_attempts;
4173     abi_ulong swap_successes;
4174 };
4175 
4176 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4177                                                struct shm_info *host_shm_info)
4178 {
4179     struct target_shm_info *target_shm_info;
4180     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4181         return -TARGET_EFAULT;
4182     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4183     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4184     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4185     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4186     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4187     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4188     unlock_user_struct(target_shm_info, target_addr, 1);
4189     return 0;
4190 }
4191 
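/* do_shmctl() must return target values and target errnos. */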
4192 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4193 {
4194     struct shmid_ds dsarg;
4195     struct shminfo shminfo;
4196     struct shm_info shm_info;
4197     abi_long ret = -TARGET_EINVAL;
4198 
4199     cmd &= 0xff;
4200 
4201     switch (cmd) {
4202     case IPC_STAT:
4203     case IPC_SET:
4204     case SHM_STAT:
4205         if (target_to_host_shmid_ds(&dsarg, buf))
4206             return -TARGET_EFAULT;
4207         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4208         if (host_to_target_shmid_ds(buf, &dsarg))
4209             return -TARGET_EFAULT;
4210         break;
4211     case IPC_INFO:
4212         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4213         if (host_to_target_shminfo(buf, &shminfo))
4214             return -TARGET_EFAULT;
4215         break;
4216     case SHM_INFO:
4217         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4218         if (host_to_target_shm_info(buf, &shm_info))
4219             return -TARGET_EFAULT;
4220         break;
4221     case IPC_RMID:
4222     case SHM_LOCK:
4223     case SHM_UNLOCK:
4224         ret = get_errno(shmctl(shmid, cmd, NULL));
4225         break;
4226     }
4227 
4228     return ret;
4229 }
4230 
4231 #ifndef TARGET_FORCE_SHMLBA
4232 /* For most architectures, SHMLBA is the same as the page size;
4233  * some architectures have larger values, in which case they should
4234  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4235  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4236  * and defining its own value for SHMLBA.
4237  *
4238  * The kernel also permits SHMLBA to be set by the architecture to a
4239  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4240  * this means that addresses are rounded to the large size if
4241  * SHM_RND is set but addresses not aligned to that size are not rejected
4242  * as long as they are at least page-aligned. Since the only architecture
4243  * which uses this is ia64 this code doesn't provide for that oddity.
4244  */
4245 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4246 {
4247     return TARGET_PAGE_SIZE;
4248 }
4249 #endif
4250 
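/* Attach a SysV shared memory segment for the guest.  The requested
 * address must respect the target's SHMLBA; when no address is given, a
 * free guest range aligned to both host and target SHMLBA is chosen.
 * The resulting mapping is recorded in shm_regions[] and its page flags
 * are updated to cover the segment.
 */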
4251 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4252                                  int shmid, abi_ulong shmaddr, int shmflg)
4253 {
4254     abi_long raddr;
4255     void *host_raddr;
4256     struct shmid_ds shm_info;
4257     int i, ret;
4258     abi_ulong shmlba;
4259 
4260     /* find out the length of the shared memory segment */
4261     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4262     if (is_error(ret)) {
4263         /* can't get length, bail out */
4264         return ret;
4265     }
4266 
4267     shmlba = target_shmlba(cpu_env);
4268 
4269     if (shmaddr & (shmlba - 1)) {
4270         if (shmflg & SHM_RND) {
4271             shmaddr &= ~(shmlba - 1);
4272         } else {
4273             return -TARGET_EINVAL;
4274         }
4275     }
4276     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4277         return -TARGET_EINVAL;
4278     }
4279 
4280     mmap_lock();
4281 
4282     if (shmaddr)
4283         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4284     else {
4285         abi_ulong mmap_start;
4286 
4287         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4288         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4289 
4290         if (mmap_start == -1) {
4291             errno = ENOMEM;
4292             host_raddr = (void *)-1;
4293         } else
4294             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4295     }
4296 
4297     if (host_raddr == (void *)-1) {
4298         mmap_unlock();
4299         return get_errno((long)host_raddr);
4300     }
4301     raddr = h2g((unsigned long)host_raddr);
4302 
4303     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4304                    PAGE_VALID | PAGE_READ |
4305                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4306 
4307     for (i = 0; i < N_SHM_REGIONS; i++) {
4308         if (!shm_regions[i].in_use) {
4309             shm_regions[i].in_use = true;
4310             shm_regions[i].start = raddr;
4311             shm_regions[i].size = shm_info.shm_segsz;
4312             break;
4313         }
4314     }
4315 
4316     mmap_unlock();
4317     return raddr;
4318 
4319 }
4320 
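/* Detach a segment previously attached with do_shmat(), clearing the
 * page flags for the region recorded in shm_regions[].
 */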
4321 static inline abi_long do_shmdt(abi_ulong shmaddr)
4322 {
4323     int i;
4324     abi_long rv;
4325 
4326     mmap_lock();
4327 
4328     for (i = 0; i < N_SHM_REGIONS; ++i) {
4329         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4330             shm_regions[i].in_use = false;
4331             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4332             break;
4333         }
4334     }
4335     rv = get_errno(shmdt(g2h(shmaddr)));
4336 
4337     mmap_unlock();
4338 
4339     return rv;
4340 }
4341 
4342 #ifdef TARGET_NR_ipc
4343 /* ??? This only works with linear mappings.  */
4344 /* do_ipc() must return target values and target errnos. */
4345 static abi_long do_ipc(CPUArchState *cpu_env,
4346                        unsigned int call, abi_long first,
4347                        abi_long second, abi_long third,
4348                        abi_long ptr, abi_long fifth)
4349 {
4350     int version;
4351     abi_long ret = 0;
4352 
4353     version = call >> 16;
4354     call &= 0xffff;
4355 
4356     switch (call) {
4357     case IPCOP_semop:
4358         ret = do_semop(first, ptr, second);
4359         break;
4360 
4361     case IPCOP_semget:
4362         ret = get_errno(semget(first, second, third));
4363         break;
4364 
4365     case IPCOP_semctl: {
4366         /* The semun argument to semctl is passed by value, so dereference the
4367          * ptr argument. */
4368         abi_ulong atptr;
4369         get_user_ual(atptr, ptr);
4370         ret = do_semctl(first, second, third, atptr);
4371         break;
4372     }
4373 
4374     case IPCOP_msgget:
4375         ret = get_errno(msgget(first, second));
4376         break;
4377 
4378     case IPCOP_msgsnd:
4379         ret = do_msgsnd(first, ptr, second, third);
4380         break;
4381 
4382     case IPCOP_msgctl:
4383         ret = do_msgctl(first, second, ptr);
4384         break;
4385 
4386     case IPCOP_msgrcv:
4387         switch (version) {
4388         case 0:
4389             {
4390                 struct target_ipc_kludge {
4391                     abi_long msgp;
4392                     abi_long msgtyp;
4393                 } *tmp;
4394 
4395                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4396                     ret = -TARGET_EFAULT;
4397                     break;
4398                 }
4399 
4400                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4401 
4402                 unlock_user_struct(tmp, ptr, 0);
4403                 break;
4404             }
4405         default:
4406             ret = do_msgrcv(first, ptr, second, fifth, third);
4407         }
4408         break;
4409 
4410     case IPCOP_shmat:
4411         switch (version) {
4412         default:
4413         {
4414             abi_ulong raddr;
4415             raddr = do_shmat(cpu_env, first, ptr, second);
4416             if (is_error(raddr))
4417                 return get_errno(raddr);
4418             if (put_user_ual(raddr, third))
4419                 return -TARGET_EFAULT;
4420             break;
4421         }
4422         case 1:
4423             ret = -TARGET_EINVAL;
4424             break;
4425         }
4426         break;
4427     case IPCOP_shmdt:
4428         ret = do_shmdt(ptr);
4429         break;
4430 
4431     case IPCOP_shmget:
4432         /* IPC_* flag values are the same on all Linux platforms */
4433         ret = get_errno(shmget(first, second, third));
4434         break;
4435 
4436     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4437     case IPCOP_shmctl:
4438         ret = do_shmctl(first, second, ptr);
4439         break;
4440     default:
4441         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4442                       call, version);
4443         ret = -TARGET_ENOSYS;
4444         break;
4445     }
4446     return ret;
4447 }
4448 #endif
4449 
4450 /* kernel structure types definitions */
4451 
4452 #define STRUCT(name, ...) STRUCT_ ## name,
4453 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4454 enum {
4455 #include "syscall_types.h"
4456 STRUCT_MAX
4457 };
4458 #undef STRUCT
4459 #undef STRUCT_SPECIAL
4460 
4461 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4462 #define STRUCT_SPECIAL(name)
4463 #include "syscall_types.h"
4464 #undef STRUCT
4465 #undef STRUCT_SPECIAL
4466 
4467 typedef struct IOCTLEntry IOCTLEntry;
4468 
4469 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4470                              int fd, int cmd, abi_long arg);
4471 
4472 struct IOCTLEntry {
4473     int target_cmd;
4474     unsigned int host_cmd;
4475     const char *name;
4476     int access;
4477     do_ioctl_fn *do_ioctl;
4478     const argtype arg_type[5];
4479 };
4480 
4481 #define IOC_R 0x0001
4482 #define IOC_W 0x0002
4483 #define IOC_RW (IOC_R | IOC_W)
4484 
4485 #define MAX_STRUCT_SIZE 4096
4486 
4487 #ifdef CONFIG_FIEMAP
4488 /* So fiemap access checks don't overflow on 32 bit systems.
4489  * This is very slightly smaller than the limit imposed by
4490  * the underlying kernel.
4491  */
4492 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4493                             / sizeof(struct fiemap_extent))
4494 
4495 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4496                                        int fd, int cmd, abi_long arg)
4497 {
4498     /* The parameter for this ioctl is a struct fiemap followed
4499      * by an array of struct fiemap_extent whose size is set
4500      * in fiemap->fm_extent_count. The array is filled in by the
4501      * ioctl.
4502      */
4503     int target_size_in, target_size_out;
4504     struct fiemap *fm;
4505     const argtype *arg_type = ie->arg_type;
4506     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4507     void *argptr, *p;
4508     abi_long ret;
4509     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4510     uint32_t outbufsz;
4511     int free_fm = 0;
4512 
4513     assert(arg_type[0] == TYPE_PTR);
4514     assert(ie->access == IOC_RW);
4515     arg_type++;
4516     target_size_in = thunk_type_size(arg_type, 0);
4517     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4518     if (!argptr) {
4519         return -TARGET_EFAULT;
4520     }
4521     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4522     unlock_user(argptr, arg, 0);
4523     fm = (struct fiemap *)buf_temp;
4524     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4525         return -TARGET_EINVAL;
4526     }
4527 
4528     outbufsz = sizeof (*fm) +
4529         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4530 
4531     if (outbufsz > MAX_STRUCT_SIZE) {
4532         /* We can't fit all the extents into the fixed size buffer.
4533          * Allocate one that is large enough and use it instead.
4534          */
4535         fm = g_try_malloc(outbufsz);
4536         if (!fm) {
4537             return -TARGET_ENOMEM;
4538         }
4539         memcpy(fm, buf_temp, sizeof(struct fiemap));
4540         free_fm = 1;
4541     }
4542     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4543     if (!is_error(ret)) {
4544         target_size_out = target_size_in;
4545         /* An extent_count of 0 means we were only counting the extents
4546          * so there are no structs to copy
4547          */
4548         if (fm->fm_extent_count != 0) {
4549             target_size_out += fm->fm_mapped_extents * extent_size;
4550         }
4551         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4552         if (!argptr) {
4553             ret = -TARGET_EFAULT;
4554         } else {
4555             /* Convert the struct fiemap */
4556             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4557             if (fm->fm_extent_count != 0) {
4558                 p = argptr + target_size_in;
4559                 /* ...and then all the struct fiemap_extents */
4560                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4561                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4562                                   THUNK_TARGET);
4563                     p += extent_size;
4564                 }
4565             }
4566             unlock_user(argptr, arg, target_size_out);
4567         }
4568     }
4569     if (free_fm) {
4570         g_free(fm);
4571     }
4572     return ret;
4573 }
4574 #endif
4575 
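/* SIOCGIFCONF: ifc_len and the ifreq array use the target layout, so
 * convert ifc_len into a count of host ifreq structures, run the ioctl
 * into a host buffer, then convert the length and each ifreq entry back
 * into the guest's buffer.
 */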
4576 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4577                                 int fd, int cmd, abi_long arg)
4578 {
4579     const argtype *arg_type = ie->arg_type;
4580     int target_size;
4581     void *argptr;
4582     int ret;
4583     struct ifconf *host_ifconf;
4584     uint32_t outbufsz;
4585     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4586     int target_ifreq_size;
4587     int nb_ifreq;
4588     int free_buf = 0;
4589     int i;
4590     int target_ifc_len;
4591     abi_long target_ifc_buf;
4592     int host_ifc_len;
4593     char *host_ifc_buf;
4594 
4595     assert(arg_type[0] == TYPE_PTR);
4596     assert(ie->access == IOC_RW);
4597 
4598     arg_type++;
4599     target_size = thunk_type_size(arg_type, 0);
4600 
4601     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4602     if (!argptr)
4603         return -TARGET_EFAULT;
4604     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4605     unlock_user(argptr, arg, 0);
4606 
4607     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4608     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4609     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4610 
4611     if (target_ifc_buf != 0) {
4612         target_ifc_len = host_ifconf->ifc_len;
4613         nb_ifreq = target_ifc_len / target_ifreq_size;
4614         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4615 
4616         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4617         if (outbufsz > MAX_STRUCT_SIZE) {
4618             /*
4619              * We can't fit all the ifreq entries into the fixed size buffer.
4620              * Allocate one that is large enough and use it instead.
4621              */
4622             host_ifconf = malloc(outbufsz);
4623             if (!host_ifconf) {
4624                 return -TARGET_ENOMEM;
4625             }
4626             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4627             free_buf = 1;
4628         }
4629         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4630 
4631         host_ifconf->ifc_len = host_ifc_len;
4632     } else {
4633         host_ifc_buf = NULL;
4634     }
4635     host_ifconf->ifc_buf = host_ifc_buf;
4636 
4637     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4638     if (!is_error(ret)) {
4639         /* convert host ifc_len to target ifc_len */
4640 
4641         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4642         target_ifc_len = nb_ifreq * target_ifreq_size;
4643         host_ifconf->ifc_len = target_ifc_len;
4644 
4645         /* restore target ifc_buf */
4646 
4647         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4648 
4649         /* copy struct ifconf to target user */
4650 
4651         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4652         if (!argptr)
4653             return -TARGET_EFAULT;
4654         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4655         unlock_user(argptr, arg, target_size);
4656 
4657         if (target_ifc_buf != 0) {
4658             /* copy ifreq[] to target user */
4659             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4660             for (i = 0; i < nb_ifreq ; i++) {
4661                 thunk_convert(argptr + i * target_ifreq_size,
4662                               host_ifc_buf + i * sizeof(struct ifreq),
4663                               ifreq_arg_type, THUNK_TARGET);
4664             }
4665             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4666         }
4667     }
4668 
4669     if (free_buf) {
4670         free(host_ifconf);
4671     }
4672 
4673     return ret;
4674 }
4675 
4676 #if defined(CONFIG_USBFS)
4677 #if HOST_LONG_BITS > 64
4678 #error USBDEVFS thunks do not support >64 bit hosts yet.
4679 #endif
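     /*
      * Book-keeping for one in-flight USB request: remember the guest URB
      * address and guest buffer address alongside the host copy of the URB
      * that is actually handed to the kernel.
      */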
4680 struct live_urb {
4681     uint64_t target_urb_adr;
4682     uint64_t target_buf_adr;
4683     char *target_buf_ptr;
4684     struct usbdevfs_urb host_urb;
4685 };
4686 
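     /*
      * Lazily created table mapping guest URB addresses to their live_urb
      * wrappers, so that later DISCARDURB and REAPURB calls can find the
      * host URB that corresponds to a guest URB.
      */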
4687 static GHashTable *usbdevfs_urb_hashtable(void)
4688 {
4689     static GHashTable *urb_hashtable;
4690 
4691     if (!urb_hashtable) {
4692         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4693     }
4694     return urb_hashtable;
4695 }
4696 
4697 static void urb_hashtable_insert(struct live_urb *urb)
4698 {
4699     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4700     g_hash_table_insert(urb_hashtable, urb, urb);
4701 }
4702 
4703 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4704 {
4705     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4706     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4707 }
4708 
4709 static void urb_hashtable_remove(struct live_urb *urb)
4710 {
4711     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4712     g_hash_table_remove(urb_hashtable, urb);
4713 }
4714 
4715 static abi_long
4716 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4717                           int fd, int cmd, abi_long arg)
4718 {
4719     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4720     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4721     struct live_urb *lurb;
4722     void *argptr;
4723     uint64_t hurb;
4724     int target_size;
4725     uintptr_t target_urb_adr;
4726     abi_long ret;
4727 
4728     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4729 
4730     memset(buf_temp, 0, sizeof(uint64_t));
4731     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4732     if (is_error(ret)) {
4733         return ret;
4734     }
4735 
4736     memcpy(&hurb, buf_temp, sizeof(uint64_t));
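         /*
          * The kernel hands back the address of the usbdevfs_urb we
          * submitted, which is embedded in a live_urb wrapper; step back to
          * the start of the wrapper (container_of style) to reach the saved
          * guest addresses.
          */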
4737     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4738     if (!lurb->target_urb_adr) {
4739         return -TARGET_EFAULT;
4740     }
4741     urb_hashtable_remove(lurb);
4742     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4743         lurb->host_urb.buffer_length);
4744     lurb->target_buf_ptr = NULL;
4745 
4746     /* restore the guest buffer pointer */
4747     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4748 
4749     /* update the guest urb struct */
4750     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4751     if (!argptr) {
4752         g_free(lurb);
4753         return -TARGET_EFAULT;
4754     }
4755     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4756     unlock_user(argptr, lurb->target_urb_adr, target_size);
4757 
4758     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4759     /* write back the urb handle */
4760     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4761     if (!argptr) {
4762         g_free(lurb);
4763         return -TARGET_EFAULT;
4764     }
4765 
4766     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4767     target_urb_adr = lurb->target_urb_adr;
4768     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4769     unlock_user(argptr, arg, target_size);
4770 
4771     g_free(lurb);
4772     return ret;
4773 }
4774 
4775 static abi_long
4776 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4777                              uint8_t *buf_temp __attribute__((unused)),
4778                              int fd, int cmd, abi_long arg)
4779 {
4780     struct live_urb *lurb;
4781 
4782     /* map target address back to host URB with metadata. */
4783     lurb = urb_hashtable_lookup(arg);
4784     if (!lurb) {
4785         return -TARGET_EFAULT;
4786     }
4787     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4788 }
4789 
4790 static abi_long
4791 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4792                             int fd, int cmd, abi_long arg)
4793 {
4794     const argtype *arg_type = ie->arg_type;
4795     int target_size;
4796     abi_long ret;
4797     void *argptr;
4798     int rw_dir;
4799     struct live_urb *lurb;
4800 
4801     /*
4802      * Each submitted URB needs to map to a unique ID for the
4803      * kernel, and that unique ID needs to be a pointer to
4804      * host memory.  Hence, we need to malloc for each URB.
4805      * Isochronous transfers have a variable length struct.
4806      */
4807     arg_type++;
4808     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4809 
4810     /* construct host copy of urb and metadata */
4811     lurb = g_try_malloc0(sizeof(struct live_urb));
4812     if (!lurb) {
4813         return -TARGET_ENOMEM;
4814     }
4815 
4816     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4817     if (!argptr) {
4818         g_free(lurb);
4819         return -TARGET_EFAULT;
4820     }
4821     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4822     unlock_user(argptr, arg, 0);
4823 
4824     lurb->target_urb_adr = arg;
4825     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4826 
4827     /* buffer space used depends on endpoint type so lock the entire buffer */
4828     /* control type urbs should check the buffer contents for true direction */
4829     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4830     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4831         lurb->host_urb.buffer_length, 1);
4832     if (lurb->target_buf_ptr == NULL) {
4833         g_free(lurb);
4834         return -TARGET_EFAULT;
4835     }
4836 
4837     /* update buffer pointer in host copy */
4838     lurb->host_urb.buffer = lurb->target_buf_ptr;
4839 
4840     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4841     if (is_error(ret)) {
4842         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4843         g_free(lurb);
4844     } else {
4845         urb_hashtable_insert(lurb);
4846     }
4847 
4848     return ret;
4849 }
4850 #endif /* CONFIG_USBFS */
4851 
4852 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4853                             int cmd, abi_long arg)
4854 {
4855     void *argptr;
4856     struct dm_ioctl *host_dm;
4857     abi_long guest_data;
4858     uint32_t guest_data_size;
4859     int target_size;
4860     const argtype *arg_type = ie->arg_type;
4861     abi_long ret;
4862     void *big_buf = NULL;
4863     char *host_data;
4864 
4865     arg_type++;
4866     target_size = thunk_type_size(arg_type, 0);
4867     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4868     if (!argptr) {
4869         ret = -TARGET_EFAULT;
4870         goto out;
4871     }
4872     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4873     unlock_user(argptr, arg, 0);
4874 
4875     /* buf_temp is too small, so fetch things into a bigger buffer */
4876     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4877     memcpy(big_buf, buf_temp, target_size);
4878     buf_temp = big_buf;
4879     host_dm = big_buf;
4880 
4881     guest_data = arg + host_dm->data_start;
4882     if ((guest_data - arg) < 0) {
4883         ret = -TARGET_EINVAL;
4884         goto out;
4885     }
4886     guest_data_size = host_dm->data_size - host_dm->data_start;
4887     host_data = (char*)host_dm + host_dm->data_start;
4888 
4889     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4890     if (!argptr) {
4891         ret = -TARGET_EFAULT;
4892         goto out;
4893     }
4894 
4895     switch (ie->host_cmd) {
4896     case DM_REMOVE_ALL:
4897     case DM_LIST_DEVICES:
4898     case DM_DEV_CREATE:
4899     case DM_DEV_REMOVE:
4900     case DM_DEV_SUSPEND:
4901     case DM_DEV_STATUS:
4902     case DM_DEV_WAIT:
4903     case DM_TABLE_STATUS:
4904     case DM_TABLE_CLEAR:
4905     case DM_TABLE_DEPS:
4906     case DM_LIST_VERSIONS:
4907         /* no input data */
4908         break;
4909     case DM_DEV_RENAME:
4910     case DM_DEV_SET_GEOMETRY:
4911         /* data contains only strings */
4912         memcpy(host_data, argptr, guest_data_size);
4913         break;
4914     case DM_TARGET_MSG:
4915         memcpy(host_data, argptr, guest_data_size);
4916         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4917         break;
4918     case DM_TABLE_LOAD:
4919     {
4920         void *gspec = argptr;
4921         void *cur_data = host_data;
4922         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4923         int spec_size = thunk_type_size(arg_type, 0);
4924         int i;
4925 
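             /*
              * The payload is a sequence of dm_target_spec structures, each
              * followed by its parameter string: convert each spec, copy its
              * string, and recompute the 'next' offsets for the host layout.
              */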
4926         for (i = 0; i < host_dm->target_count; i++) {
4927             struct dm_target_spec *spec = cur_data;
4928             uint32_t next;
4929             int slen;
4930 
4931             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4932             slen = strlen((char*)gspec + spec_size) + 1;
4933             next = spec->next;
4934             spec->next = sizeof(*spec) + slen;
4935             strcpy((char*)&spec[1], gspec + spec_size);
4936             gspec += next;
4937             cur_data += spec->next;
4938         }
4939         break;
4940     }
4941     default:
4942         ret = -TARGET_EINVAL;
4943         unlock_user(argptr, guest_data, 0);
4944         goto out;
4945     }
4946     unlock_user(argptr, guest_data, 0);
4947 
4948     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4949     if (!is_error(ret)) {
4950         guest_data = arg + host_dm->data_start;
4951         guest_data_size = host_dm->data_size - host_dm->data_start;
4952         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
             if (!argptr) {
                 ret = -TARGET_EFAULT;
                 goto out;
             }
4953         switch (ie->host_cmd) {
4954         case DM_REMOVE_ALL:
4955         case DM_DEV_CREATE:
4956         case DM_DEV_REMOVE:
4957         case DM_DEV_RENAME:
4958         case DM_DEV_SUSPEND:
4959         case DM_DEV_STATUS:
4960         case DM_TABLE_LOAD:
4961         case DM_TABLE_CLEAR:
4962         case DM_TARGET_MSG:
4963         case DM_DEV_SET_GEOMETRY:
4964             /* no return data */
4965             break;
4966         case DM_LIST_DEVICES:
4967         {
4968             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4969             uint32_t remaining_data = guest_data_size;
4970             void *cur_data = argptr;
4971             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4972             int nl_size = 12; /* can't use thunk_size due to alignment */
4973 
4974             while (1) {
4975                 uint32_t next = nl->next;
4976                 if (next) {
4977                     nl->next = nl_size + (strlen(nl->name) + 1);
4978                 }
4979                 if (remaining_data < nl->next) {
4980                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4981                     break;
4982                 }
4983                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4984                 strcpy(cur_data + nl_size, nl->name);
4985                 cur_data += nl->next;
4986                 remaining_data -= nl->next;
4987                 if (!next) {
4988                     break;
4989                 }
4990                 nl = (void*)nl + next;
4991             }
4992             break;
4993         }
4994         case DM_DEV_WAIT:
4995         case DM_TABLE_STATUS:
4996         {
4997             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4998             void *cur_data = argptr;
4999             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5000             int spec_size = thunk_type_size(arg_type, 0);
5001             int i;
5002 
5003             for (i = 0; i < host_dm->target_count; i++) {
5004                 uint32_t next = spec->next;
5005                 int slen = strlen((char*)&spec[1]) + 1;
5006                 spec->next = (cur_data - argptr) + spec_size + slen;
5007                 if (guest_data_size < spec->next) {
5008                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5009                     break;
5010                 }
5011                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5012                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5013                 cur_data = argptr + spec->next;
5014                 spec = (void*)host_dm + host_dm->data_start + next;
5015             }
5016             break;
5017         }
5018         case DM_TABLE_DEPS:
5019         {
5020             void *hdata = (void*)host_dm + host_dm->data_start;
5021             int count = *(uint32_t*)hdata;
5022             uint64_t *hdev = hdata + 8;
5023             uint64_t *gdev = argptr + 8;
5024             int i;
5025 
5026             *(uint32_t*)argptr = tswap32(count);
5027             for (i = 0; i < count; i++) {
5028                 *gdev = tswap64(*hdev);
5029                 gdev++;
5030                 hdev++;
5031             }
5032             break;
5033         }
5034         case DM_LIST_VERSIONS:
5035         {
5036             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5037             uint32_t remaining_data = guest_data_size;
5038             void *cur_data = argptr;
5039             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5040             int vers_size = thunk_type_size(arg_type, 0);
5041 
5042             while (1) {
5043                 uint32_t next = vers->next;
5044                 if (next) {
5045                     vers->next = vers_size + (strlen(vers->name) + 1);
5046                 }
5047                 if (remaining_data < vers->next) {
5048                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5049                     break;
5050                 }
5051                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5052                 strcpy(cur_data + vers_size, vers->name);
5053                 cur_data += vers->next;
5054                 remaining_data -= vers->next;
5055                 if (!next) {
5056                     break;
5057                 }
5058                 vers = (void*)vers + next;
5059             }
5060             break;
5061         }
5062         default:
5063             unlock_user(argptr, guest_data, 0);
5064             ret = -TARGET_EINVAL;
5065             goto out;
5066         }
5067         unlock_user(argptr, guest_data, guest_data_size);
5068 
5069         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5070         if (!argptr) {
5071             ret = -TARGET_EFAULT;
5072             goto out;
5073         }
5074         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5075         unlock_user(argptr, arg, target_size);
5076     }
5077 out:
5078     g_free(big_buf);
5079     return ret;
5080 }
5081 
5082 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5083                                int cmd, abi_long arg)
5084 {
5085     void *argptr;
5086     int target_size;
5087     const argtype *arg_type = ie->arg_type;
5088     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5089     abi_long ret;
5090 
5091     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5092     struct blkpg_partition host_part;
5093 
5094     /* Read and convert blkpg */
5095     arg_type++;
5096     target_size = thunk_type_size(arg_type, 0);
5097     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5098     if (!argptr) {
5099         ret = -TARGET_EFAULT;
5100         goto out;
5101     }
5102     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5103     unlock_user(argptr, arg, 0);
5104 
5105     switch (host_blkpg->op) {
5106     case BLKPG_ADD_PARTITION:
5107     case BLKPG_DEL_PARTITION:
5108         /* payload is struct blkpg_partition */
5109         break;
5110     default:
5111         /* Unknown opcode */
5112         ret = -TARGET_EINVAL;
5113         goto out;
5114     }
5115 
5116     /* Read and convert blkpg->data */
5117     arg = (abi_long)(uintptr_t)host_blkpg->data;
5118     target_size = thunk_type_size(part_arg_type, 0);
5119     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5120     if (!argptr) {
5121         ret = -TARGET_EFAULT;
5122         goto out;
5123     }
5124     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5125     unlock_user(argptr, arg, 0);
5126 
5127     /* Swizzle the data pointer to our local copy and call! */
5128     host_blkpg->data = &host_part;
5129     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5130 
5131 out:
5132     return ret;
5133 }
5134 
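     /*
      * Routing table ioctls take a struct rtentry whose rt_dev member is a
      * pointer to a device name string; convert the structure field by field
      * so that this pointer can be translated into a locked host string.
      */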
5135 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5136                                 int fd, int cmd, abi_long arg)
5137 {
5138     const argtype *arg_type = ie->arg_type;
5139     const StructEntry *se;
5140     const argtype *field_types;
5141     const int *dst_offsets, *src_offsets;
5142     int target_size;
5143     void *argptr;
5144     abi_ulong *target_rt_dev_ptr = NULL;
5145     unsigned long *host_rt_dev_ptr = NULL;
5146     abi_long ret;
5147     int i;
5148 
5149     assert(ie->access == IOC_W);
5150     assert(*arg_type == TYPE_PTR);
5151     arg_type++;
5152     assert(*arg_type == TYPE_STRUCT);
5153     target_size = thunk_type_size(arg_type, 0);
5154     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5155     if (!argptr) {
5156         return -TARGET_EFAULT;
5157     }
5158     arg_type++;
5159     assert(*arg_type == (int)STRUCT_rtentry);
5160     se = struct_entries + *arg_type++;
5161     assert(se->convert[0] == NULL);
5162     /* convert struct here to be able to catch rt_dev string */
5163     field_types = se->field_types;
5164     dst_offsets = se->field_offsets[THUNK_HOST];
5165     src_offsets = se->field_offsets[THUNK_TARGET];
5166     for (i = 0; i < se->nb_fields; i++) {
5167         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5168             assert(*field_types == TYPE_PTRVOID);
5169             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5170             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5171             if (*target_rt_dev_ptr != 0) {
5172                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5173                                                   tswapal(*target_rt_dev_ptr));
5174                 if (!*host_rt_dev_ptr) {
5175                     unlock_user(argptr, arg, 0);
5176                     return -TARGET_EFAULT;
5177                 }
5178             } else {
5179                 *host_rt_dev_ptr = 0;
5180             }
5181             field_types++;
5182             continue;
5183         }
5184         field_types = thunk_convert(buf_temp + dst_offsets[i],
5185                                     argptr + src_offsets[i],
5186                                     field_types, THUNK_HOST);
5187     }
5188     unlock_user(argptr, arg, 0);
5189 
5190     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5191 
5192     assert(host_rt_dev_ptr != NULL);
5193     assert(target_rt_dev_ptr != NULL);
5194     if (*host_rt_dev_ptr != 0) {
5195         unlock_user((void *)*host_rt_dev_ptr,
5196                     *target_rt_dev_ptr, 0);
5197     }
5198     return ret;
5199 }
5200 
5201 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5202                                      int fd, int cmd, abi_long arg)
5203 {
5204     int sig = target_to_host_signal(arg);
5205     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5206 }
5207 
5208 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5209                                     int fd, int cmd, abi_long arg)
5210 {
5211     struct timeval tv;
5212     abi_long ret;
5213 
5214     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5215     if (is_error(ret)) {
5216         return ret;
5217     }
5218 
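         /*
          * The _OLD variant returns the target's traditional struct timeval;
          * the newer variant returns a 64-bit time format.
          */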
5219     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5220         if (copy_to_user_timeval(arg, &tv)) {
5221             return -TARGET_EFAULT;
5222         }
5223     } else {
5224         if (copy_to_user_timeval64(arg, &tv)) {
5225             return -TARGET_EFAULT;
5226         }
5227     }
5228 
5229     return ret;
5230 }
5231 
5232 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5233                                       int fd, int cmd, abi_long arg)
5234 {
5235     struct timespec ts;
5236     abi_long ret;
5237 
5238     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5239     if (is_error(ret)) {
5240         return ret;
5241     }
5242 
5243     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5244         if (host_to_target_timespec(arg, &ts)) {
5245             return -TARGET_EFAULT;
5246         }
5247     } else {
5248         if (host_to_target_timespec64(arg, &ts)) {
5249             return -TARGET_EFAULT;
5250         }
5251     }
5252 
5253     return ret;
5254 }
5255 
5256 #ifdef TIOCGPTPEER
5257 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5258                                      int fd, int cmd, abi_long arg)
5259 {
5260     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5261     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5262 }
5263 #endif
5264 
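     /*
      * Table of supported ioctls, populated from ioctls.h.  Entries with a
      * do_ioctl callback are handled specially; IOCTL_IGNORE entries leave
      * host_cmd as 0, which do_ioctl() reports as -TARGET_ENOSYS.
      */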
5265 static IOCTLEntry ioctl_entries[] = {
5266 #define IOCTL(cmd, access, ...) \
5267     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5268 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5269     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5270 #define IOCTL_IGNORE(cmd) \
5271     { TARGET_ ## cmd, 0, #cmd },
5272 #include "ioctls.h"
5273     { 0, 0, },
5274 };
5275 
5276 /* ??? Implement proper locking for ioctls.  */
5277 /* do_ioctl() Must return target values and target errnos. */
5278 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5279 {
5280     const IOCTLEntry *ie;
5281     const argtype *arg_type;
5282     abi_long ret;
5283     uint8_t buf_temp[MAX_STRUCT_SIZE];
5284     int target_size;
5285     void *argptr;
5286 
5287     ie = ioctl_entries;
5288     for(;;) {
5289         if (ie->target_cmd == 0) {
5290             qemu_log_mask(
5291                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5292             return -TARGET_ENOSYS;
5293         }
5294         if (ie->target_cmd == cmd)
5295             break;
5296         ie++;
5297     }
5298     arg_type = ie->arg_type;
5299     if (ie->do_ioctl) {
5300         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5301     } else if (!ie->host_cmd) {
5302         /* Some architectures define BSD ioctls in their headers
5303            that are not implemented in Linux.  */
5304         return -TARGET_ENOSYS;
5305     }
5306 
5307     switch(arg_type[0]) {
5308     case TYPE_NULL:
5309         /* no argument */
5310         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5311         break;
5312     case TYPE_PTRVOID:
5313     case TYPE_INT:
5314     case TYPE_LONG:
5315     case TYPE_ULONG:
5316         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5317         break;
5318     case TYPE_PTR:
5319         arg_type++;
5320         target_size = thunk_type_size(arg_type, 0);
5321         switch(ie->access) {
5322         case IOC_R:
5323             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5324             if (!is_error(ret)) {
5325                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5326                 if (!argptr)
5327                     return -TARGET_EFAULT;
5328                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5329                 unlock_user(argptr, arg, target_size);
5330             }
5331             break;
5332         case IOC_W:
5333             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5334             if (!argptr)
5335                 return -TARGET_EFAULT;
5336             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5337             unlock_user(argptr, arg, 0);
5338             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5339             break;
5340         default:
5341         case IOC_RW:
5342             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5343             if (!argptr)
5344                 return -TARGET_EFAULT;
5345             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5346             unlock_user(argptr, arg, 0);
5347             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5348             if (!is_error(ret)) {
5349                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5350                 if (!argptr)
5351                     return -TARGET_EFAULT;
5352                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5353                 unlock_user(argptr, arg, target_size);
5354             }
5355             break;
5356         }
5357         break;
5358     default:
5359         qemu_log_mask(LOG_UNIMP,
5360                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5361                       (long)cmd, arg_type[0]);
5362         ret = -TARGET_ENOSYS;
5363         break;
5364     }
5365     return ret;
5366 }
5367 
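     /*
      * The four columns of each entry are: target mask, target bits,
      * host mask, host bits.  Within the masked field, the given target
      * bit pattern corresponds to the given host bit pattern.
      */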
5368 static const bitmask_transtbl iflag_tbl[] = {
5369         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5370         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5371         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5372         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5373         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5374         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5375         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5376         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5377         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5378         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5379         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5380         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5381         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5382         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5383         { 0, 0, 0, 0 }
5384 };
5385 
5386 static const bitmask_transtbl oflag_tbl[] = {
5387 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5388 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5389 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5390 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5391 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5392 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5393 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5394 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5395 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5396 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5397 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5398 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5399 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5400 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5401 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5402 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5403 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5404 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5405 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5406 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5407 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5408 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5409 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5410 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5411 	{ 0, 0, 0, 0 }
5412 };
5413 
5414 static const bitmask_transtbl cflag_tbl[] = {
5415 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5416 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5417 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5418 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5419 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5420 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5421 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5422 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5423 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5424 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5425 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5426 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5427 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5428 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5429 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5430 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5431 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5432 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5433 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5434 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5435 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5436 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5437 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5438 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5439 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5440 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5441 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5442 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5443 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5444 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5445 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5446 	{ 0, 0, 0, 0 }
5447 };
5448 
5449 static const bitmask_transtbl lflag_tbl[] = {
5450 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5451 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5452 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5453 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5454 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5455 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5456 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5457 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5458 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5459 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5460 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5461 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5462 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5463 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5464 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5465 	{ 0, 0, 0, 0 }
5466 };
5467 
5468 static void target_to_host_termios (void *dst, const void *src)
5469 {
5470     struct host_termios *host = dst;
5471     const struct target_termios *target = src;
5472 
5473     host->c_iflag =
5474         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5475     host->c_oflag =
5476         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5477     host->c_cflag =
5478         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5479     host->c_lflag =
5480         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5481     host->c_line = target->c_line;
5482 
5483     memset(host->c_cc, 0, sizeof(host->c_cc));
5484     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5485     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5486     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5487     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5488     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5489     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5490     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5491     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5492     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5493     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5494     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5495     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5496     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5497     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5498     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5499     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5500     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5501 }
5502 
5503 static void host_to_target_termios (void *dst, const void *src)
5504 {
5505     struct target_termios *target = dst;
5506     const struct host_termios *host = src;
5507 
5508     target->c_iflag =
5509         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5510     target->c_oflag =
5511         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5512     target->c_cflag =
5513         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5514     target->c_lflag =
5515         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5516     target->c_line = host->c_line;
5517 
5518     memset(target->c_cc, 0, sizeof(target->c_cc));
5519     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5520     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5521     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5522     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5523     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5524     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5525     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5526     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5527     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5528     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5529     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5530     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5531     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5532     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5533     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5534     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5535     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5536 }
5537 
5538 static const StructEntry struct_termios_def = {
5539     .convert = { host_to_target_termios, target_to_host_termios },
5540     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5541     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5542 };
5543 
5544 static bitmask_transtbl mmap_flags_tbl[] = {
5545     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5546     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5547     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5548     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5549       MAP_ANONYMOUS, MAP_ANONYMOUS },
5550     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5551       MAP_GROWSDOWN, MAP_GROWSDOWN },
5552     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5553       MAP_DENYWRITE, MAP_DENYWRITE },
5554     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5555       MAP_EXECUTABLE, MAP_EXECUTABLE },
5556     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5557     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5558       MAP_NORESERVE, MAP_NORESERVE },
5559     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5560     /* MAP_STACK had been ignored by the kernel for quite some time.
5561        Recognize it for the target insofar as we do not want to pass
5562        it through to the host.  */
5563     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5564     { 0, 0, 0, 0 }
5565 };
5566 
5567 /*
5568  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5569  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5570  */
5571 #if defined(TARGET_I386)
5572 
5573 /* NOTE: there is really one LDT for all the threads */
5574 static uint8_t *ldt_table;
5575 
5576 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5577 {
5578     int size;
5579     void *p;
5580 
5581     if (!ldt_table)
5582         return 0;
5583     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5584     if (size > bytecount)
5585         size = bytecount;
5586     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5587     if (!p)
5588         return -TARGET_EFAULT;
5589     /* ??? Should this be byteswapped?  */
5590     memcpy(p, ldt_table, size);
5591     unlock_user(p, ptr, size);
5592     return size;
5593 }
5594 
5595 /* XXX: add locking support */
5596 static abi_long write_ldt(CPUX86State *env,
5597                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5598 {
5599     struct target_modify_ldt_ldt_s ldt_info;
5600     struct target_modify_ldt_ldt_s *target_ldt_info;
5601     int seg_32bit, contents, read_exec_only, limit_in_pages;
5602     int seg_not_present, useable, lm;
5603     uint32_t *lp, entry_1, entry_2;
5604 
5605     if (bytecount != sizeof(ldt_info))
5606         return -TARGET_EINVAL;
5607     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5608         return -TARGET_EFAULT;
5609     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5610     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5611     ldt_info.limit = tswap32(target_ldt_info->limit);
5612     ldt_info.flags = tswap32(target_ldt_info->flags);
5613     unlock_user_struct(target_ldt_info, ptr, 0);
5614 
5615     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5616         return -TARGET_EINVAL;
5617     seg_32bit = ldt_info.flags & 1;
5618     contents = (ldt_info.flags >> 1) & 3;
5619     read_exec_only = (ldt_info.flags >> 3) & 1;
5620     limit_in_pages = (ldt_info.flags >> 4) & 1;
5621     seg_not_present = (ldt_info.flags >> 5) & 1;
5622     useable = (ldt_info.flags >> 6) & 1;
5623 #ifdef TARGET_ABI32
5624     lm = 0;
5625 #else
5626     lm = (ldt_info.flags >> 7) & 1;
5627 #endif
5628     if (contents == 3) {
5629         if (oldmode)
5630             return -TARGET_EINVAL;
5631         if (seg_not_present == 0)
5632             return -TARGET_EINVAL;
5633     }
5634     /* allocate the LDT */
5635     if (!ldt_table) {
5636         env->ldt.base = target_mmap(0,
5637                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5638                                     PROT_READ|PROT_WRITE,
5639                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5640         if (env->ldt.base == -1)
5641             return -TARGET_ENOMEM;
5642         memset(g2h(env->ldt.base), 0,
5643                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5644         env->ldt.limit = 0xffff;
5645         ldt_table = g2h(env->ldt.base);
5646     }
5647 
5648     /* NOTE: same code as Linux kernel */
5649     /* Allow LDTs to be cleared by the user. */
5650     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5651         if (oldmode ||
5652             (contents == 0		&&
5653              read_exec_only == 1	&&
5654              seg_32bit == 0		&&
5655              limit_in_pages == 0	&&
5656              seg_not_present == 1	&&
5657              useable == 0 )) {
5658             entry_1 = 0;
5659             entry_2 = 0;
5660             goto install;
5661         }
5662     }
5663 
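         /*
          * Assemble the two 32-bit words of an x86 segment descriptor: the
          * base and limit are split across both words, with the access and
          * flag bits packed into the high word.
          */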
5664     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5665         (ldt_info.limit & 0x0ffff);
5666     entry_2 = (ldt_info.base_addr & 0xff000000) |
5667         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5668         (ldt_info.limit & 0xf0000) |
5669         ((read_exec_only ^ 1) << 9) |
5670         (contents << 10) |
5671         ((seg_not_present ^ 1) << 15) |
5672         (seg_32bit << 22) |
5673         (limit_in_pages << 23) |
5674         (lm << 21) |
5675         0x7000;
5676     if (!oldmode)
5677         entry_2 |= (useable << 20);
5678 
5679     /* Install the new entry ...  */
5680 install:
5681     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5682     lp[0] = tswap32(entry_1);
5683     lp[1] = tswap32(entry_2);
5684     return 0;
5685 }
5686 
5687 /* specific and weird i386 syscalls */
5688 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5689                               unsigned long bytecount)
5690 {
5691     abi_long ret;
5692 
5693     switch (func) {
5694     case 0:
5695         ret = read_ldt(ptr, bytecount);
5696         break;
5697     case 1:
5698         ret = write_ldt(env, ptr, bytecount, 1);
5699         break;
5700     case 0x11:
5701         ret = write_ldt(env, ptr, bytecount, 0);
5702         break;
5703     default:
5704         ret = -TARGET_ENOSYS;
5705         break;
5706     }
5707     return ret;
5708 }
5709 
5710 #if defined(TARGET_ABI32)
5711 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5712 {
5713     uint64_t *gdt_table = g2h(env->gdt.base);
5714     struct target_modify_ldt_ldt_s ldt_info;
5715     struct target_modify_ldt_ldt_s *target_ldt_info;
5716     int seg_32bit, contents, read_exec_only, limit_in_pages;
5717     int seg_not_present, useable, lm;
5718     uint32_t *lp, entry_1, entry_2;
5719     int i;
5720 
5721     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5722     if (!target_ldt_info)
5723         return -TARGET_EFAULT;
5724     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5725     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5726     ldt_info.limit = tswap32(target_ldt_info->limit);
5727     ldt_info.flags = tswap32(target_ldt_info->flags);
5728     if (ldt_info.entry_number == -1) {
5729         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5730             if (gdt_table[i] == 0) {
5731                 ldt_info.entry_number = i;
5732                 target_ldt_info->entry_number = tswap32(i);
5733                 break;
5734             }
5735         }
5736     }
5737     unlock_user_struct(target_ldt_info, ptr, 1);
5738 
5739     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5740         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5741            return -TARGET_EINVAL;
5742     seg_32bit = ldt_info.flags & 1;
5743     contents = (ldt_info.flags >> 1) & 3;
5744     read_exec_only = (ldt_info.flags >> 3) & 1;
5745     limit_in_pages = (ldt_info.flags >> 4) & 1;
5746     seg_not_present = (ldt_info.flags >> 5) & 1;
5747     useable = (ldt_info.flags >> 6) & 1;
5748 #ifdef TARGET_ABI32
5749     lm = 0;
5750 #else
5751     lm = (ldt_info.flags >> 7) & 1;
5752 #endif
5753 
5754     if (contents == 3) {
5755         if (seg_not_present == 0)
5756             return -TARGET_EINVAL;
5757     }
5758 
5759     /* NOTE: same code as Linux kernel */
5760     /* Allow LDTs to be cleared by the user. */
5761     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5762         if ((contents == 0             &&
5763              read_exec_only == 1       &&
5764              seg_32bit == 0            &&
5765              limit_in_pages == 0       &&
5766              seg_not_present == 1      &&
5767              useable == 0 )) {
5768             entry_1 = 0;
5769             entry_2 = 0;
5770             goto install;
5771         }
5772     }
5773 
5774     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5775         (ldt_info.limit & 0x0ffff);
5776     entry_2 = (ldt_info.base_addr & 0xff000000) |
5777         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5778         (ldt_info.limit & 0xf0000) |
5779         ((read_exec_only ^ 1) << 9) |
5780         (contents << 10) |
5781         ((seg_not_present ^ 1) << 15) |
5782         (seg_32bit << 22) |
5783         (limit_in_pages << 23) |
5784         (useable << 20) |
5785         (lm << 21) |
5786         0x7000;
5787 
5788     /* Install the new entry ...  */
5789 install:
5790     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5791     lp[0] = tswap32(entry_1);
5792     lp[1] = tswap32(entry_2);
5793     return 0;
5794 }
5795 
5796 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5797 {
5798     struct target_modify_ldt_ldt_s *target_ldt_info;
5799     uint64_t *gdt_table = g2h(env->gdt.base);
5800     uint32_t base_addr, limit, flags;
5801     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5802     int seg_not_present, useable, lm;
5803     uint32_t *lp, entry_1, entry_2;
5804 
5805     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5806     if (!target_ldt_info)
5807         return -TARGET_EFAULT;
5808     idx = tswap32(target_ldt_info->entry_number);
5809     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5810         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5811         unlock_user_struct(target_ldt_info, ptr, 1);
5812         return -TARGET_EINVAL;
5813     }
5814     lp = (uint32_t *)(gdt_table + idx);
5815     entry_1 = tswap32(lp[0]);
5816     entry_2 = tswap32(lp[1]);
5817 
5818     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5819     contents = (entry_2 >> 10) & 3;
5820     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5821     seg_32bit = (entry_2 >> 22) & 1;
5822     limit_in_pages = (entry_2 >> 23) & 1;
5823     useable = (entry_2 >> 20) & 1;
5824 #ifdef TARGET_ABI32
5825     lm = 0;
5826 #else
5827     lm = (entry_2 >> 21) & 1;
5828 #endif
5829     flags = (seg_32bit << 0) | (contents << 1) |
5830         (read_exec_only << 3) | (limit_in_pages << 4) |
5831         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5832     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5833     base_addr = (entry_1 >> 16) |
5834         (entry_2 & 0xff000000) |
5835         ((entry_2 & 0xff) << 16);
5836     target_ldt_info->base_addr = tswapal(base_addr);
5837     target_ldt_info->limit = tswap32(limit);
5838     target_ldt_info->flags = tswap32(flags);
5839     unlock_user_struct(target_ldt_info, ptr, 1);
5840     return 0;
5841 }
5842 
5843 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5844 {
5845     return -TARGET_ENOSYS;
5846 }
5847 #else
5848 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5849 {
5850     abi_long ret = 0;
5851     abi_ulong val;
5852     int idx;
5853 
5854     switch(code) {
5855     case TARGET_ARCH_SET_GS:
5856     case TARGET_ARCH_SET_FS:
5857         if (code == TARGET_ARCH_SET_GS)
5858             idx = R_GS;
5859         else
5860             idx = R_FS;
5861         cpu_x86_load_seg(env, idx, 0);
5862         env->segs[idx].base = addr;
5863         break;
5864     case TARGET_ARCH_GET_GS:
5865     case TARGET_ARCH_GET_FS:
5866         if (code == TARGET_ARCH_GET_GS)
5867             idx = R_GS;
5868         else
5869             idx = R_FS;
5870         val = env->segs[idx].base;
5871         if (put_user(val, addr, abi_ulong))
5872             ret = -TARGET_EFAULT;
5873         break;
5874     default:
5875         ret = -TARGET_EINVAL;
5876         break;
5877     }
5878     return ret;
5879 }
5880 #endif /* defined(TARGET_ABI32) */
5881 
5882 #endif /* defined(TARGET_I386) */
5883 
5884 #define NEW_STACK_SIZE 0x40000
5885 
5886 
5887 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5888 typedef struct {
5889     CPUArchState *env;
5890     pthread_mutex_t mutex;
5891     pthread_cond_t cond;
5892     pthread_t thread;
5893     uint32_t tid;
5894     abi_ulong child_tidptr;
5895     abi_ulong parent_tidptr;
5896     sigset_t sigmask;
5897 } new_thread_info;
5898 
5899 static void *clone_func(void *arg)
5900 {
5901     new_thread_info *info = arg;
5902     CPUArchState *env;
5903     CPUState *cpu;
5904     TaskState *ts;
5905 
5906     rcu_register_thread();
5907     tcg_register_thread();
5908     env = info->env;
5909     cpu = env_cpu(env);
5910     thread_cpu = cpu;
5911     ts = (TaskState *)cpu->opaque;
5912     info->tid = sys_gettid();
5913     task_settid(ts);
5914     if (info->child_tidptr)
5915         put_user_u32(info->tid, info->child_tidptr);
5916     if (info->parent_tidptr)
5917         put_user_u32(info->tid, info->parent_tidptr);
5918     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5919     /* Enable signals.  */
5920     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5921     /* Signal to the parent that we're ready.  */
5922     pthread_mutex_lock(&info->mutex);
5923     pthread_cond_broadcast(&info->cond);
5924     pthread_mutex_unlock(&info->mutex);
5925     /* Wait until the parent has finished initializing the tls state.  */
5926     pthread_mutex_lock(&clone_lock);
5927     pthread_mutex_unlock(&clone_lock);
5928     cpu_loop(env);
5929     /* never exits */
5930     return NULL;
5931 }
5932 
5933 /* do_fork() Must return host values and target errnos (unlike most
5934    do_*() functions). */
5935 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5936                    abi_ulong parent_tidptr, target_ulong newtls,
5937                    abi_ulong child_tidptr)
5938 {
5939     CPUState *cpu = env_cpu(env);
5940     int ret;
5941     TaskState *ts;
5942     CPUState *new_cpu;
5943     CPUArchState *new_env;
5944     sigset_t sigmask;
5945 
5946     flags &= ~CLONE_IGNORED_FLAGS;
5947 
5948     /* Emulate vfork() with fork() */
5949     if (flags & CLONE_VFORK)
5950         flags &= ~(CLONE_VFORK | CLONE_VM);
5951 
5952     if (flags & CLONE_VM) {
5953         TaskState *parent_ts = (TaskState *)cpu->opaque;
5954         new_thread_info info;
5955         pthread_attr_t attr;
5956 
5957         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5958             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5959             return -TARGET_EINVAL;
5960         }
5961 
5962         ts = g_new0(TaskState, 1);
5963         init_task_state(ts);
5964 
5965         /* Grab a mutex so that thread setup appears atomic.  */
5966         pthread_mutex_lock(&clone_lock);
5967 
5968         /* we create a new CPU instance. */
5969         new_env = cpu_copy(env);
5970         /* Init regs that differ from the parent.  */
5971         cpu_clone_regs_child(new_env, newsp, flags);
5972         cpu_clone_regs_parent(env, flags);
5973         new_cpu = env_cpu(new_env);
5974         new_cpu->opaque = ts;
5975         ts->bprm = parent_ts->bprm;
5976         ts->info = parent_ts->info;
5977         ts->signal_mask = parent_ts->signal_mask;
5978 
5979         if (flags & CLONE_CHILD_CLEARTID) {
5980             ts->child_tidptr = child_tidptr;
5981         }
5982 
5983         if (flags & CLONE_SETTLS) {
5984             cpu_set_tls (new_env, newtls);
5985         }
5986 
5987         memset(&info, 0, sizeof(info));
5988         pthread_mutex_init(&info.mutex, NULL);
5989         pthread_mutex_lock(&info.mutex);
5990         pthread_cond_init(&info.cond, NULL);
5991         info.env = new_env;
5992         if (flags & CLONE_CHILD_SETTID) {
5993             info.child_tidptr = child_tidptr;
5994         }
5995         if (flags & CLONE_PARENT_SETTID) {
5996             info.parent_tidptr = parent_tidptr;
5997         }
5998 
5999         ret = pthread_attr_init(&attr);
6000         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6001         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6002         /* It is not safe to deliver signals until the child has finished
6003            initializing, so temporarily block all signals.  */
6004         sigfillset(&sigmask);
6005         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6006         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6007 
6008         /* If this is our first additional thread, we need to ensure we
6009          * generate code for parallel execution and flush old translations.
6010          */
6011         if (!parallel_cpus) {
6012             parallel_cpus = true;
6013             tb_flush(cpu);
6014         }
6015 
6016         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6017         /* TODO: Free new CPU state if thread creation failed.  */
6018 
6019         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6020         pthread_attr_destroy(&attr);
6021         if (ret == 0) {
6022             /* Wait for the child to initialize.  */
6023             pthread_cond_wait(&info.cond, &info.mutex);
6024             ret = info.tid;
6025         } else {
6026             ret = -1;
6027         }
6028         pthread_mutex_unlock(&info.mutex);
6029         pthread_cond_destroy(&info.cond);
6030         pthread_mutex_destroy(&info.mutex);
6031         pthread_mutex_unlock(&clone_lock);
6032     } else {
6033         /* if there is no CLONE_VM, we consider it to be a fork */
6034         if (flags & CLONE_INVALID_FORK_FLAGS) {
6035             return -TARGET_EINVAL;
6036         }
6037 
6038         /* We can't support custom termination signals */
6039         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6040             return -TARGET_EINVAL;
6041         }
6042 
6043         if (block_signals()) {
6044             return -TARGET_ERESTARTSYS;
6045         }
6046 
6047         fork_start();
6048         ret = fork();
6049         if (ret == 0) {
6050             /* Child Process.  */
6051             cpu_clone_regs_child(env, newsp, flags);
6052             fork_end(1);
6053             /* There is a race condition here.  The parent process could
6054                theoretically read the TID in the child process before the child
6055                tid is set.  This would require using either ptrace
6056                (not implemented) or having *_tidptr to point at a shared memory
6057                mapping.  We can't repeat the spinlock hack used above because
6058                the child process gets its own copy of the lock.  */
6059             if (flags & CLONE_CHILD_SETTID)
6060                 put_user_u32(sys_gettid(), child_tidptr);
6061             if (flags & CLONE_PARENT_SETTID)
6062                 put_user_u32(sys_gettid(), parent_tidptr);
6063             ts = (TaskState *)cpu->opaque;
6064             if (flags & CLONE_SETTLS)
6065                 cpu_set_tls (env, newtls);
6066             if (flags & CLONE_CHILD_CLEARTID)
6067                 ts->child_tidptr = child_tidptr;
6068         } else {
6069             cpu_clone_regs_parent(env, flags);
6070             fork_end(0);
6071         }
6072     }
6073     return ret;
6074 }
6075 
6076 /* warning: doesn't handle Linux-specific flags... */
6077 static int target_to_host_fcntl_cmd(int cmd)
6078 {
6079     int ret;
6080 
6081     switch(cmd) {
6082     case TARGET_F_DUPFD:
6083     case TARGET_F_GETFD:
6084     case TARGET_F_SETFD:
6085     case TARGET_F_GETFL:
6086     case TARGET_F_SETFL:
6087         ret = cmd;
6088         break;
6089     case TARGET_F_GETLK:
6090         ret = F_GETLK64;
6091         break;
6092     case TARGET_F_SETLK:
6093         ret = F_SETLK64;
6094         break;
6095     case TARGET_F_SETLKW:
6096         ret = F_SETLKW64;
6097         break;
6098     case TARGET_F_GETOWN:
6099         ret = F_GETOWN;
6100         break;
6101     case TARGET_F_SETOWN:
6102         ret = F_SETOWN;
6103         break;
6104     case TARGET_F_GETSIG:
6105         ret = F_GETSIG;
6106         break;
6107     case TARGET_F_SETSIG:
6108         ret = F_SETSIG;
6109         break;
6110 #if TARGET_ABI_BITS == 32
6111     case TARGET_F_GETLK64:
6112         ret = F_GETLK64;
6113         break;
6114     case TARGET_F_SETLK64:
6115         ret = F_SETLK64;
6116         break;
6117     case TARGET_F_SETLKW64:
6118         ret = F_SETLKW64;
6119         break;
6120 #endif
6121     case TARGET_F_SETLEASE:
6122         ret = F_SETLEASE;
6123         break;
6124     case TARGET_F_GETLEASE:
6125         ret = F_GETLEASE;
6126         break;
6127 #ifdef F_DUPFD_CLOEXEC
6128     case TARGET_F_DUPFD_CLOEXEC:
6129         ret = F_DUPFD_CLOEXEC;
6130         break;
6131 #endif
6132     case TARGET_F_NOTIFY:
6133         ret = F_NOTIFY;
6134         break;
6135 #ifdef F_GETOWN_EX
6136     case TARGET_F_GETOWN_EX:
6137         ret = F_GETOWN_EX;
6138         break;
6139 #endif
6140 #ifdef F_SETOWN_EX
6141     case TARGET_F_SETOWN_EX:
6142         ret = F_SETOWN_EX;
6143         break;
6144 #endif
6145 #ifdef F_SETPIPE_SZ
6146     case TARGET_F_SETPIPE_SZ:
6147         ret = F_SETPIPE_SZ;
6148         break;
6149     case TARGET_F_GETPIPE_SZ:
6150         ret = F_GETPIPE_SZ;
6151         break;
6152 #endif
6153     default:
6154         ret = -TARGET_EINVAL;
6155         break;
6156     }
6157 
6158 #if defined(__powerpc64__)
6159     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6160      * 14, which are not supported by the kernel. The glibc fcntl call
6161      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6162      * the syscall directly, adjust to what is supported by the kernel.
6163      */
6164     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6165         ret -= F_GETLK64 - 5;
6166     }
6167 #endif
6168 
6169     return ret;
6170 }
6171 
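     /*
      * Translate flock lock types between target and host values.  The
      * FLOCK_TRANSTBL macro below expands, depending on how TRANSTBL_CONVERT
      * is defined at the point of use, to roughly:
      *
      *     switch (type) {
      *     case TARGET_F_RDLCK: return F_RDLCK;
      *     case TARGET_F_WRLCK: return F_WRLCK;
      *     ...
      *     }
      *
      * so the same table serves both conversion directions.
      */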
6172 #define FLOCK_TRANSTBL \
6173     switch (type) { \
6174     TRANSTBL_CONVERT(F_RDLCK); \
6175     TRANSTBL_CONVERT(F_WRLCK); \
6176     TRANSTBL_CONVERT(F_UNLCK); \
6177     TRANSTBL_CONVERT(F_EXLCK); \
6178     TRANSTBL_CONVERT(F_SHLCK); \
6179     }
6180 
6181 static int target_to_host_flock(int type)
6182 {
6183 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6184     FLOCK_TRANSTBL
6185 #undef  TRANSTBL_CONVERT
6186     return -TARGET_EINVAL;
6187 }
6188 
6189 static int host_to_target_flock(int type)
6190 {
6191 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6192     FLOCK_TRANSTBL
6193 #undef  TRANSTBL_CONVERT
6194     /* if we don't know how to convert the value coming
6195      * from the host, we copy it to the target field as-is
6196      */
6197     return type;
6198 }
6199 
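     /*
      * Copy struct flock data between guest and host.  The guest layouts
      * (struct target_flock / target_flock64) may differ from the host in
      * field widths and byte order, so each field is transferred
      * individually with __get_user()/__put_user().
      */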
6200 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6201                                             abi_ulong target_flock_addr)
6202 {
6203     struct target_flock *target_fl;
6204     int l_type;
6205 
6206     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6207         return -TARGET_EFAULT;
6208     }
6209 
6210     __get_user(l_type, &target_fl->l_type);
6211     l_type = target_to_host_flock(l_type);
6212     if (l_type < 0) {
6213         return l_type;
6214     }
6215     fl->l_type = l_type;
6216     __get_user(fl->l_whence, &target_fl->l_whence);
6217     __get_user(fl->l_start, &target_fl->l_start);
6218     __get_user(fl->l_len, &target_fl->l_len);
6219     __get_user(fl->l_pid, &target_fl->l_pid);
6220     unlock_user_struct(target_fl, target_flock_addr, 0);
6221     return 0;
6222 }
6223 
6224 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6225                                           const struct flock64 *fl)
6226 {
6227     struct target_flock *target_fl;
6228     short l_type;
6229 
6230     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6231         return -TARGET_EFAULT;
6232     }
6233 
6234     l_type = host_to_target_flock(fl->l_type);
6235     __put_user(l_type, &target_fl->l_type);
6236     __put_user(fl->l_whence, &target_fl->l_whence);
6237     __put_user(fl->l_start, &target_fl->l_start);
6238     __put_user(fl->l_len, &target_fl->l_len);
6239     __put_user(fl->l_pid, &target_fl->l_pid);
6240     unlock_user_struct(target_fl, target_flock_addr, 1);
6241     return 0;
6242 }
6243 
6244 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6245 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6246 
6247 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6248 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6249                                                    abi_ulong target_flock_addr)
6250 {
6251     struct target_oabi_flock64 *target_fl;
6252     int l_type;
6253 
6254     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6255         return -TARGET_EFAULT;
6256     }
6257 
6258     __get_user(l_type, &target_fl->l_type);
6259     l_type = target_to_host_flock(l_type);
6260     if (l_type < 0) {
6261         return l_type;
6262     }
6263     fl->l_type = l_type;
6264     __get_user(fl->l_whence, &target_fl->l_whence);
6265     __get_user(fl->l_start, &target_fl->l_start);
6266     __get_user(fl->l_len, &target_fl->l_len);
6267     __get_user(fl->l_pid, &target_fl->l_pid);
6268     unlock_user_struct(target_fl, target_flock_addr, 0);
6269     return 0;
6270 }
6271 
6272 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6273                                                  const struct flock64 *fl)
6274 {
6275     struct target_oabi_flock64 *target_fl;
6276     short l_type;
6277 
6278     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6279         return -TARGET_EFAULT;
6280     }
6281 
6282     l_type = host_to_target_flock(fl->l_type);
6283     __put_user(l_type, &target_fl->l_type);
6284     __put_user(fl->l_whence, &target_fl->l_whence);
6285     __put_user(fl->l_start, &target_fl->l_start);
6286     __put_user(fl->l_len, &target_fl->l_len);
6287     __put_user(fl->l_pid, &target_fl->l_pid);
6288     unlock_user_struct(target_fl, target_flock_addr, 1);
6289     return 0;
6290 }
6291 #endif
6292 
6293 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6294                                               abi_ulong target_flock_addr)
6295 {
6296     struct target_flock64 *target_fl;
6297     int l_type;
6298 
6299     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6300         return -TARGET_EFAULT;
6301     }
6302 
6303     __get_user(l_type, &target_fl->l_type);
6304     l_type = target_to_host_flock(l_type);
6305     if (l_type < 0) {
6306         return l_type;
6307     }
6308     fl->l_type = l_type;
6309     __get_user(fl->l_whence, &target_fl->l_whence);
6310     __get_user(fl->l_start, &target_fl->l_start);
6311     __get_user(fl->l_len, &target_fl->l_len);
6312     __get_user(fl->l_pid, &target_fl->l_pid);
6313     unlock_user_struct(target_fl, target_flock_addr, 0);
6314     return 0;
6315 }
6316 
6317 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6318                                             const struct flock64 *fl)
6319 {
6320     struct target_flock64 *target_fl;
6321     short l_type;
6322 
6323     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6324         return -TARGET_EFAULT;
6325     }
6326 
6327     l_type = host_to_target_flock(fl->l_type);
6328     __put_user(l_type, &target_fl->l_type);
6329     __put_user(fl->l_whence, &target_fl->l_whence);
6330     __put_user(fl->l_start, &target_fl->l_start);
6331     __put_user(fl->l_len, &target_fl->l_len);
6332     __put_user(fl->l_pid, &target_fl->l_pid);
6333     unlock_user_struct(target_fl, target_flock_addr, 1);
6334     return 0;
6335 }
6336 
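     /*
      * do_fcntl(): translate the guest fcntl command with
      * target_to_host_fcntl_cmd(), convert any struct flock argument from
      * guest to host layout, issue the host fcntl, and convert the results
      * (lock descriptions, file status flags) back into guest
      * representation.  For example, a guest fcntl(fd, F_GETLK, &fl) is
      * issued as a host F_GETLK64 on a struct flock64, and on success the
      * answer is copied back with copy_to_user_flock().
      */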
6337 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6338 {
6339     struct flock64 fl64;
6340 #ifdef F_GETOWN_EX
6341     struct f_owner_ex fox;
6342     struct target_f_owner_ex *target_fox;
6343 #endif
6344     abi_long ret;
6345     int host_cmd = target_to_host_fcntl_cmd(cmd);
6346 
6347     if (host_cmd == -TARGET_EINVAL)
6348         return host_cmd;
6349 
6350     switch(cmd) {
6351     case TARGET_F_GETLK:
6352         ret = copy_from_user_flock(&fl64, arg);
6353         if (ret) {
6354             return ret;
6355         }
6356         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6357         if (ret == 0) {
6358             ret = copy_to_user_flock(arg, &fl64);
6359         }
6360         break;
6361 
6362     case TARGET_F_SETLK:
6363     case TARGET_F_SETLKW:
6364         ret = copy_from_user_flock(&fl64, arg);
6365         if (ret) {
6366             return ret;
6367         }
6368         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6369         break;
6370 
6371     case TARGET_F_GETLK64:
6372         ret = copy_from_user_flock64(&fl64, arg);
6373         if (ret) {
6374             return ret;
6375         }
6376         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6377         if (ret == 0) {
6378             ret = copy_to_user_flock64(arg, &fl64);
6379         }
6380         break;
6381     case TARGET_F_SETLK64:
6382     case TARGET_F_SETLKW64:
6383         ret = copy_from_user_flock64(&fl64, arg);
6384         if (ret) {
6385             return ret;
6386         }
6387         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6388         break;
6389 
6390     case TARGET_F_GETFL:
6391         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6392         if (ret >= 0) {
6393             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6394         }
6395         break;
6396 
6397     case TARGET_F_SETFL:
6398         ret = get_errno(safe_fcntl(fd, host_cmd,
6399                                    target_to_host_bitmask(arg,
6400                                                           fcntl_flags_tbl)));
6401         break;
6402 
6403 #ifdef F_GETOWN_EX
6404     case TARGET_F_GETOWN_EX:
6405         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6406         if (ret >= 0) {
6407             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6408                 return -TARGET_EFAULT;
6409             target_fox->type = tswap32(fox.type);
6410             target_fox->pid = tswap32(fox.pid);
6411             unlock_user_struct(target_fox, arg, 1);
6412         }
6413         break;
6414 #endif
6415 
6416 #ifdef F_SETOWN_EX
6417     case TARGET_F_SETOWN_EX:
6418         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6419             return -TARGET_EFAULT;
6420         fox.type = tswap32(target_fox->type);
6421         fox.pid = tswap32(target_fox->pid);
6422         unlock_user_struct(target_fox, arg, 0);
6423         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6424         break;
6425 #endif
6426 
6427     case TARGET_F_SETOWN:
6428     case TARGET_F_GETOWN:
6429     case TARGET_F_SETSIG:
6430     case TARGET_F_GETSIG:
6431     case TARGET_F_SETLEASE:
6432     case TARGET_F_GETLEASE:
6433     case TARGET_F_SETPIPE_SZ:
6434     case TARGET_F_GETPIPE_SZ:
6435         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6436         break;
6437 
6438     default:
6439         ret = get_errno(safe_fcntl(fd, cmd, arg));
6440         break;
6441     }
6442     return ret;
6443 }
6444 
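     /*
      * UID/GID handling: targets built with USE_UID16 use 16-bit IDs at the
      * syscall ABI level, so host IDs above 65535 are clamped to the
      * overflow value 65534 on the way out, and a 16-bit "-1" is
      * sign-extended on the way in.  Without USE_UID16 the IDs pass through
      * unchanged.
      */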
6445 #ifdef USE_UID16
6446 
6447 static inline int high2lowuid(int uid)
6448 {
6449     if (uid > 65535)
6450         return 65534;
6451     else
6452         return uid;
6453 }
6454 
6455 static inline int high2lowgid(int gid)
6456 {
6457     if (gid > 65535)
6458         return 65534;
6459     else
6460         return gid;
6461 }
6462 
6463 static inline int low2highuid(int uid)
6464 {
6465     if ((int16_t)uid == -1)
6466         return -1;
6467     else
6468         return uid;
6469 }
6470 
6471 static inline int low2highgid(int gid)
6472 {
6473     if ((int16_t)gid == -1)
6474         return -1;
6475     else
6476         return gid;
6477 }
6478 static inline int tswapid(int id)
6479 {
6480     return tswap16(id);
6481 }
6482 
6483 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6484 
6485 #else /* !USE_UID16 */
6486 static inline int high2lowuid(int uid)
6487 {
6488     return uid;
6489 }
6490 static inline int high2lowgid(int gid)
6491 {
6492     return gid;
6493 }
6494 static inline int low2highuid(int uid)
6495 {
6496     return uid;
6497 }
6498 static inline int low2highgid(int gid)
6499 {
6500     return gid;
6501 }
6502 static inline int tswapid(int id)
6503 {
6504     return tswap32(id);
6505 }
6506 
6507 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6508 
6509 #endif /* USE_UID16 */
6510 
6511 /* We must do direct syscalls for setting UID/GID, because we want to
6512  * implement the Linux system call semantics of "change only for this thread",
6513  * not the libc/POSIX semantics of "change for all threads in process".
6514  * (See http://ewontfix.com/17/ for more details.)
6515  * We use the 32-bit version of the syscalls if present; if it is not
6516  * then either the host architecture supports 32-bit UIDs natively with
6517  * the standard syscall, or the 16-bit UID is the best we can do.
6518  */
6519 #ifdef __NR_setuid32
6520 #define __NR_sys_setuid __NR_setuid32
6521 #else
6522 #define __NR_sys_setuid __NR_setuid
6523 #endif
6524 #ifdef __NR_setgid32
6525 #define __NR_sys_setgid __NR_setgid32
6526 #else
6527 #define __NR_sys_setgid __NR_setgid
6528 #endif
6529 #ifdef __NR_setresuid32
6530 #define __NR_sys_setresuid __NR_setresuid32
6531 #else
6532 #define __NR_sys_setresuid __NR_setresuid
6533 #endif
6534 #ifdef __NR_setresgid32
6535 #define __NR_sys_setresgid __NR_setresgid32
6536 #else
6537 #define __NR_sys_setresgid __NR_setresgid
6538 #endif
6539 
6540 _syscall1(int, sys_setuid, uid_t, uid)
6541 _syscall1(int, sys_setgid, gid_t, gid)
6542 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6543 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6544 
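     /*
      * syscall_init(): register the struct layouts used by the thunking
      * code, build the target-to-host errno translation table from its
      * host-to-target counterpart, and patch the size field of ioctl
      * commands that were registered with an all-ones size, using the
      * argument size computed from the thunk type.
      */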
6545 void syscall_init(void)
6546 {
6547     IOCTLEntry *ie;
6548     const argtype *arg_type;
6549     int size;
6550     int i;
6551 
6552     thunk_init(STRUCT_MAX);
6553 
6554 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6555 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6556 #include "syscall_types.h"
6557 #undef STRUCT
6558 #undef STRUCT_SPECIAL
6559 
6560     /* Build target_to_host_errno_table[] table from
6561      * host_to_target_errno_table[]. */
6562     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6563         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6564     }
6565 
6566     /* We patch the ioctl size if necessary. We rely on the fact that
6567        no ioctl has all bits set to '1' in the size field */
6568     ie = ioctl_entries;
6569     while (ie->target_cmd != 0) {
6570         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6571             TARGET_IOC_SIZEMASK) {
6572             arg_type = ie->arg_type;
6573             if (arg_type[0] != TYPE_PTR) {
6574                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6575                         ie->target_cmd);
6576                 exit(1);
6577             }
6578             arg_type++;
6579             size = thunk_type_size(arg_type, 0);
6580             ie->target_cmd = (ie->target_cmd &
6581                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6582                 (size << TARGET_IOC_SIZESHIFT);
6583         }
6584 
6585         /* automatic consistency check if same arch */
6586 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6587     (defined(__x86_64__) && defined(TARGET_X86_64))
6588         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6589             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6590                     ie->name, ie->target_cmd, ie->host_cmd);
6591         }
6592 #endif
6593         ie++;
6594     }
6595 }
6596 
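     /*
      * On 32-bit ABIs, 64-bit file offsets are passed as two consecutive
      * registers; target_offset64() reassembles them in guest word order.
      * For example, on a little-endian 32-bit target, word0 = 0x00001000
      * and word1 = 0x2 yield the offset 0x200001000.
      */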
6597 #if TARGET_ABI_BITS == 32
6598 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6599 {
6600 #ifdef TARGET_WORDS_BIGENDIAN
6601     return ((uint64_t)word0 << 32) | word1;
6602 #else
6603     return ((uint64_t)word1 << 32) | word0;
6604 #endif
6605 }
6606 #else /* TARGET_ABI_BITS == 32 */
6607 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6608 {
6609     return word0;
6610 }
6611 #endif /* TARGET_ABI_BITS != 32 */
6612 
6613 #ifdef TARGET_NR_truncate64
6614 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6615                                          abi_long arg2,
6616                                          abi_long arg3,
6617                                          abi_long arg4)
6618 {
6619     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6620         arg2 = arg3;
6621         arg3 = arg4;
6622     }
6623     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6624 }
6625 #endif
6626 
6627 #ifdef TARGET_NR_ftruncate64
6628 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6629                                           abi_long arg2,
6630                                           abi_long arg3,
6631                                           abi_long arg4)
6632 {
6633     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6634         arg2 = arg3;
6635         arg3 = arg4;
6636     }
6637     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6638 }
6639 #endif
6640 
6641 #if defined(TARGET_NR_timer_settime) || \
6642     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6643 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6644                                                  abi_ulong target_addr)
6645 {
6646     struct target_itimerspec *target_itspec;
6647 
6648     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6649         return -TARGET_EFAULT;
6650     }
6651 
6652     host_itspec->it_interval.tv_sec =
6653                             tswapal(target_itspec->it_interval.tv_sec);
6654     host_itspec->it_interval.tv_nsec =
6655                             tswapal(target_itspec->it_interval.tv_nsec);
6656     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6657     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6658 
6659     unlock_user_struct(target_itspec, target_addr, 1);
6660     return 0;
6661 }
6662 #endif
6663 
6664 #if ((defined(TARGET_NR_timerfd_gettime) || \
6665       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6666     defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6667 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6668                                                struct itimerspec *host_its)
6669 {
6670     struct target_itimerspec *target_itspec;
6671 
6672     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6673         return -TARGET_EFAULT;
6674     }
6675 
6676     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6677     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6678 
6679     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6680     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6681 
6682     unlock_user_struct(target_itspec, target_addr, 0);
6683     return 0;
6684 }
6685 #endif
6686 
6687 #if defined(TARGET_NR_adjtimex) || \
6688     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6689 static inline abi_long target_to_host_timex(struct timex *host_tx,
6690                                             abi_long target_addr)
6691 {
6692     struct target_timex *target_tx;
6693 
6694     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6695         return -TARGET_EFAULT;
6696     }
6697 
6698     __get_user(host_tx->modes, &target_tx->modes);
6699     __get_user(host_tx->offset, &target_tx->offset);
6700     __get_user(host_tx->freq, &target_tx->freq);
6701     __get_user(host_tx->maxerror, &target_tx->maxerror);
6702     __get_user(host_tx->esterror, &target_tx->esterror);
6703     __get_user(host_tx->status, &target_tx->status);
6704     __get_user(host_tx->constant, &target_tx->constant);
6705     __get_user(host_tx->precision, &target_tx->precision);
6706     __get_user(host_tx->tolerance, &target_tx->tolerance);
6707     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6708     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6709     __get_user(host_tx->tick, &target_tx->tick);
6710     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6711     __get_user(host_tx->jitter, &target_tx->jitter);
6712     __get_user(host_tx->shift, &target_tx->shift);
6713     __get_user(host_tx->stabil, &target_tx->stabil);
6714     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6715     __get_user(host_tx->calcnt, &target_tx->calcnt);
6716     __get_user(host_tx->errcnt, &target_tx->errcnt);
6717     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6718     __get_user(host_tx->tai, &target_tx->tai);
6719 
6720     unlock_user_struct(target_tx, target_addr, 0);
6721     return 0;
6722 }
6723 
6724 static inline abi_long host_to_target_timex(abi_long target_addr,
6725                                             struct timex *host_tx)
6726 {
6727     struct target_timex *target_tx;
6728 
6729     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6730         return -TARGET_EFAULT;
6731     }
6732 
6733     __put_user(host_tx->modes, &target_tx->modes);
6734     __put_user(host_tx->offset, &target_tx->offset);
6735     __put_user(host_tx->freq, &target_tx->freq);
6736     __put_user(host_tx->maxerror, &target_tx->maxerror);
6737     __put_user(host_tx->esterror, &target_tx->esterror);
6738     __put_user(host_tx->status, &target_tx->status);
6739     __put_user(host_tx->constant, &target_tx->constant);
6740     __put_user(host_tx->precision, &target_tx->precision);
6741     __put_user(host_tx->tolerance, &target_tx->tolerance);
6742     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6743     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6744     __put_user(host_tx->tick, &target_tx->tick);
6745     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6746     __put_user(host_tx->jitter, &target_tx->jitter);
6747     __put_user(host_tx->shift, &target_tx->shift);
6748     __put_user(host_tx->stabil, &target_tx->stabil);
6749     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6750     __put_user(host_tx->calcnt, &target_tx->calcnt);
6751     __put_user(host_tx->errcnt, &target_tx->errcnt);
6752     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6753     __put_user(host_tx->tai, &target_tx->tai);
6754 
6755     unlock_user_struct(target_tx, target_addr, 1);
6756     return 0;
6757 }
6758 #endif
6759 
6760 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6761                                                abi_ulong target_addr)
6762 {
6763     struct target_sigevent *target_sevp;
6764 
6765     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6766         return -TARGET_EFAULT;
6767     }
6768 
6769     /* This union is awkward on 64 bit systems because it has a 32 bit
6770      * integer and a pointer in it; we follow the conversion approach
6771      * used for handling sigval types in signal.c so the guest should get
6772      * the correct value back even if we did a 64 bit byteswap and it's
6773      * using the 32 bit integer.
6774      */
6775     host_sevp->sigev_value.sival_ptr =
6776         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6777     host_sevp->sigev_signo =
6778         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6779     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6780     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6781 
6782     unlock_user_struct(target_sevp, target_addr, 1);
6783     return 0;
6784 }
6785 
6786 #if defined(TARGET_NR_mlockall)
6787 static inline int target_to_host_mlockall_arg(int arg)
6788 {
6789     int result = 0;
6790 
6791     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6792         result |= MCL_CURRENT;
6793     }
6794     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6795         result |= MCL_FUTURE;
6796     }
6797     return result;
6798 }
6799 #endif
6800 
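     /*
      * host_to_target_stat64(): copy a host struct stat into the guest's
      * stat64 layout (or the EABI variant on 32-bit ARM), swapping each
      * field into guest byte order with __put_user().
      */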
6801 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6802      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6803      defined(TARGET_NR_newfstatat))
6804 static inline abi_long host_to_target_stat64(void *cpu_env,
6805                                              abi_ulong target_addr,
6806                                              struct stat *host_st)
6807 {
6808 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6809     if (((CPUARMState *)cpu_env)->eabi) {
6810         struct target_eabi_stat64 *target_st;
6811 
6812         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6813             return -TARGET_EFAULT;
6814         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6815         __put_user(host_st->st_dev, &target_st->st_dev);
6816         __put_user(host_st->st_ino, &target_st->st_ino);
6817 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6818         __put_user(host_st->st_ino, &target_st->__st_ino);
6819 #endif
6820         __put_user(host_st->st_mode, &target_st->st_mode);
6821         __put_user(host_st->st_nlink, &target_st->st_nlink);
6822         __put_user(host_st->st_uid, &target_st->st_uid);
6823         __put_user(host_st->st_gid, &target_st->st_gid);
6824         __put_user(host_st->st_rdev, &target_st->st_rdev);
6825         __put_user(host_st->st_size, &target_st->st_size);
6826         __put_user(host_st->st_blksize, &target_st->st_blksize);
6827         __put_user(host_st->st_blocks, &target_st->st_blocks);
6828         __put_user(host_st->st_atime, &target_st->target_st_atime);
6829         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6830         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6831 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6832         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6833         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6834         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6835 #endif
6836         unlock_user_struct(target_st, target_addr, 1);
6837     } else
6838 #endif
6839     {
6840 #if defined(TARGET_HAS_STRUCT_STAT64)
6841         struct target_stat64 *target_st;
6842 #else
6843         struct target_stat *target_st;
6844 #endif
6845 
6846         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6847             return -TARGET_EFAULT;
6848         memset(target_st, 0, sizeof(*target_st));
6849         __put_user(host_st->st_dev, &target_st->st_dev);
6850         __put_user(host_st->st_ino, &target_st->st_ino);
6851 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6852         __put_user(host_st->st_ino, &target_st->__st_ino);
6853 #endif
6854         __put_user(host_st->st_mode, &target_st->st_mode);
6855         __put_user(host_st->st_nlink, &target_st->st_nlink);
6856         __put_user(host_st->st_uid, &target_st->st_uid);
6857         __put_user(host_st->st_gid, &target_st->st_gid);
6858         __put_user(host_st->st_rdev, &target_st->st_rdev);
6859         /* XXX: better use of kernel struct */
6860         __put_user(host_st->st_size, &target_st->st_size);
6861         __put_user(host_st->st_blksize, &target_st->st_blksize);
6862         __put_user(host_st->st_blocks, &target_st->st_blocks);
6863         __put_user(host_st->st_atime, &target_st->target_st_atime);
6864         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6865         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6866 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6867         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6868         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6869         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6870 #endif
6871         unlock_user_struct(target_st, target_addr, 1);
6872     }
6873 
6874     return 0;
6875 }
6876 #endif
6877 
6878 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6879 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6880                                             abi_ulong target_addr)
6881 {
6882     struct target_statx *target_stx;
6883 
6884     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6885         return -TARGET_EFAULT;
6886     }
6887     memset(target_stx, 0, sizeof(*target_stx));
6888 
6889     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6890     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6891     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6892     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6893     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6894     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6895     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6896     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6897     __put_user(host_stx->stx_size, &target_stx->stx_size);
6898     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6899     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6900     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6901     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6902     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6903     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6904     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6905     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6906     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6907     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6908     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6909     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6910     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6911     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6912 
6913     unlock_user_struct(target_stx, target_addr, 1);
6914 
6915     return 0;
6916 }
6917 #endif
6918 
6919 
6920 /* ??? Using host futex calls even when target atomic operations
6921    are not really atomic probably breaks things.  However, implementing
6922    futexes locally would make futexes shared between multiple processes
6923    tricky.  In that case they are probably useless anyway, because guest
6924    atomic operations won't work either.  */
6925 #if defined(TARGET_NR_futex)
6926 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6927                     target_ulong uaddr2, int val3)
6928 {
6929     struct timespec ts, *pts;
6930     int base_op;
6931 
6932     /* ??? We assume FUTEX_* constants are the same on both host
6933        and target.  */
6934 #ifdef FUTEX_CMD_MASK
6935     base_op = op & FUTEX_CMD_MASK;
6936 #else
6937     base_op = op;
6938 #endif
6939     switch (base_op) {
6940     case FUTEX_WAIT:
6941     case FUTEX_WAIT_BITSET:
6942         if (timeout) {
6943             pts = &ts;
6944             target_to_host_timespec(pts, timeout);
6945         } else {
6946             pts = NULL;
6947         }
6948         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6949                          pts, NULL, val3));
6950     case FUTEX_WAKE:
6951         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6952     case FUTEX_FD:
6953         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6954     case FUTEX_REQUEUE:
6955     case FUTEX_CMP_REQUEUE:
6956     case FUTEX_WAKE_OP:
6957         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6958            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6959            But the prototype takes a `struct timespec *'; insert casts
6960            to satisfy the compiler.  We do not need to tswap TIMEOUT
6961            since it's not compared to guest memory.  */
6962         pts = (struct timespec *)(uintptr_t) timeout;
6963         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6964                                     g2h(uaddr2),
6965                                     (base_op == FUTEX_CMP_REQUEUE
6966                                      ? tswap32(val3)
6967                                      : val3)));
6968     default:
6969         return -TARGET_ENOSYS;
6970     }
6971 }
6972 #endif
6973 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
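     /*
      * do_name_to_handle_at(): the guest supplies a struct file_handle whose
      * handle_bytes field gives the buffer size.  We call the host syscall
      * into a scratch buffer, then copy the result back to guest memory with
      * handle_bytes and handle_type byte-swapped, and store the mount id
      * through the separate mount_id pointer.
      */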
6974 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6975                                      abi_long handle, abi_long mount_id,
6976                                      abi_long flags)
6977 {
6978     struct file_handle *target_fh;
6979     struct file_handle *fh;
6980     int mid = 0;
6981     abi_long ret;
6982     char *name;
6983     unsigned int size, total_size;
6984 
6985     if (get_user_s32(size, handle)) {
6986         return -TARGET_EFAULT;
6987     }
6988 
6989     name = lock_user_string(pathname);
6990     if (!name) {
6991         return -TARGET_EFAULT;
6992     }
6993 
6994     total_size = sizeof(struct file_handle) + size;
6995     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6996     if (!target_fh) {
6997         unlock_user(name, pathname, 0);
6998         return -TARGET_EFAULT;
6999     }
7000 
7001     fh = g_malloc0(total_size);
7002     fh->handle_bytes = size;
7003 
7004     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7005     unlock_user(name, pathname, 0);
7006 
7007     /* man name_to_handle_at(2):
7008      * Other than the use of the handle_bytes field, the caller should treat
7009      * the file_handle structure as an opaque data type
7010      */
7011 
7012     memcpy(target_fh, fh, total_size);
7013     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7014     target_fh->handle_type = tswap32(fh->handle_type);
7015     g_free(fh);
7016     unlock_user(target_fh, handle, total_size);
7017 
7018     if (put_user_s32(mid, mount_id)) {
7019         return -TARGET_EFAULT;
7020     }
7021 
7022     return ret;
7023 
7024 }
7025 #endif
7026 
7027 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7028 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7029                                      abi_long flags)
7030 {
7031     struct file_handle *target_fh;
7032     struct file_handle *fh;
7033     unsigned int size, total_size;
7034     abi_long ret;
7035 
7036     if (get_user_s32(size, handle)) {
7037         return -TARGET_EFAULT;
7038     }
7039 
7040     total_size = sizeof(struct file_handle) + size;
7041     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7042     if (!target_fh) {
7043         return -TARGET_EFAULT;
7044     }
7045 
7046     fh = g_memdup(target_fh, total_size);
7047     fh->handle_bytes = size;
7048     fh->handle_type = tswap32(target_fh->handle_type);
7049 
7050     ret = get_errno(open_by_handle_at(mount_fd, fh,
7051                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7052 
7053     g_free(fh);
7054 
7055     unlock_user(target_fh, handle, total_size);
7056 
7057     return ret;
7058 }
7059 #endif
7060 
7061 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7062 
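     /*
      * do_signalfd4(): convert the guest signal mask and flags to host
      * representation, create the signalfd, and register the resulting fd
      * with fd_trans_register() so that data read back from it goes through
      * the signalfd translator.
      */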
7063 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7064 {
7065     int host_flags;
7066     target_sigset_t *target_mask;
7067     sigset_t host_mask;
7068     abi_long ret;
7069 
7070     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7071         return -TARGET_EINVAL;
7072     }
7073     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7074         return -TARGET_EFAULT;
7075     }
7076 
7077     target_to_host_sigset(&host_mask, target_mask);
7078 
7079     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7080 
7081     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7082     if (ret >= 0) {
7083         fd_trans_register(ret, &target_signalfd_trans);
7084     }
7085 
7086     unlock_user_struct(target_mask, mask, 0);
7087 
7088     return ret;
7089 }
7090 #endif
7091 
7092 /* Map host to target signal numbers for the wait family of syscalls.
7093    Assume all other status bits are the same.  */
7094 int host_to_target_waitstatus(int status)
7095 {
7096     if (WIFSIGNALED(status)) {
7097         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7098     }
7099     if (WIFSTOPPED(status)) {
7100         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7101                | (status & 0xff);
7102     }
7103     return status;
7104 }
7105 
7106 static int open_self_cmdline(void *cpu_env, int fd)
7107 {
7108     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7109     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7110     int i;
7111 
7112     for (i = 0; i < bprm->argc; i++) {
7113         size_t len = strlen(bprm->argv[i]) + 1;
7114 
7115         if (write(fd, bprm->argv[i], len) != len) {
7116             return -1;
7117         }
7118     }
7119 
7120     return 0;
7121 }
7122 
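     /*
      * open_self_maps(): synthesize /proc/self/maps for the guest by
      * reading the host's maps, keeping only ranges that correspond to
      * guest memory, rewriting the addresses with h2g() and tagging the
      * guest stack, so the guest sees its own address space layout.
      */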
7123 static int open_self_maps(void *cpu_env, int fd)
7124 {
7125     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7126     TaskState *ts = cpu->opaque;
7127     FILE *fp;
7128     char *line = NULL;
7129     size_t len = 0;
7130     ssize_t read;
7131 
7132     fp = fopen("/proc/self/maps", "r");
7133     if (fp == NULL) {
7134         return -1;
7135     }
7136 
7137     while ((read = getline(&line, &len, fp)) != -1) {
7138         int fields, dev_maj, dev_min, inode;
7139         uint64_t min, max, offset;
7140         char flag_r, flag_w, flag_x, flag_p;
7141         char path[512] = "";
7142         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7143                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7144                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7145 
7146         if ((fields < 10) || (fields > 11)) {
7147             continue;
7148         }
7149         if (h2g_valid(min)) {
7150             int flags = page_get_flags(h2g(min));
7151             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7152             if (page_check_range(h2g(min), max - min, flags) == -1) {
7153                 continue;
7154             }
7155             if (h2g(min) == ts->info->stack_limit) {
7156                 pstrcpy(path, sizeof(path), "      [stack]");
7157             }
7158             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7159                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7160                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7161                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7162                     path[0] ? "         " : "", path);
7163         }
7164     }
7165 
7166 #ifdef TARGET_VSYSCALL_PAGE
7167     /*
7168      * We only support execution from the vsyscall page.
7169      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7170      */
7171     dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7172             " --xp 00000000 00:00 0 [vsyscall]\n",
7173             TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7174 #endif
7175 
7176     free(line);
7177     fclose(fp);
7178 
7179     return 0;
7180 }
7181 
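     /*
      * open_self_stat(): emit a minimal /proc/self/stat with real values
      * only for the pid, the command name and the stack start field
      * (index 27); every other field is reported as 0.
      */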
7182 static int open_self_stat(void *cpu_env, int fd)
7183 {
7184     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7185     TaskState *ts = cpu->opaque;
7186     abi_ulong start_stack = ts->info->start_stack;
7187     int i;
7188 
7189     for (i = 0; i < 44; i++) {
7190       char buf[128];
7191       int len;
7192       uint64_t val = 0;
7193 
7194       if (i == 0) {
7195         /* pid */
7196         val = getpid();
7197         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7198       } else if (i == 1) {
7199         /* app name */
7200         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7201       } else if (i == 27) {
7202         /* stack bottom */
7203         val = start_stack;
7204         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7205       } else {
7206         /* for the rest, there is MasterCard */
7207         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7208       }
7209 
7210       len = strlen(buf);
7211       if (write(fd, buf, len) != len) {
7212           return -1;
7213       }
7214     }
7215 
7216     return 0;
7217 }
7218 
7219 static int open_self_auxv(void *cpu_env, int fd)
7220 {
7221     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7222     TaskState *ts = cpu->opaque;
7223     abi_ulong auxv = ts->info->saved_auxv;
7224     abi_ulong len = ts->info->auxv_len;
7225     char *ptr;
7226 
7227     /*
7228      * The auxiliary vector is stored on the target process stack.
7229      * Read in the whole auxv vector and copy it to the file.
7230      */
7231     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7232     if (ptr != NULL) {
7233         while (len > 0) {
7234             ssize_t r;
7235             r = write(fd, ptr, len);
7236             if (r <= 0) {
7237                 break;
7238             }
7239             len -= r;
7240             ptr += r;
7241         }
7242         lseek(fd, 0, SEEK_SET);
7243         unlock_user(ptr, auxv, len);
7244     }
7245 
7246     return 0;
7247 }
7248 
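     /*
      * is_proc_myself(): returns nonzero when 'filename' names the given
      * entry of the current process under /proc; e.g. both
      * "/proc/self/maps" and "/proc/<our pid>/maps" match entry "maps".
      */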
7249 static int is_proc_myself(const char *filename, const char *entry)
7250 {
7251     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7252         filename += strlen("/proc/");
7253         if (!strncmp(filename, "self/", strlen("self/"))) {
7254             filename += strlen("self/");
7255         } else if (*filename >= '1' && *filename <= '9') {
7256             char myself[80];
7257             snprintf(myself, sizeof(myself), "%d/", getpid());
7258             if (!strncmp(filename, myself, strlen(myself))) {
7259                 filename += strlen(myself);
7260             } else {
7261                 return 0;
7262             }
7263         } else {
7264             return 0;
7265         }
7266         if (!strcmp(filename, entry)) {
7267             return 1;
7268         }
7269     }
7270     return 0;
7271 }
7272 
7273 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7274     defined(TARGET_SPARC) || defined(TARGET_M68K)
7275 static int is_proc(const char *filename, const char *entry)
7276 {
7277     return strcmp(filename, entry) == 0;
7278 }
7279 #endif
7280 
7281 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7282 static int open_net_route(void *cpu_env, int fd)
7283 {
7284     FILE *fp;
7285     char *line = NULL;
7286     size_t len = 0;
7287     ssize_t read;
7288 
7289     fp = fopen("/proc/net/route", "r");
7290     if (fp == NULL) {
7291         return -1;
7292     }
7293 
7294     /* read header */
7295 
7296     read = getline(&line, &len, fp);
7297     dprintf(fd, "%s", line);
7298 
7299     /* read routes */
7300 
7301     while ((read = getline(&line, &len, fp)) != -1) {
7302         char iface[16];
7303         uint32_t dest, gw, mask;
7304         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7305         int fields;
7306 
7307         fields = sscanf(line,
7308                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7309                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7310                         &mask, &mtu, &window, &irtt);
7311         if (fields != 11) {
7312             continue;
7313         }
7314         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7315                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7316                 metric, tswap32(mask), mtu, window, irtt);
7317     }
7318 
7319     free(line);
7320     fclose(fp);
7321 
7322     return 0;
7323 }
7324 #endif
7325 
7326 #if defined(TARGET_SPARC)
7327 static int open_cpuinfo(void *cpu_env, int fd)
7328 {
7329     dprintf(fd, "type\t\t: sun4u\n");
7330     return 0;
7331 }
7332 #endif
7333 
7334 #if defined(TARGET_M68K)
7335 static int open_hardware(void *cpu_env, int fd)
7336 {
7337     dprintf(fd, "Model:\t\tqemu-m68k\n");
7338     return 0;
7339 }
7340 #endif
7341 
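     /*
      * do_openat(): intercept opens of the /proc files that must be
      * emulated (maps, stat, auxv, cmdline, plus a few arch- or
      * endian-specific entries).  Matching entries are materialized into an
      * unlinked temporary file whose descriptor is returned; "/proc/self/exe"
      * is redirected to the executable, and everything else is passed to the
      * host openat().
      */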
7342 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7343 {
7344     struct fake_open {
7345         const char *filename;
7346         int (*fill)(void *cpu_env, int fd);
7347         int (*cmp)(const char *s1, const char *s2);
7348     };
7349     const struct fake_open *fake_open;
7350     static const struct fake_open fakes[] = {
7351         { "maps", open_self_maps, is_proc_myself },
7352         { "stat", open_self_stat, is_proc_myself },
7353         { "auxv", open_self_auxv, is_proc_myself },
7354         { "cmdline", open_self_cmdline, is_proc_myself },
7355 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7356         { "/proc/net/route", open_net_route, is_proc },
7357 #endif
7358 #if defined(TARGET_SPARC)
7359         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7360 #endif
7361 #if defined(TARGET_M68K)
7362         { "/proc/hardware", open_hardware, is_proc },
7363 #endif
7364         { NULL, NULL, NULL }
7365     };
7366 
7367     if (is_proc_myself(pathname, "exe")) {
7368         int execfd = qemu_getauxval(AT_EXECFD);
7369         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7370     }
7371 
7372     for (fake_open = fakes; fake_open->filename; fake_open++) {
7373         if (fake_open->cmp(pathname, fake_open->filename)) {
7374             break;
7375         }
7376     }
7377 
7378     if (fake_open->filename) {
7379         const char *tmpdir;
7380         char filename[PATH_MAX];
7381         int fd, r;
7382 
7383         /* create a temporary file to hold the synthesized content */
7384         tmpdir = getenv("TMPDIR");
7385         if (!tmpdir)
7386             tmpdir = "/tmp";
7387         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7388         fd = mkstemp(filename);
7389         if (fd < 0) {
7390             return fd;
7391         }
7392         unlink(filename);
7393 
7394         if ((r = fake_open->fill(cpu_env, fd))) {
7395             int e = errno;
7396             close(fd);
7397             errno = e;
7398             return r;
7399         }
7400         lseek(fd, 0, SEEK_SET);
7401 
7402         return fd;
7403     }
7404 
7405     return safe_openat(dirfd, path(pathname), flags, mode);
7406 }
7407 
7408 #define TIMER_MAGIC 0x0caf0000
7409 #define TIMER_MAGIC_MASK 0xffff0000
7410 
7411 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7412 static target_timer_t get_timer_id(abi_long arg)
7413 {
7414     target_timer_t timerid = arg;
7415 
7416     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7417         return -TARGET_EINVAL;
7418     }
7419 
7420     timerid &= 0xffff;
7421 
7422     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7423         return -TARGET_EINVAL;
7424     }
7425 
7426     return timerid;
7427 }
7428 
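     /*
      * Convert CPU affinity masks between the guest's abi_ulong-sized words
      * and the host's unsigned long words, preserving bit numbering.  For
      * example, a 32-bit guest mask of two words {0x0000000f, 0x00000001}
      * packs into the single 64-bit host word 0x000000010000000f.
      */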
7429 static int target_to_host_cpu_mask(unsigned long *host_mask,
7430                                    size_t host_size,
7431                                    abi_ulong target_addr,
7432                                    size_t target_size)
7433 {
7434     unsigned target_bits = sizeof(abi_ulong) * 8;
7435     unsigned host_bits = sizeof(*host_mask) * 8;
7436     abi_ulong *target_mask;
7437     unsigned i, j;
7438 
7439     assert(host_size >= target_size);
7440 
7441     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7442     if (!target_mask) {
7443         return -TARGET_EFAULT;
7444     }
7445     memset(host_mask, 0, host_size);
7446 
7447     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7448         unsigned bit = i * target_bits;
7449         abi_ulong val;
7450 
7451         __get_user(val, &target_mask[i]);
7452         for (j = 0; j < target_bits; j++, bit++) {
7453             if (val & (1UL << j)) {
7454                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7455             }
7456         }
7457     }
7458 
7459     unlock_user(target_mask, target_addr, 0);
7460     return 0;
7461 }
7462 
7463 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7464                                    size_t host_size,
7465                                    abi_ulong target_addr,
7466                                    size_t target_size)
7467 {
7468     unsigned target_bits = sizeof(abi_ulong) * 8;
7469     unsigned host_bits = sizeof(*host_mask) * 8;
7470     abi_ulong *target_mask;
7471     unsigned i, j;
7472 
7473     assert(host_size >= target_size);
7474 
7475     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7476     if (!target_mask) {
7477         return -TARGET_EFAULT;
7478     }
7479 
7480     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7481         unsigned bit = i * target_bits;
7482         abi_ulong val = 0;
7483 
7484         for (j = 0; j < target_bits; j++, bit++) {
7485             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7486                 val |= 1UL << j;
7487             }
7488         }
7489         __put_user(val, &target_mask[i]);
7490     }
7491 
7492     unlock_user(target_mask, target_addr, target_size);
7493     return 0;
7494 }
7495 
7496 /* This is an internal helper for do_syscall so that it is easier
7497  * to have a single return point, so that actions, such as logging
7498  * of syscall results, can be performed.
7499  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7500  */
7501 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7502                             abi_long arg2, abi_long arg3, abi_long arg4,
7503                             abi_long arg5, abi_long arg6, abi_long arg7,
7504                             abi_long arg8)
7505 {
7506     CPUState *cpu = env_cpu(cpu_env);
7507     abi_long ret;
7508 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7509     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7510     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7511     || defined(TARGET_NR_statx)
7512     struct stat st;
7513 #endif
7514 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7515     || defined(TARGET_NR_fstatfs)
7516     struct statfs stfs;
7517 #endif
7518     void *p;
7519 
7520     switch(num) {
7521     case TARGET_NR_exit:
7522         /* In old applications this may be used to implement _exit(2).
7523            However, in threaded applications it is used for thread termination,
7524            and _exit_group is used for application termination.
7525            Do thread termination if we have more than one thread.  */
7526 
7527         if (block_signals()) {
7528             return -TARGET_ERESTARTSYS;
7529         }
7530 
7531         cpu_list_lock();
7532 
7533         if (CPU_NEXT(first_cpu)) {
7534             TaskState *ts;
7535 
7536             /* Remove the CPU from the list.  */
7537             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7538 
7539             cpu_list_unlock();
7540 
7541             ts = cpu->opaque;
7542             if (ts->child_tidptr) {
7543                 put_user_u32(0, ts->child_tidptr);
7544                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7545                           NULL, NULL, 0);
7546             }
7547             thread_cpu = NULL;
7548             object_unref(OBJECT(cpu));
7549             g_free(ts);
7550             rcu_unregister_thread();
7551             pthread_exit(NULL);
7552         }
7553 
7554         cpu_list_unlock();
7555         preexit_cleanup(cpu_env, arg1);
7556         _exit(arg1);
7557         return 0; /* avoid warning */
7558     case TARGET_NR_read:
7559         if (arg2 == 0 && arg3 == 0) {
7560             return get_errno(safe_read(arg1, 0, 0));
7561         } else {
7562             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7563                 return -TARGET_EFAULT;
7564             ret = get_errno(safe_read(arg1, p, arg3));
7565             if (ret >= 0 &&
7566                 fd_trans_host_to_target_data(arg1)) {
7567                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7568             }
7569             unlock_user(p, arg2, ret);
7570         }
7571         return ret;
7572     case TARGET_NR_write:
7573         if (arg2 == 0 && arg3 == 0) {
7574             return get_errno(safe_write(arg1, 0, 0));
7575         }
7576         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7577             return -TARGET_EFAULT;
7578         if (fd_trans_target_to_host_data(arg1)) {
7579             void *copy = g_malloc(arg3);
7580             memcpy(copy, p, arg3);
7581             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7582             if (ret >= 0) {
7583                 ret = get_errno(safe_write(arg1, copy, ret));
7584             }
7585             g_free(copy);
7586         } else {
7587             ret = get_errno(safe_write(arg1, p, arg3));
7588         }
7589         unlock_user(p, arg2, 0);
7590         return ret;
7591 
7592 #ifdef TARGET_NR_open
7593     case TARGET_NR_open:
7594         if (!(p = lock_user_string(arg1)))
7595             return -TARGET_EFAULT;
7596         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7597                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7598                                   arg3));
7599         fd_trans_unregister(ret);
7600         unlock_user(p, arg1, 0);
7601         return ret;
7602 #endif
7603     case TARGET_NR_openat:
7604         if (!(p = lock_user_string(arg2)))
7605             return -TARGET_EFAULT;
7606         ret = get_errno(do_openat(cpu_env, arg1, p,
7607                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7608                                   arg4));
7609         fd_trans_unregister(ret);
7610         unlock_user(p, arg2, 0);
7611         return ret;
7612 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7613     case TARGET_NR_name_to_handle_at:
7614         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7615         return ret;
7616 #endif
7617 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7618     case TARGET_NR_open_by_handle_at:
7619         ret = do_open_by_handle_at(arg1, arg2, arg3);
7620         fd_trans_unregister(ret);
7621         return ret;
7622 #endif
7623     case TARGET_NR_close:
7624         fd_trans_unregister(arg1);
7625         return get_errno(close(arg1));
7626 
7627     case TARGET_NR_brk:
7628         return do_brk(arg1);
7629 #ifdef TARGET_NR_fork
7630     case TARGET_NR_fork:
7631         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7632 #endif
7633 #ifdef TARGET_NR_waitpid
7634     case TARGET_NR_waitpid:
7635         {
7636             int status;
7637             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7638             if (!is_error(ret) && arg2 && ret
7639                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7640                 return -TARGET_EFAULT;
7641         }
7642         return ret;
7643 #endif
7644 #ifdef TARGET_NR_waitid
7645     case TARGET_NR_waitid:
7646         {
7647             siginfo_t info;
7648             info.si_pid = 0;
7649             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7650             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7651                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7652                     return -TARGET_EFAULT;
7653                 host_to_target_siginfo(p, &info);
7654                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7655             }
7656         }
7657         return ret;
7658 #endif
7659 #ifdef TARGET_NR_creat /* not on alpha */
7660     case TARGET_NR_creat:
7661         if (!(p = lock_user_string(arg1)))
7662             return -TARGET_EFAULT;
7663         ret = get_errno(creat(p, arg2));
7664         fd_trans_unregister(ret);
7665         unlock_user(p, arg1, 0);
7666         return ret;
7667 #endif
7668 #ifdef TARGET_NR_link
7669     case TARGET_NR_link:
7670         {
7671             void * p2;
7672             p = lock_user_string(arg1);
7673             p2 = lock_user_string(arg2);
7674             if (!p || !p2)
7675                 ret = -TARGET_EFAULT;
7676             else
7677                 ret = get_errno(link(p, p2));
7678             unlock_user(p2, arg2, 0);
7679             unlock_user(p, arg1, 0);
7680         }
7681         return ret;
7682 #endif
7683 #if defined(TARGET_NR_linkat)
7684     case TARGET_NR_linkat:
7685         {
7686             void * p2 = NULL;
7687             if (!arg2 || !arg4)
7688                 return -TARGET_EFAULT;
7689             p  = lock_user_string(arg2);
7690             p2 = lock_user_string(arg4);
7691             if (!p || !p2)
7692                 ret = -TARGET_EFAULT;
7693             else
7694                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7695             unlock_user(p, arg2, 0);
7696             unlock_user(p2, arg4, 0);
7697         }
7698         return ret;
7699 #endif
7700 #ifdef TARGET_NR_unlink
7701     case TARGET_NR_unlink:
7702         if (!(p = lock_user_string(arg1)))
7703             return -TARGET_EFAULT;
7704         ret = get_errno(unlink(p));
7705         unlock_user(p, arg1, 0);
7706         return ret;
7707 #endif
7708 #if defined(TARGET_NR_unlinkat)
7709     case TARGET_NR_unlinkat:
7710         if (!(p = lock_user_string(arg2)))
7711             return -TARGET_EFAULT;
7712         ret = get_errno(unlinkat(arg1, p, arg3));
7713         unlock_user(p, arg2, 0);
7714         return ret;
7715 #endif
7716     case TARGET_NR_execve:
7717         {
7718             char **argp, **envp;
7719             int argc, envc;
7720             abi_ulong gp;
7721             abi_ulong guest_argp;
7722             abi_ulong guest_envp;
7723             abi_ulong addr;
7724             char **q;
7725             int total_size = 0;
7726 
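            /* First pass: count the argv and envp entries so the host arrays can be allocated. */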
7727             argc = 0;
7728             guest_argp = arg2;
7729             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7730                 if (get_user_ual(addr, gp))
7731                     return -TARGET_EFAULT;
7732                 if (!addr)
7733                     break;
7734                 argc++;
7735             }
7736             envc = 0;
7737             guest_envp = arg3;
7738             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7739                 if (get_user_ual(addr, gp))
7740                     return -TARGET_EFAULT;
7741                 if (!addr)
7742                     break;
7743                 envc++;
7744             }
7745 
7746             argp = g_new0(char *, argc + 1);
7747             envp = g_new0(char *, envc + 1);
7748 
7749             for (gp = guest_argp, q = argp; gp;
7750                   gp += sizeof(abi_ulong), q++) {
7751                 if (get_user_ual(addr, gp))
7752                     goto execve_efault;
7753                 if (!addr)
7754                     break;
7755                 if (!(*q = lock_user_string(addr)))
7756                     goto execve_efault;
7757                 total_size += strlen(*q) + 1;
7758             }
7759             *q = NULL;
7760 
7761             for (gp = guest_envp, q = envp; gp;
7762                   gp += sizeof(abi_ulong), q++) {
7763                 if (get_user_ual(addr, gp))
7764                     goto execve_efault;
7765                 if (!addr)
7766                     break;
7767                 if (!(*q = lock_user_string(addr)))
7768                     goto execve_efault;
7769                 total_size += strlen(*q) + 1;
7770             }
7771             *q = NULL;
7772 
7773             if (!(p = lock_user_string(arg1)))
7774                 goto execve_efault;
7775             /* Although execve() is not an interruptible syscall, it is
7776              * a special case where we must use the safe_syscall wrapper:
7777              * if we allow a signal to happen before we make the host
7778              * syscall then we will 'lose' it, because at the point of
7779              * execve the process leaves QEMU's control. So we use the
7780              * safe syscall wrapper to ensure that we either take the
7781              * signal as a guest signal, or else it does not happen
7782              * before the execve completes and makes it the other
7783              * program's problem.
7784              */
7785             ret = get_errno(safe_execve(p, argp, envp));
7786             unlock_user(p, arg1, 0);
7787 
7788             goto execve_end;
7789 
7790         execve_efault:
7791             ret = -TARGET_EFAULT;
7792 
7793         execve_end:
7794             for (gp = guest_argp, q = argp; *q;
7795                   gp += sizeof(abi_ulong), q++) {
7796                 if (get_user_ual(addr, gp)
7797                     || !addr)
7798                     break;
7799                 unlock_user(*q, addr, 0);
7800             }
7801             for (gp = guest_envp, q = envp; *q;
7802                   gp += sizeof(abi_ulong), q++) {
7803                 if (get_user_ual(addr, gp)
7804                     || !addr)
7805                     break;
7806                 unlock_user(*q, addr, 0);
7807             }
7808 
7809             g_free(argp);
7810             g_free(envp);
7811         }
7812         return ret;
7813     case TARGET_NR_chdir:
7814         if (!(p = lock_user_string(arg1)))
7815             return -TARGET_EFAULT;
7816         ret = get_errno(chdir(p));
7817         unlock_user(p, arg1, 0);
7818         return ret;
7819 #ifdef TARGET_NR_time
7820     case TARGET_NR_time:
7821         {
7822             time_t host_time;
7823             ret = get_errno(time(&host_time));
7824             if (!is_error(ret)
7825                 && arg1
7826                 && put_user_sal(host_time, arg1))
7827                 return -TARGET_EFAULT;
7828         }
7829         return ret;
7830 #endif
7831 #ifdef TARGET_NR_mknod
7832     case TARGET_NR_mknod:
7833         if (!(p = lock_user_string(arg1)))
7834             return -TARGET_EFAULT;
7835         ret = get_errno(mknod(p, arg2, arg3));
7836         unlock_user(p, arg1, 0);
7837         return ret;
7838 #endif
7839 #if defined(TARGET_NR_mknodat)
7840     case TARGET_NR_mknodat:
7841         if (!(p = lock_user_string(arg2)))
7842             return -TARGET_EFAULT;
7843         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7844         unlock_user(p, arg2, 0);
7845         return ret;
7846 #endif
7847 #ifdef TARGET_NR_chmod
7848     case TARGET_NR_chmod:
7849         if (!(p = lock_user_string(arg1)))
7850             return -TARGET_EFAULT;
7851         ret = get_errno(chmod(p, arg2));
7852         unlock_user(p, arg1, 0);
7853         return ret;
7854 #endif
7855 #ifdef TARGET_NR_lseek
7856     case TARGET_NR_lseek:
7857         return get_errno(lseek(arg1, arg2, arg3));
7858 #endif
7859 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7860     /* Alpha specific */
7861     case TARGET_NR_getxpid:
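        /* getxpid returns the current pid in v0 and the parent pid in a4. */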
7862         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7863         return get_errno(getpid());
7864 #endif
7865 #ifdef TARGET_NR_getpid
7866     case TARGET_NR_getpid:
7867         return get_errno(getpid());
7868 #endif
7869     case TARGET_NR_mount:
7870         {
7871             /* The path arguments are guest strings; the data field (arg5) needs a closer look, see the FIXME below. */
7872             void *p2, *p3;
7873 
7874             if (arg1) {
7875                 p = lock_user_string(arg1);
7876                 if (!p) {
7877                     return -TARGET_EFAULT;
7878                 }
7879             } else {
7880                 p = NULL;
7881             }
7882 
7883             p2 = lock_user_string(arg2);
7884             if (!p2) {
7885                 if (arg1) {
7886                     unlock_user(p, arg1, 0);
7887                 }
7888                 return -TARGET_EFAULT;
7889             }
7890 
7891             if (arg3) {
7892                 p3 = lock_user_string(arg3);
7893                 if (!p3) {
7894                     if (arg1) {
7895                         unlock_user(p, arg1, 0);
7896                     }
7897                     unlock_user(p2, arg2, 0);
7898                     return -TARGET_EFAULT;
7899                 }
7900             } else {
7901                 p3 = NULL;
7902             }
7903 
7904             /* FIXME - arg5 should be locked, but it isn't clear how to
7905              * do that since it's not guaranteed to be a NULL-terminated
7906              * string.
7907              */
7908             if (!arg5) {
7909                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7910             } else {
7911                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7912             }
7913             ret = get_errno(ret);
7914 
7915             if (arg1) {
7916                 unlock_user(p, arg1, 0);
7917             }
7918             unlock_user(p2, arg2, 0);
7919             if (arg3) {
7920                 unlock_user(p3, arg3, 0);
7921             }
7922         }
7923         return ret;
7924 #ifdef TARGET_NR_umount
7925     case TARGET_NR_umount:
7926         if (!(p = lock_user_string(arg1)))
7927             return -TARGET_EFAULT;
7928         ret = get_errno(umount(p));
7929         unlock_user(p, arg1, 0);
7930         return ret;
7931 #endif
7932 #ifdef TARGET_NR_stime /* not on alpha */
7933     case TARGET_NR_stime:
7934         {
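            /* stime() passes seconds; implement it with clock_settime(CLOCK_REALTIME). */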
7935             struct timespec ts;
7936             ts.tv_nsec = 0;
7937             if (get_user_sal(ts.tv_sec, arg1)) {
7938                 return -TARGET_EFAULT;
7939             }
7940             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
7941         }
7942 #endif
7943 #ifdef TARGET_NR_alarm /* not on alpha */
7944     case TARGET_NR_alarm:
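        /* alarm() always succeeds; it returns the seconds left on any previous alarm. */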
7945         return alarm(arg1);
7946 #endif
7947 #ifdef TARGET_NR_pause /* not on alpha */
7948     case TARGET_NR_pause:
7949         if (!block_signals()) {
7950             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7951         }
7952         return -TARGET_EINTR;
7953 #endif
7954 #ifdef TARGET_NR_utime
7955     case TARGET_NR_utime:
7956         {
7957             struct utimbuf tbuf, *host_tbuf;
7958             struct target_utimbuf *target_tbuf;
7959             if (arg2) {
7960                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7961                     return -TARGET_EFAULT;
7962                 tbuf.actime = tswapal(target_tbuf->actime);
7963                 tbuf.modtime = tswapal(target_tbuf->modtime);
7964                 unlock_user_struct(target_tbuf, arg2, 0);
7965                 host_tbuf = &tbuf;
7966             } else {
7967                 host_tbuf = NULL;
7968             }
7969             if (!(p = lock_user_string(arg1)))
7970                 return -TARGET_EFAULT;
7971             ret = get_errno(utime(p, host_tbuf));
7972             unlock_user(p, arg1, 0);
7973         }
7974         return ret;
7975 #endif
7976 #ifdef TARGET_NR_utimes
7977     case TARGET_NR_utimes:
7978         {
7979             struct timeval *tvp, tv[2];
7980             if (arg2) {
7981                 if (copy_from_user_timeval(&tv[0], arg2)
7982                     || copy_from_user_timeval(&tv[1],
7983                                               arg2 + sizeof(struct target_timeval)))
7984                     return -TARGET_EFAULT;
7985                 tvp = tv;
7986             } else {
7987                 tvp = NULL;
7988             }
7989             if (!(p = lock_user_string(arg1)))
7990                 return -TARGET_EFAULT;
7991             ret = get_errno(utimes(p, tvp));
7992             unlock_user(p, arg1, 0);
7993         }
7994         return ret;
7995 #endif
7996 #if defined(TARGET_NR_futimesat)
7997     case TARGET_NR_futimesat:
7998         {
7999             struct timeval *tvp, tv[2];
8000             if (arg3) {
8001                 if (copy_from_user_timeval(&tv[0], arg3)
8002                     || copy_from_user_timeval(&tv[1],
8003                                               arg3 + sizeof(struct target_timeval)))
8004                     return -TARGET_EFAULT;
8005                 tvp = tv;
8006             } else {
8007                 tvp = NULL;
8008             }
8009             if (!(p = lock_user_string(arg2))) {
8010                 return -TARGET_EFAULT;
8011             }
8012             ret = get_errno(futimesat(arg1, path(p), tvp));
8013             unlock_user(p, arg2, 0);
8014         }
8015         return ret;
8016 #endif
8017 #ifdef TARGET_NR_access
8018     case TARGET_NR_access:
8019         if (!(p = lock_user_string(arg1))) {
8020             return -TARGET_EFAULT;
8021         }
8022         ret = get_errno(access(path(p), arg2));
8023         unlock_user(p, arg1, 0);
8024         return ret;
8025 #endif
8026 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8027     case TARGET_NR_faccessat:
8028         if (!(p = lock_user_string(arg2))) {
8029             return -TARGET_EFAULT;
8030         }
8031         ret = get_errno(faccessat(arg1, p, arg3, 0));
8032         unlock_user(p, arg2, 0);
8033         return ret;
8034 #endif
8035 #ifdef TARGET_NR_nice /* not on alpha */
8036     case TARGET_NR_nice:
8037         return get_errno(nice(arg1));
8038 #endif
8039     case TARGET_NR_sync:
8040         sync();
8041         return 0;
8042 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8043     case TARGET_NR_syncfs:
8044         return get_errno(syncfs(arg1));
8045 #endif
8046     case TARGET_NR_kill:
8047         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8048 #ifdef TARGET_NR_rename
8049     case TARGET_NR_rename:
8050         {
8051             void *p2;
8052             p = lock_user_string(arg1);
8053             p2 = lock_user_string(arg2);
8054             if (!p || !p2)
8055                 ret = -TARGET_EFAULT;
8056             else
8057                 ret = get_errno(rename(p, p2));
8058             unlock_user(p2, arg2, 0);
8059             unlock_user(p, arg1, 0);
8060         }
8061         return ret;
8062 #endif
8063 #if defined(TARGET_NR_renameat)
8064     case TARGET_NR_renameat:
8065         {
8066             void *p2;
8067             p  = lock_user_string(arg2);
8068             p2 = lock_user_string(arg4);
8069             if (!p || !p2)
8070                 ret = -TARGET_EFAULT;
8071             else
8072                 ret = get_errno(renameat(arg1, p, arg3, p2));
8073             unlock_user(p2, arg4, 0);
8074             unlock_user(p, arg2, 0);
8075         }
8076         return ret;
8077 #endif
8078 #if defined(TARGET_NR_renameat2)
8079     case TARGET_NR_renameat2:
8080         {
8081             void *p2;
8082             p  = lock_user_string(arg2);
8083             p2 = lock_user_string(arg4);
8084             if (!p || !p2) {
8085                 ret = -TARGET_EFAULT;
8086             } else {
8087                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8088             }
8089             unlock_user(p2, arg4, 0);
8090             unlock_user(p, arg2, 0);
8091         }
8092         return ret;
8093 #endif
8094 #ifdef TARGET_NR_mkdir
8095     case TARGET_NR_mkdir:
8096         if (!(p = lock_user_string(arg1)))
8097             return -TARGET_EFAULT;
8098         ret = get_errno(mkdir(p, arg2));
8099         unlock_user(p, arg1, 0);
8100         return ret;
8101 #endif
8102 #if defined(TARGET_NR_mkdirat)
8103     case TARGET_NR_mkdirat:
8104         if (!(p = lock_user_string(arg2)))
8105             return -TARGET_EFAULT;
8106         ret = get_errno(mkdirat(arg1, p, arg3));
8107         unlock_user(p, arg2, 0);
8108         return ret;
8109 #endif
8110 #ifdef TARGET_NR_rmdir
8111     case TARGET_NR_rmdir:
8112         if (!(p = lock_user_string(arg1)))
8113             return -TARGET_EFAULT;
8114         ret = get_errno(rmdir(p));
8115         unlock_user(p, arg1, 0);
8116         return ret;
8117 #endif
8118     case TARGET_NR_dup:
8119         ret = get_errno(dup(arg1));
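        /* Propagate any fd translator registration to the duplicated descriptor. */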
8120         if (ret >= 0) {
8121             fd_trans_dup(arg1, ret);
8122         }
8123         return ret;
8124 #ifdef TARGET_NR_pipe
8125     case TARGET_NR_pipe:
8126         return do_pipe(cpu_env, arg1, 0, 0);
8127 #endif
8128 #ifdef TARGET_NR_pipe2
8129     case TARGET_NR_pipe2:
8130         return do_pipe(cpu_env, arg1,
8131                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8132 #endif
8133     case TARGET_NR_times:
8134         {
8135             struct target_tms *tmsp;
8136             struct tms tms;
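            /* Both the tms fields and the return value are in clock ticks and need converting for the target. */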
8137             ret = get_errno(times(&tms));
8138             if (arg1) {
8139                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8140                 if (!tmsp)
8141                     return -TARGET_EFAULT;
8142                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8143                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8144                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8145                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8146             }
8147             if (!is_error(ret))
8148                 ret = host_to_target_clock_t(ret);
8149         }
8150         return ret;
8151     case TARGET_NR_acct:
8152         if (arg1 == 0) {
8153             ret = get_errno(acct(NULL));
8154         } else {
8155             if (!(p = lock_user_string(arg1))) {
8156                 return -TARGET_EFAULT;
8157             }
8158             ret = get_errno(acct(path(p)));
8159             unlock_user(p, arg1, 0);
8160         }
8161         return ret;
8162 #ifdef TARGET_NR_umount2
8163     case TARGET_NR_umount2:
8164         if (!(p = lock_user_string(arg1)))
8165             return -TARGET_EFAULT;
8166         ret = get_errno(umount2(p, arg2));
8167         unlock_user(p, arg1, 0);
8168         return ret;
8169 #endif
8170     case TARGET_NR_ioctl:
8171         return do_ioctl(arg1, arg2, arg3);
8172 #ifdef TARGET_NR_fcntl
8173     case TARGET_NR_fcntl:
8174         return do_fcntl(arg1, arg2, arg3);
8175 #endif
8176     case TARGET_NR_setpgid:
8177         return get_errno(setpgid(arg1, arg2));
8178     case TARGET_NR_umask:
8179         return get_errno(umask(arg1));
8180     case TARGET_NR_chroot:
8181         if (!(p = lock_user_string(arg1)))
8182             return -TARGET_EFAULT;
8183         ret = get_errno(chroot(p));
8184         unlock_user(p, arg1, 0);
8185         return ret;
8186 #ifdef TARGET_NR_dup2
8187     case TARGET_NR_dup2:
8188         ret = get_errno(dup2(arg1, arg2));
8189         if (ret >= 0) {
8190             fd_trans_dup(arg1, arg2);
8191         }
8192         return ret;
8193 #endif
8194 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8195     case TARGET_NR_dup3:
8196     {
8197         int host_flags;
8198 
8199         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8200             return -EINVAL;
8201         }
8202         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8203         ret = get_errno(dup3(arg1, arg2, host_flags));
8204         if (ret >= 0) {
8205             fd_trans_dup(arg1, arg2);
8206         }
8207         return ret;
8208     }
8209 #endif
8210 #ifdef TARGET_NR_getppid /* not on alpha */
8211     case TARGET_NR_getppid:
8212         return get_errno(getppid());
8213 #endif
8214 #ifdef TARGET_NR_getpgrp
8215     case TARGET_NR_getpgrp:
8216         return get_errno(getpgrp());
8217 #endif
8218     case TARGET_NR_setsid:
8219         return get_errno(setsid());
8220 #ifdef TARGET_NR_sigaction
8221     case TARGET_NR_sigaction:
8222         {
8223 #if defined(TARGET_ALPHA)
8224             struct target_sigaction act, oact, *pact = 0;
8225             struct target_old_sigaction *old_act;
8226             if (arg2) {
8227                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8228                     return -TARGET_EFAULT;
8229                 act._sa_handler = old_act->_sa_handler;
8230                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8231                 act.sa_flags = old_act->sa_flags;
8232                 act.sa_restorer = 0;
8233                 unlock_user_struct(old_act, arg2, 0);
8234                 pact = &act;
8235             }
8236             ret = get_errno(do_sigaction(arg1, pact, &oact));
8237             if (!is_error(ret) && arg3) {
8238                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8239                     return -TARGET_EFAULT;
8240                 old_act->_sa_handler = oact._sa_handler;
8241                 old_act->sa_mask = oact.sa_mask.sig[0];
8242                 old_act->sa_flags = oact.sa_flags;
8243                 unlock_user_struct(old_act, arg3, 1);
8244             }
8245 #elif defined(TARGET_MIPS)
8246             struct target_sigaction act, oact, *pact, *old_act;
8247 
8248             if (arg2) {
8249                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8250                     return -TARGET_EFAULT;
8251                 act._sa_handler = old_act->_sa_handler;
8252                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8253                 act.sa_flags = old_act->sa_flags;
8254                 unlock_user_struct(old_act, arg2, 0);
8255                 pact = &act;
8256             } else {
8257                 pact = NULL;
8258             }
8259 
8260             ret = get_errno(do_sigaction(arg1, pact, &oact));
8261 
8262             if (!is_error(ret) && arg3) {
8263                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8264                     return -TARGET_EFAULT;
8265                 old_act->_sa_handler = oact._sa_handler;
8266                 old_act->sa_flags = oact.sa_flags;
8267                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8268                 old_act->sa_mask.sig[1] = 0;
8269                 old_act->sa_mask.sig[2] = 0;
8270                 old_act->sa_mask.sig[3] = 0;
8271                 unlock_user_struct(old_act, arg3, 1);
8272             }
8273 #else
8274             struct target_old_sigaction *old_act;
8275             struct target_sigaction act, oact, *pact;
8276             if (arg2) {
8277                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8278                     return -TARGET_EFAULT;
8279                 act._sa_handler = old_act->_sa_handler;
8280                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8281                 act.sa_flags = old_act->sa_flags;
8282                 act.sa_restorer = old_act->sa_restorer;
8283 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8284                 act.ka_restorer = 0;
8285 #endif
8286                 unlock_user_struct(old_act, arg2, 0);
8287                 pact = &act;
8288             } else {
8289                 pact = NULL;
8290             }
8291             ret = get_errno(do_sigaction(arg1, pact, &oact));
8292             if (!is_error(ret) && arg3) {
8293                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8294                     return -TARGET_EFAULT;
8295                 old_act->_sa_handler = oact._sa_handler;
8296                 old_act->sa_mask = oact.sa_mask.sig[0];
8297                 old_act->sa_flags = oact.sa_flags;
8298                 old_act->sa_restorer = oact.sa_restorer;
8299                 unlock_user_struct(old_act, arg3, 1);
8300             }
8301 #endif
8302         }
8303         return ret;
8304 #endif
8305     case TARGET_NR_rt_sigaction:
8306         {
8307 #if defined(TARGET_ALPHA)
8308             /* For Alpha and SPARC this is a 5 argument syscall, with
8309              * a 'restorer' parameter which must be copied into the
8310              * sa_restorer field of the sigaction struct.
8311              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8312              * and arg5 is the sigsetsize.
8313              * Alpha also has a separate rt_sigaction struct that it uses
8314              * here; SPARC uses the usual sigaction struct.
8315              */
8316             struct target_rt_sigaction *rt_act;
8317             struct target_sigaction act, oact, *pact = 0;
8318 
8319             if (arg4 != sizeof(target_sigset_t)) {
8320                 return -TARGET_EINVAL;
8321             }
8322             if (arg2) {
8323                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8324                     return -TARGET_EFAULT;
8325                 act._sa_handler = rt_act->_sa_handler;
8326                 act.sa_mask = rt_act->sa_mask;
8327                 act.sa_flags = rt_act->sa_flags;
8328                 act.sa_restorer = arg5;
8329                 unlock_user_struct(rt_act, arg2, 0);
8330                 pact = &act;
8331             }
8332             ret = get_errno(do_sigaction(arg1, pact, &oact));
8333             if (!is_error(ret) && arg3) {
8334                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8335                     return -TARGET_EFAULT;
8336                 rt_act->_sa_handler = oact._sa_handler;
8337                 rt_act->sa_mask = oact.sa_mask;
8338                 rt_act->sa_flags = oact.sa_flags;
8339                 unlock_user_struct(rt_act, arg3, 1);
8340             }
8341 #else
8342 #ifdef TARGET_SPARC
8343             target_ulong restorer = arg4;
8344             target_ulong sigsetsize = arg5;
8345 #else
8346             target_ulong sigsetsize = arg4;
8347 #endif
8348             struct target_sigaction *act;
8349             struct target_sigaction *oact;
8350 
8351             if (sigsetsize != sizeof(target_sigset_t)) {
8352                 return -TARGET_EINVAL;
8353             }
8354             if (arg2) {
8355                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8356                     return -TARGET_EFAULT;
8357                 }
8358 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8359                 act->ka_restorer = restorer;
8360 #endif
8361             } else {
8362                 act = NULL;
8363             }
8364             if (arg3) {
8365                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8366                     ret = -TARGET_EFAULT;
8367                     goto rt_sigaction_fail;
8368                 }
8369             } else
8370                 oact = NULL;
8371             ret = get_errno(do_sigaction(arg1, act, oact));
8372         rt_sigaction_fail:
8373             if (act)
8374                 unlock_user_struct(act, arg2, 0);
8375             if (oact)
8376                 unlock_user_struct(oact, arg3, 1);
8377 #endif
8378         }
8379         return ret;
8380 #ifdef TARGET_NR_sgetmask /* not on alpha */
8381     case TARGET_NR_sgetmask:
8382         {
8383             sigset_t cur_set;
8384             abi_ulong target_set;
8385             ret = do_sigprocmask(0, NULL, &cur_set);
8386             if (!ret) {
8387                 host_to_target_old_sigset(&target_set, &cur_set);
8388                 ret = target_set;
8389             }
8390         }
8391         return ret;
8392 #endif
8393 #ifdef TARGET_NR_ssetmask /* not on alpha */
8394     case TARGET_NR_ssetmask:
8395         {
8396             sigset_t set, oset;
8397             abi_ulong target_set = arg1;
8398             target_to_host_old_sigset(&set, &target_set);
8399             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8400             if (!ret) {
8401                 host_to_target_old_sigset(&target_set, &oset);
8402                 ret = target_set;
8403             }
8404         }
8405         return ret;
8406 #endif
8407 #ifdef TARGET_NR_sigprocmask
8408     case TARGET_NR_sigprocmask:
8409         {
8410 #if defined(TARGET_ALPHA)
8411             sigset_t set, oldset;
8412             abi_ulong mask;
8413             int how;
8414 
8415             switch (arg1) {
8416             case TARGET_SIG_BLOCK:
8417                 how = SIG_BLOCK;
8418                 break;
8419             case TARGET_SIG_UNBLOCK:
8420                 how = SIG_UNBLOCK;
8421                 break;
8422             case TARGET_SIG_SETMASK:
8423                 how = SIG_SETMASK;
8424                 break;
8425             default:
8426                 return -TARGET_EINVAL;
8427             }
8428             mask = arg2;
8429             target_to_host_old_sigset(&set, &mask);
8430 
8431             ret = do_sigprocmask(how, &set, &oldset);
8432             if (!is_error(ret)) {
8433                 host_to_target_old_sigset(&mask, &oldset);
8434                 ret = mask;
8435                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8436             }
8437 #else
8438             sigset_t set, oldset, *set_ptr;
8439             int how;
8440 
8441             if (arg2) {
8442                 switch (arg1) {
8443                 case TARGET_SIG_BLOCK:
8444                     how = SIG_BLOCK;
8445                     break;
8446                 case TARGET_SIG_UNBLOCK:
8447                     how = SIG_UNBLOCK;
8448                     break;
8449                 case TARGET_SIG_SETMASK:
8450                     how = SIG_SETMASK;
8451                     break;
8452                 default:
8453                     return -TARGET_EINVAL;
8454                 }
8455                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8456                     return -TARGET_EFAULT;
8457                 target_to_host_old_sigset(&set, p);
8458                 unlock_user(p, arg2, 0);
8459                 set_ptr = &set;
8460             } else {
8461                 how = 0;
8462                 set_ptr = NULL;
8463             }
8464             ret = do_sigprocmask(how, set_ptr, &oldset);
8465             if (!is_error(ret) && arg3) {
8466                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8467                     return -TARGET_EFAULT;
8468                 host_to_target_old_sigset(p, &oldset);
8469                 unlock_user(p, arg3, sizeof(target_sigset_t));
8470             }
8471 #endif
8472         }
8473         return ret;
8474 #endif
8475     case TARGET_NR_rt_sigprocmask:
8476         {
8477             int how = arg1;
8478             sigset_t set, oldset, *set_ptr;
8479 
8480             if (arg4 != sizeof(target_sigset_t)) {
8481                 return -TARGET_EINVAL;
8482             }
8483 
8484             if (arg2) {
8485                 switch(how) {
8486                 case TARGET_SIG_BLOCK:
8487                     how = SIG_BLOCK;
8488                     break;
8489                 case TARGET_SIG_UNBLOCK:
8490                     how = SIG_UNBLOCK;
8491                     break;
8492                 case TARGET_SIG_SETMASK:
8493                     how = SIG_SETMASK;
8494                     break;
8495                 default:
8496                     return -TARGET_EINVAL;
8497                 }
8498                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8499                     return -TARGET_EFAULT;
8500                 target_to_host_sigset(&set, p);
8501                 unlock_user(p, arg2, 0);
8502                 set_ptr = &set;
8503             } else {
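                /* With a NULL set only the old mask is queried; 'how' is ignored. */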
8504                 how = 0;
8505                 set_ptr = NULL;
8506             }
8507             ret = do_sigprocmask(how, set_ptr, &oldset);
8508             if (!is_error(ret) && arg3) {
8509                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8510                     return -TARGET_EFAULT;
8511                 host_to_target_sigset(p, &oldset);
8512                 unlock_user(p, arg3, sizeof(target_sigset_t));
8513             }
8514         }
8515         return ret;
8516 #ifdef TARGET_NR_sigpending
8517     case TARGET_NR_sigpending:
8518         {
8519             sigset_t set;
8520             ret = get_errno(sigpending(&set));
8521             if (!is_error(ret)) {
8522                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8523                     return -TARGET_EFAULT;
8524                 host_to_target_old_sigset(p, &set);
8525                 unlock_user(p, arg1, sizeof(target_sigset_t));
8526             }
8527         }
8528         return ret;
8529 #endif
8530     case TARGET_NR_rt_sigpending:
8531         {
8532             sigset_t set;
8533 
8534             /* Yes, this check is >, not != like most. We follow the kernel's
8535              * logic here: it implements NR_sigpending through the same code
8536              * path, and in that case the old_sigset_t is smaller, so sizes
8537              * below sizeof(sigset_t) must still be accepted.
8538              */
8539             if (arg2 > sizeof(target_sigset_t)) {
8540                 return -TARGET_EINVAL;
8541             }
8542 
8543             ret = get_errno(sigpending(&set));
8544             if (!is_error(ret)) {
8545                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8546                     return -TARGET_EFAULT;
8547                 host_to_target_sigset(p, &set);
8548                 unlock_user(p, arg1, sizeof(target_sigset_t));
8549             }
8550         }
8551         return ret;
8552 #ifdef TARGET_NR_sigsuspend
8553     case TARGET_NR_sigsuspend:
8554         {
8555             TaskState *ts = cpu->opaque;
8556 #if defined(TARGET_ALPHA)
8557             abi_ulong mask = arg1;
8558             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8559 #else
8560             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8561                 return -TARGET_EFAULT;
8562             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8563             unlock_user(p, arg1, 0);
8564 #endif
8565             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8566                                                SIGSET_T_SIZE));
8567             if (ret != -TARGET_ERESTARTSYS) {
8568                 ts->in_sigsuspend = 1;
8569             }
8570         }
8571         return ret;
8572 #endif
8573     case TARGET_NR_rt_sigsuspend:
8574         {
8575             TaskState *ts = cpu->opaque;
8576 
8577             if (arg2 != sizeof(target_sigset_t)) {
8578                 return -TARGET_EINVAL;
8579             }
8580             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8581                 return -TARGET_EFAULT;
8582             target_to_host_sigset(&ts->sigsuspend_mask, p);
8583             unlock_user(p, arg1, 0);
8584             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8585                                                SIGSET_T_SIZE));
8586             if (ret != -TARGET_ERESTARTSYS) {
8587                 ts->in_sigsuspend = 1;
8588             }
8589         }
8590         return ret;
8591 #ifdef TARGET_NR_rt_sigtimedwait
8592     case TARGET_NR_rt_sigtimedwait:
8593         {
8594             sigset_t set;
8595             struct timespec uts, *puts;
8596             siginfo_t uinfo;
8597 
8598             if (arg4 != sizeof(target_sigset_t)) {
8599                 return -TARGET_EINVAL;
8600             }
8601 
8602             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8603                 return -TARGET_EFAULT;
8604             target_to_host_sigset(&set, p);
8605             unlock_user(p, arg1, 0);
8606             if (arg3) {
8607                 puts = &uts;
8608                 if (target_to_host_timespec(puts, arg3))
                    return -TARGET_EFAULT;
8609             } else {
8610                 puts = NULL;
8611             }
8612             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8613                                                  SIGSET_T_SIZE));
8614             if (!is_error(ret)) {
8615                 if (arg2) {
8616                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8617                                   0);
8618                     if (!p) {
8619                         return -TARGET_EFAULT;
8620                     }
8621                     host_to_target_siginfo(p, &uinfo);
8622                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8623                 }
8624                 ret = host_to_target_signal(ret);
8625             }
8626         }
8627         return ret;
8628 #endif
8629     case TARGET_NR_rt_sigqueueinfo:
8630         {
8631             siginfo_t uinfo;
8632 
8633             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8634             if (!p) {
8635                 return -TARGET_EFAULT;
8636             }
8637             target_to_host_siginfo(&uinfo, p);
8638             unlock_user(p, arg3, 0);
8639             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8640         }
8641         return ret;
8642     case TARGET_NR_rt_tgsigqueueinfo:
8643         {
8644             siginfo_t uinfo;
8645 
8646             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8647             if (!p) {
8648                 return -TARGET_EFAULT;
8649             }
8650             target_to_host_siginfo(&uinfo, p);
8651             unlock_user(p, arg4, 0);
8652             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8653         }
8654         return ret;
8655 #ifdef TARGET_NR_sigreturn
8656     case TARGET_NR_sigreturn:
8657         if (block_signals()) {
8658             return -TARGET_ERESTARTSYS;
8659         }
8660         return do_sigreturn(cpu_env);
8661 #endif
8662     case TARGET_NR_rt_sigreturn:
8663         if (block_signals()) {
8664             return -TARGET_ERESTARTSYS;
8665         }
8666         return do_rt_sigreturn(cpu_env);
8667     case TARGET_NR_sethostname:
8668         if (!(p = lock_user_string(arg1)))
8669             return -TARGET_EFAULT;
8670         ret = get_errno(sethostname(p, arg2));
8671         unlock_user(p, arg1, 0);
8672         return ret;
8673 #ifdef TARGET_NR_setrlimit
8674     case TARGET_NR_setrlimit:
8675         {
8676             int resource = target_to_host_resource(arg1);
8677             struct target_rlimit *target_rlim;
8678             struct rlimit rlim;
8679             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8680                 return -TARGET_EFAULT;
8681             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8682             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8683             unlock_user_struct(target_rlim, arg2, 0);
8684             /*
8685              * If we just passed through resource limit settings for memory then
8686              * they would also apply to QEMU's own allocations, and QEMU will
8687              * crash or hang or die if its allocations fail. Ideally we would
8688              * track the guest allocations in QEMU and apply the limits ourselves.
8689              * For now, just tell the guest the call succeeded but don't actually
8690              * limit anything.
8691              */
8692             if (resource != RLIMIT_AS &&
8693                 resource != RLIMIT_DATA &&
8694                 resource != RLIMIT_STACK) {
8695                 return get_errno(setrlimit(resource, &rlim));
8696             } else {
8697                 return 0;
8698             }
8699         }
8700 #endif
8701 #ifdef TARGET_NR_getrlimit
8702     case TARGET_NR_getrlimit:
8703         {
8704             int resource = target_to_host_resource(arg1);
8705             struct target_rlimit *target_rlim;
8706             struct rlimit rlim;
8707 
8708             ret = get_errno(getrlimit(resource, &rlim));
8709             if (!is_error(ret)) {
8710                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8711                     return -TARGET_EFAULT;
8712                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8713                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8714                 unlock_user_struct(target_rlim, arg2, 1);
8715             }
8716         }
8717         return ret;
8718 #endif
8719     case TARGET_NR_getrusage:
8720         {
8721             struct rusage rusage;
8722             ret = get_errno(getrusage(arg1, &rusage));
8723             if (!is_error(ret)) {
8724                 ret = host_to_target_rusage(arg2, &rusage);
8725             }
8726         }
8727         return ret;
8728 #if defined(TARGET_NR_gettimeofday)
8729     case TARGET_NR_gettimeofday:
8730         {
8731             struct timeval tv;
8732             struct timezone tz;
8733 
8734             ret = get_errno(gettimeofday(&tv, &tz));
8735             if (!is_error(ret)) {
8736                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
8737                     return -TARGET_EFAULT;
8738                 }
8739                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
8740                     return -TARGET_EFAULT;
8741                 }
8742             }
8743         }
8744         return ret;
8745 #endif
8746 #if defined(TARGET_NR_settimeofday)
8747     case TARGET_NR_settimeofday:
8748         {
8749             struct timeval tv, *ptv = NULL;
8750             struct timezone tz, *ptz = NULL;
8751 
8752             if (arg1) {
8753                 if (copy_from_user_timeval(&tv, arg1)) {
8754                     return -TARGET_EFAULT;
8755                 }
8756                 ptv = &tv;
8757             }
8758 
8759             if (arg2) {
8760                 if (copy_from_user_timezone(&tz, arg2)) {
8761                     return -TARGET_EFAULT;
8762                 }
8763                 ptz = &tz;
8764             }
8765 
8766             return get_errno(settimeofday(ptv, ptz));
8767         }
8768 #endif
8769 #if defined(TARGET_NR_select)
8770     case TARGET_NR_select:
8771 #if defined(TARGET_WANT_NI_OLD_SELECT)
8772         /* some architectures used to have old_select here
8773         /* Some architectures used to have old_select here
8774          * but now return ENOSYS for it.
8775         ret = -TARGET_ENOSYS;
8776 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8777         ret = do_old_select(arg1);
8778 #else
8779         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8780 #endif
8781         return ret;
8782 #endif
8783 #ifdef TARGET_NR_pselect6
8784     case TARGET_NR_pselect6:
8785         {
8786             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8787             fd_set rfds, wfds, efds;
8788             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8789             struct timespec ts, *ts_ptr;
8790 
8791             /*
8792              * The 6th arg actually packs two values (a sigset pointer and
8793              * its size) into one structure, so we cannot use the C library.
8794              */
8795             sigset_t set;
8796             struct {
8797                 sigset_t *set;
8798                 size_t size;
8799             } sig, *sig_ptr;
8800 
8801             abi_ulong arg_sigset, arg_sigsize, *arg7;
8802             target_sigset_t *target_sigset;
8803 
8804             n = arg1;
8805             rfd_addr = arg2;
8806             wfd_addr = arg3;
8807             efd_addr = arg4;
8808             ts_addr = arg5;
8809 
8810             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8811             if (ret) {
8812                 return ret;
8813             }
8814             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8815             if (ret) {
8816                 return ret;
8817             }
8818             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8819             if (ret) {
8820                 return ret;
8821             }
8822 
8823             /*
8824              * This takes a timespec, and not a timeval, so we cannot
8825              * use the do_select() helper ...
8826              */
8827             if (ts_addr) {
8828                 if (target_to_host_timespec(&ts, ts_addr)) {
8829                     return -TARGET_EFAULT;
8830                 }
8831                 ts_ptr = &ts;
8832             } else {
8833                 ts_ptr = NULL;
8834             }
8835 
8836             /* Extract the two packed args for the sigset */
8837             if (arg6) {
8838                 sig_ptr = &sig;
8839                 sig.size = SIGSET_T_SIZE;
8840 
8841                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8842                 if (!arg7) {
8843                     return -TARGET_EFAULT;
8844                 }
8845                 arg_sigset = tswapal(arg7[0]);
8846                 arg_sigsize = tswapal(arg7[1]);
8847                 unlock_user(arg7, arg6, 0);
8848 
8849                 if (arg_sigset) {
8850                     sig.set = &set;
8851                     if (arg_sigsize != sizeof(*target_sigset)) {
8852                         /* Like the kernel, we enforce correct size sigsets */
8853                         return -TARGET_EINVAL;
8854                     }
8855                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8856                                               sizeof(*target_sigset), 1);
8857                     if (!target_sigset) {
8858                         return -TARGET_EFAULT;
8859                     }
8860                     target_to_host_sigset(&set, target_sigset);
8861                     unlock_user(target_sigset, arg_sigset, 0);
8862                 } else {
8863                     sig.set = NULL;
8864                 }
8865             } else {
8866                 sig_ptr = NULL;
8867             }
8868 
8869             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8870                                           ts_ptr, sig_ptr));
8871 
8872             if (!is_error(ret)) {
8873                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8874                     return -TARGET_EFAULT;
8875                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8876                     return -TARGET_EFAULT;
8877                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8878                     return -TARGET_EFAULT;
8879 
8880                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8881                     return -TARGET_EFAULT;
8882             }
8883         }
8884         return ret;
8885 #endif
8886 #ifdef TARGET_NR_symlink
8887     case TARGET_NR_symlink:
8888         {
8889             void *p2;
8890             p = lock_user_string(arg1);
8891             p2 = lock_user_string(arg2);
8892             if (!p || !p2)
8893                 ret = -TARGET_EFAULT;
8894             else
8895                 ret = get_errno(symlink(p, p2));
8896             unlock_user(p2, arg2, 0);
8897             unlock_user(p, arg1, 0);
8898         }
8899         return ret;
8900 #endif
8901 #if defined(TARGET_NR_symlinkat)
8902     case TARGET_NR_symlinkat:
8903         {
8904             void *p2;
8905             p  = lock_user_string(arg1);
8906             p2 = lock_user_string(arg3);
8907             if (!p || !p2)
8908                 ret = -TARGET_EFAULT;
8909             else
8910                 ret = get_errno(symlinkat(p, arg2, p2));
8911             unlock_user(p2, arg3, 0);
8912             unlock_user(p, arg1, 0);
8913         }
8914         return ret;
8915 #endif
8916 #ifdef TARGET_NR_readlink
8917     case TARGET_NR_readlink:
8918         {
8919             void *p2;
8920             p = lock_user_string(arg1);
8921             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8922             if (!p || !p2) {
8923                 ret = -TARGET_EFAULT;
8924             } else if (!arg3) {
8925                 /* Zero-length buffer: fail early, before the magic exe check. */
8926                 ret = -TARGET_EINVAL;
8927             } else if (is_proc_myself((const char *)p, "exe")) {
8928                 char real[PATH_MAX], *temp;
8929                 temp = realpath(exec_path, real);
8930                 /* Return value is # of bytes that we wrote to the buffer. */
8931                 if (temp == NULL) {
8932                     ret = get_errno(-1);
8933                 } else {
8934                     /* Don't worry about sign mismatch as earlier mapping
8935                      * logic would have thrown a bad address error. */
8936                     ret = MIN(strlen(real), arg3);
8937                     /* We cannot NUL terminate the string. */
8938                     memcpy(p2, real, ret);
8939                 }
8940             } else {
8941                 ret = get_errno(readlink(path(p), p2, arg3));
8942             }
8943             unlock_user(p2, arg2, ret);
8944             unlock_user(p, arg1, 0);
8945         }
8946         return ret;
8947 #endif
8948 #if defined(TARGET_NR_readlinkat)
8949     case TARGET_NR_readlinkat:
8950         {
8951             void *p2;
8952             p  = lock_user_string(arg2);
8953             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8954             if (!p || !p2) {
8955                 ret = -TARGET_EFAULT;
8956             } else if (is_proc_myself((const char *)p, "exe")) {
8957                 char real[PATH_MAX], *temp;
8958                 temp = realpath(exec_path, real);
8959                 ret = temp == NULL ? get_errno(-1) : MIN(strlen(real), arg4);
8960                 snprintf((char *)p2, arg4, "%s", real);
8961             } else {
8962                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8963             }
8964             unlock_user(p2, arg3, ret);
8965             unlock_user(p, arg2, 0);
8966         }
8967         return ret;
8968 #endif
8969 #ifdef TARGET_NR_swapon
8970     case TARGET_NR_swapon:
8971         if (!(p = lock_user_string(arg1)))
8972             return -TARGET_EFAULT;
8973         ret = get_errno(swapon(p, arg2));
8974         unlock_user(p, arg1, 0);
8975         return ret;
8976 #endif
8977     case TARGET_NR_reboot:
8978         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8979            /* arg4 (the command string) is only used for LINUX_REBOOT_CMD_RESTART2 */
8980            p = lock_user_string(arg4);
8981            if (!p) {
8982                return -TARGET_EFAULT;
8983            }
8984            ret = get_errno(reboot(arg1, arg2, arg3, p));
8985            unlock_user(p, arg4, 0);
8986         } else {
8987            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8988         }
8989         return ret;
8990 #ifdef TARGET_NR_mmap
8991     case TARGET_NR_mmap:
8992 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8993     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8994     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8995     || defined(TARGET_S390X)
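        /* These targets pass the six mmap arguments packed in a guest memory block pointed to by arg1 (old_mmap style). */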
8996         {
8997             abi_ulong *v;
8998             abi_ulong v1, v2, v3, v4, v5, v6;
8999             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9000                 return -TARGET_EFAULT;
9001             v1 = tswapal(v[0]);
9002             v2 = tswapal(v[1]);
9003             v3 = tswapal(v[2]);
9004             v4 = tswapal(v[3]);
9005             v5 = tswapal(v[4]);
9006             v6 = tswapal(v[5]);
9007             unlock_user(v, arg1, 0);
9008             ret = get_errno(target_mmap(v1, v2, v3,
9009                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9010                                         v5, v6));
9011         }
9012 #else
9013         ret = get_errno(target_mmap(arg1, arg2, arg3,
9014                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9015                                     arg5,
9016                                     arg6));
9017 #endif
9018         return ret;
9019 #endif
9020 #ifdef TARGET_NR_mmap2
9021     case TARGET_NR_mmap2:
9022 #ifndef MMAP_SHIFT
9023 #define MMAP_SHIFT 12
9024 #endif
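        /* The mmap2 offset argument is in units of 1 << MMAP_SHIFT bytes (4096 by default). */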
9025         ret = target_mmap(arg1, arg2, arg3,
9026                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9027                           arg5, arg6 << MMAP_SHIFT);
9028         return get_errno(ret);
9029 #endif
9030     case TARGET_NR_munmap:
9031         return get_errno(target_munmap(arg1, arg2));
9032     case TARGET_NR_mprotect:
9033         {
9034             TaskState *ts = cpu->opaque;
9035             /* Special hack to detect libc making the stack executable.  */
9036             if ((arg3 & PROT_GROWSDOWN)
9037                 && arg1 >= ts->info->stack_limit
9038                 && arg1 <= ts->info->start_stack) {
9039                 arg3 &= ~PROT_GROWSDOWN;
9040                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9041                 arg1 = ts->info->stack_limit;
9042             }
9043         }
9044         return get_errno(target_mprotect(arg1, arg2, arg3));
9045 #ifdef TARGET_NR_mremap
9046     case TARGET_NR_mremap:
9047         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9048 #endif
9049         /* ??? msync/mlock/munlock are broken for softmmu.  */
9050 #ifdef TARGET_NR_msync
9051     case TARGET_NR_msync:
9052         return get_errno(msync(g2h(arg1), arg2, arg3));
9053 #endif
9054 #ifdef TARGET_NR_mlock
9055     case TARGET_NR_mlock:
9056         return get_errno(mlock(g2h(arg1), arg2));
9057 #endif
9058 #ifdef TARGET_NR_munlock
9059     case TARGET_NR_munlock:
9060         return get_errno(munlock(g2h(arg1), arg2));
9061 #endif
9062 #ifdef TARGET_NR_mlockall
9063     case TARGET_NR_mlockall:
9064         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9065 #endif
9066 #ifdef TARGET_NR_munlockall
9067     case TARGET_NR_munlockall:
9068         return get_errno(munlockall());
9069 #endif
9070 #ifdef TARGET_NR_truncate
9071     case TARGET_NR_truncate:
9072         if (!(p = lock_user_string(arg1)))
9073             return -TARGET_EFAULT;
9074         ret = get_errno(truncate(p, arg2));
9075         unlock_user(p, arg1, 0);
9076         return ret;
9077 #endif
9078 #ifdef TARGET_NR_ftruncate
9079     case TARGET_NR_ftruncate:
9080         return get_errno(ftruncate(arg1, arg2));
9081 #endif
9082     case TARGET_NR_fchmod:
9083         return get_errno(fchmod(arg1, arg2));
9084 #if defined(TARGET_NR_fchmodat)
9085     case TARGET_NR_fchmodat:
9086         if (!(p = lock_user_string(arg2)))
9087             return -TARGET_EFAULT;
9088         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9089         unlock_user(p, arg2, 0);
9090         return ret;
9091 #endif
9092     case TARGET_NR_getpriority:
9093         /* Note that negative values are valid for getpriority, so we must
9094            differentiate based on errno settings.  */
9095         errno = 0;
9096         ret = getpriority(arg1, arg2);
9097         if (ret == -1 && errno != 0) {
9098             return -host_to_target_errno(errno);
9099         }
9100 #ifdef TARGET_ALPHA
9101         /* Return value is the unbiased priority.  Signal no error.  */
9102         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9103 #else
9104         /* Return value is a biased priority to avoid negative numbers.  */
9105         ret = 20 - ret;
9106 #endif
9107         return ret;
9108     case TARGET_NR_setpriority:
9109         return get_errno(setpriority(arg1, arg2, arg3));
9110 #ifdef TARGET_NR_statfs
9111     case TARGET_NR_statfs:
9112         if (!(p = lock_user_string(arg1))) {
9113             return -TARGET_EFAULT;
9114         }
9115         ret = get_errno(statfs(path(p), &stfs));
9116         unlock_user(p, arg1, 0);
9117     convert_statfs:
9118         if (!is_error(ret)) {
9119             struct target_statfs *target_stfs;
9120 
9121             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9122                 return -TARGET_EFAULT;
9123             __put_user(stfs.f_type, &target_stfs->f_type);
9124             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9125             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9126             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9127             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9128             __put_user(stfs.f_files, &target_stfs->f_files);
9129             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9130             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9131             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9132             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9133             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9134 #ifdef _STATFS_F_FLAGS
9135             __put_user(stfs.f_flags, &target_stfs->f_flags);
9136 #else
9137             __put_user(0, &target_stfs->f_flags);
9138 #endif
9139             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9140             unlock_user_struct(target_stfs, arg2, 1);
9141         }
9142         return ret;
9143 #endif
9144 #ifdef TARGET_NR_fstatfs
9145     case TARGET_NR_fstatfs:
9146         ret = get_errno(fstatfs(arg1, &stfs));
9147         goto convert_statfs;
9148 #endif
9149 #ifdef TARGET_NR_statfs64
9150     case TARGET_NR_statfs64:
9151         if (!(p = lock_user_string(arg1))) {
9152             return -TARGET_EFAULT;
9153         }
9154         ret = get_errno(statfs(path(p), &stfs));
9155         unlock_user(p, arg1, 0);
9156     convert_statfs64:
9157         if (!is_error(ret)) {
9158             struct target_statfs64 *target_stfs;
9159 
9160             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9161                 return -TARGET_EFAULT;
9162             __put_user(stfs.f_type, &target_stfs->f_type);
9163             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9164             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9165             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9166             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9167             __put_user(stfs.f_files, &target_stfs->f_files);
9168             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9169             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9170             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9171             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9172             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9173             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9174             unlock_user_struct(target_stfs, arg3, 1);
9175         }
9176         return ret;
9177     case TARGET_NR_fstatfs64:
9178         ret = get_errno(fstatfs(arg1, &stfs));
9179         goto convert_statfs64;
9180 #endif
9181 #ifdef TARGET_NR_socketcall
9182     case TARGET_NR_socketcall:
9183         return do_socketcall(arg1, arg2);
9184 #endif
9185 #ifdef TARGET_NR_accept
9186     case TARGET_NR_accept:
9187         return do_accept4(arg1, arg2, arg3, 0);
9188 #endif
9189 #ifdef TARGET_NR_accept4
9190     case TARGET_NR_accept4:
9191         return do_accept4(arg1, arg2, arg3, arg4);
9192 #endif
9193 #ifdef TARGET_NR_bind
9194     case TARGET_NR_bind:
9195         return do_bind(arg1, arg2, arg3);
9196 #endif
9197 #ifdef TARGET_NR_connect
9198     case TARGET_NR_connect:
9199         return do_connect(arg1, arg2, arg3);
9200 #endif
9201 #ifdef TARGET_NR_getpeername
9202     case TARGET_NR_getpeername:
9203         return do_getpeername(arg1, arg2, arg3);
9204 #endif
9205 #ifdef TARGET_NR_getsockname
9206     case TARGET_NR_getsockname:
9207         return do_getsockname(arg1, arg2, arg3);
9208 #endif
9209 #ifdef TARGET_NR_getsockopt
9210     case TARGET_NR_getsockopt:
9211         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9212 #endif
9213 #ifdef TARGET_NR_listen
9214     case TARGET_NR_listen:
9215         return get_errno(listen(arg1, arg2));
9216 #endif
9217 #ifdef TARGET_NR_recv
9218     case TARGET_NR_recv:
9219         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9220 #endif
9221 #ifdef TARGET_NR_recvfrom
9222     case TARGET_NR_recvfrom:
9223         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9224 #endif
9225 #ifdef TARGET_NR_recvmsg
9226     case TARGET_NR_recvmsg:
9227         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9228 #endif
9229 #ifdef TARGET_NR_send
9230     case TARGET_NR_send:
9231         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9232 #endif
9233 #ifdef TARGET_NR_sendmsg
9234     case TARGET_NR_sendmsg:
9235         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9236 #endif
9237 #ifdef TARGET_NR_sendmmsg
9238     case TARGET_NR_sendmmsg:
9239         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9240 #endif
9241 #ifdef TARGET_NR_recvmmsg
9242     case TARGET_NR_recvmmsg:
9243         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9244 #endif
9245 #ifdef TARGET_NR_sendto
9246     case TARGET_NR_sendto:
9247         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9248 #endif
9249 #ifdef TARGET_NR_shutdown
9250     case TARGET_NR_shutdown:
9251         return get_errno(shutdown(arg1, arg2));
9252 #endif
9253 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9254     case TARGET_NR_getrandom:
9255         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9256         if (!p) {
9257             return -TARGET_EFAULT;
9258         }
9259         ret = get_errno(getrandom(p, arg2, arg3));
9260         unlock_user(p, arg1, ret);
9261         return ret;
9262 #endif
9263 #ifdef TARGET_NR_socket
9264     case TARGET_NR_socket:
9265         return do_socket(arg1, arg2, arg3);
9266 #endif
9267 #ifdef TARGET_NR_socketpair
9268     case TARGET_NR_socketpair:
9269         return do_socketpair(arg1, arg2, arg3, arg4);
9270 #endif
9271 #ifdef TARGET_NR_setsockopt
9272     case TARGET_NR_setsockopt:
9273         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9274 #endif
9275 #if defined(TARGET_NR_syslog)
9276     case TARGET_NR_syslog:
9277         {
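                 /* syslog(2): arg1 is the action, arg2 the guest buffer, arg3 its length. */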
9278             int len = arg3;
9279 
9280             switch (arg1) {
9281             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9282             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9283             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9284             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9285             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9286             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9287             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9288             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9289                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9290             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9291             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9292             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9293                 {
9294                     if (len < 0) {
9295                         return -TARGET_EINVAL;
9296                     }
9297                     if (len == 0) {
9298                         return 0;
9299                     }
9300                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9301                     if (!p) {
9302                         return -TARGET_EFAULT;
9303                     }
9304                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9305                     unlock_user(p, arg2, arg3);
9306                 }
9307                 return ret;
9308             default:
9309                 return -TARGET_EINVAL;
9310             }
9311         }
9312         break;
9313 #endif
9314     case TARGET_NR_setitimer:
9315         {
9316             struct itimerval value, ovalue, *pvalue;
9317 
9318             if (arg2) {
9319                 pvalue = &value;
9320                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9321                     || copy_from_user_timeval(&pvalue->it_value,
9322                                               arg2 + sizeof(struct target_timeval)))
9323                     return -TARGET_EFAULT;
9324             } else {
9325                 pvalue = NULL;
9326             }
9327             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9328             if (!is_error(ret) && arg3) {
9329                 if (copy_to_user_timeval(arg3,
9330                                          &ovalue.it_interval)
9331                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9332                                             &ovalue.it_value))
9333                     return -TARGET_EFAULT;
9334             }
9335         }
9336         return ret;
9337     case TARGET_NR_getitimer:
9338         {
9339             struct itimerval value;
9340 
9341             ret = get_errno(getitimer(arg1, &value));
9342             if (!is_error(ret) && arg2) {
9343                 if (copy_to_user_timeval(arg2,
9344                                          &value.it_interval)
9345                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9346                                             &value.it_value))
9347                     return -TARGET_EFAULT;
9348             }
9349         }
9350         return ret;
9351 #ifdef TARGET_NR_stat
9352     case TARGET_NR_stat:
9353         if (!(p = lock_user_string(arg1))) {
9354             return -TARGET_EFAULT;
9355         }
9356         ret = get_errno(stat(path(p), &st));
9357         unlock_user(p, arg1, 0);
9358         goto do_stat;
9359 #endif
9360 #ifdef TARGET_NR_lstat
9361     case TARGET_NR_lstat:
9362         if (!(p = lock_user_string(arg1))) {
9363             return -TARGET_EFAULT;
9364         }
9365         ret = get_errno(lstat(path(p), &st));
9366         unlock_user(p, arg1, 0);
9367         goto do_stat;
9368 #endif
9369 #ifdef TARGET_NR_fstat
9370     case TARGET_NR_fstat:
9371         {
9372             ret = get_errno(fstat(arg1, &st));
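                 /* TARGET_NR_stat and TARGET_NR_lstat branch to do_stat to share the conversion below. */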
9373 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9374         do_stat:
9375 #endif
9376             if (!is_error(ret)) {
9377                 struct target_stat *target_st;
9378 
9379                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9380                     return -TARGET_EFAULT;
9381                 memset(target_st, 0, sizeof(*target_st));
9382                 __put_user(st.st_dev, &target_st->st_dev);
9383                 __put_user(st.st_ino, &target_st->st_ino);
9384                 __put_user(st.st_mode, &target_st->st_mode);
9385                 __put_user(st.st_uid, &target_st->st_uid);
9386                 __put_user(st.st_gid, &target_st->st_gid);
9387                 __put_user(st.st_nlink, &target_st->st_nlink);
9388                 __put_user(st.st_rdev, &target_st->st_rdev);
9389                 __put_user(st.st_size, &target_st->st_size);
9390                 __put_user(st.st_blksize, &target_st->st_blksize);
9391                 __put_user(st.st_blocks, &target_st->st_blocks);
9392                 __put_user(st.st_atime, &target_st->target_st_atime);
9393                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9394                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9395 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9396     defined(TARGET_STAT_HAVE_NSEC)
9397                 __put_user(st.st_atim.tv_nsec,
9398                            &target_st->target_st_atime_nsec);
9399                 __put_user(st.st_mtim.tv_nsec,
9400                            &target_st->target_st_mtime_nsec);
9401                 __put_user(st.st_ctim.tv_nsec,
9402                            &target_st->target_st_ctime_nsec);
9403 #endif
9404                 unlock_user_struct(target_st, arg2, 1);
9405             }
9406         }
9407         return ret;
9408 #endif
9409     case TARGET_NR_vhangup:
9410         return get_errno(vhangup());
9411 #ifdef TARGET_NR_syscall
9412     case TARGET_NR_syscall:
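             /* Indirect syscall: the real syscall number is passed in the first argument. */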
9413         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9414                           arg6, arg7, arg8, 0);
9415 #endif
9416 #if defined(TARGET_NR_wait4)
9417     case TARGET_NR_wait4:
9418         {
9419             int status;
9420             abi_long status_ptr = arg2;
9421             struct rusage rusage, *rusage_ptr;
9422             abi_ulong target_rusage = arg4;
9423             abi_long rusage_err;
9424             if (target_rusage)
9425                 rusage_ptr = &rusage;
9426             else
9427                 rusage_ptr = NULL;
9428             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9429             if (!is_error(ret)) {
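                     /* Only copy back a status word if a child was actually reaped (pid != 0). */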
9430                 if (status_ptr && ret) {
9431                     status = host_to_target_waitstatus(status);
9432                     if (put_user_s32(status, status_ptr))
9433                         return -TARGET_EFAULT;
9434                 }
9435                 if (target_rusage) {
9436                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9437                     if (rusage_err) {
9438                         ret = rusage_err;
9439                     }
9440                 }
9441             }
9442         }
9443         return ret;
9444 #endif
9445 #ifdef TARGET_NR_swapoff
9446     case TARGET_NR_swapoff:
9447         if (!(p = lock_user_string(arg1)))
9448             return -TARGET_EFAULT;
9449         ret = get_errno(swapoff(p));
9450         unlock_user(p, arg1, 0);
9451         return ret;
9452 #endif
9453     case TARGET_NR_sysinfo:
9454         {
9455             struct target_sysinfo *target_value;
9456             struct sysinfo value;
9457             ret = get_errno(sysinfo(&value));
9458             if (!is_error(ret) && arg1)
9459             {
9460                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9461                     return -TARGET_EFAULT;
9462                 __put_user(value.uptime, &target_value->uptime);
9463                 __put_user(value.loads[0], &target_value->loads[0]);
9464                 __put_user(value.loads[1], &target_value->loads[1]);
9465                 __put_user(value.loads[2], &target_value->loads[2]);
9466                 __put_user(value.totalram, &target_value->totalram);
9467                 __put_user(value.freeram, &target_value->freeram);
9468                 __put_user(value.sharedram, &target_value->sharedram);
9469                 __put_user(value.bufferram, &target_value->bufferram);
9470                 __put_user(value.totalswap, &target_value->totalswap);
9471                 __put_user(value.freeswap, &target_value->freeswap);
9472                 __put_user(value.procs, &target_value->procs);
9473                 __put_user(value.totalhigh, &target_value->totalhigh);
9474                 __put_user(value.freehigh, &target_value->freehigh);
9475                 __put_user(value.mem_unit, &target_value->mem_unit);
9476                 unlock_user_struct(target_value, arg1, 1);
9477             }
9478         }
9479         return ret;
9480 #ifdef TARGET_NR_ipc
9481     case TARGET_NR_ipc:
9482         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9483 #endif
9484 #ifdef TARGET_NR_semget
9485     case TARGET_NR_semget:
9486         return get_errno(semget(arg1, arg2, arg3));
9487 #endif
9488 #ifdef TARGET_NR_semop
9489     case TARGET_NR_semop:
9490         return do_semop(arg1, arg2, arg3);
9491 #endif
9492 #ifdef TARGET_NR_semctl
9493     case TARGET_NR_semctl:
9494         return do_semctl(arg1, arg2, arg3, arg4);
9495 #endif
9496 #ifdef TARGET_NR_msgctl
9497     case TARGET_NR_msgctl:
9498         return do_msgctl(arg1, arg2, arg3);
9499 #endif
9500 #ifdef TARGET_NR_msgget
9501     case TARGET_NR_msgget:
9502         return get_errno(msgget(arg1, arg2));
9503 #endif
9504 #ifdef TARGET_NR_msgrcv
9505     case TARGET_NR_msgrcv:
9506         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9507 #endif
9508 #ifdef TARGET_NR_msgsnd
9509     case TARGET_NR_msgsnd:
9510         return do_msgsnd(arg1, arg2, arg3, arg4);
9511 #endif
9512 #ifdef TARGET_NR_shmget
9513     case TARGET_NR_shmget:
9514         return get_errno(shmget(arg1, arg2, arg3));
9515 #endif
9516 #ifdef TARGET_NR_shmctl
9517     case TARGET_NR_shmctl:
9518         return do_shmctl(arg1, arg2, arg3);
9519 #endif
9520 #ifdef TARGET_NR_shmat
9521     case TARGET_NR_shmat:
9522         return do_shmat(cpu_env, arg1, arg2, arg3);
9523 #endif
9524 #ifdef TARGET_NR_shmdt
9525     case TARGET_NR_shmdt:
9526         return do_shmdt(arg1);
9527 #endif
9528     case TARGET_NR_fsync:
9529         return get_errno(fsync(arg1));
9530     case TARGET_NR_clone:
9531         /* Linux manages to have three different orderings for its
9532          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9533          * match the kernel's CONFIG_CLONE_* settings.
9534          * Microblaze is further special in that it uses a sixth
9535          * implicit argument to clone for the TLS pointer.
9536          */
9537 #if defined(TARGET_MICROBLAZE)
9538         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9539 #elif defined(TARGET_CLONE_BACKWARDS)
9540         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9541 #elif defined(TARGET_CLONE_BACKWARDS2)
9542         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9543 #else
9544         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9545 #endif
9546         return ret;
9547 #ifdef __NR_exit_group
9548         /* new thread calls */
9549     case TARGET_NR_exit_group:
9550         preexit_cleanup(cpu_env, arg1);
9551         return get_errno(exit_group(arg1));
9552 #endif
9553     case TARGET_NR_setdomainname:
9554         if (!(p = lock_user_string(arg1)))
9555             return -TARGET_EFAULT;
9556         ret = get_errno(setdomainname(p, arg2));
9557         unlock_user(p, arg1, 0);
9558         return ret;
9559     case TARGET_NR_uname:
9560         /* no need to transcode because we use the linux syscall */
9561         {
9562             struct new_utsname * buf;
9563 
9564             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9565                 return -TARGET_EFAULT;
9566             ret = get_errno(sys_uname(buf));
9567             if (!is_error(ret)) {
9568                 /* Overwrite the native machine name with whatever is being
9569                    emulated. */
9570                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9571                           sizeof(buf->machine));
9572                 /* Allow the user to override the reported release.  */
9573                 if (qemu_uname_release && *qemu_uname_release) {
9574                     g_strlcpy(buf->release, qemu_uname_release,
9575                               sizeof(buf->release));
9576                 }
9577             }
9578             unlock_user_struct(buf, arg1, 1);
9579         }
9580         return ret;
9581 #ifdef TARGET_I386
9582     case TARGET_NR_modify_ldt:
9583         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9584 #if !defined(TARGET_X86_64)
9585     case TARGET_NR_vm86:
9586         return do_vm86(cpu_env, arg1, arg2);
9587 #endif
9588 #endif
9589 #if defined(TARGET_NR_adjtimex)
9590     case TARGET_NR_adjtimex:
9591         {
9592             struct timex host_buf;
9593 
9594             if (target_to_host_timex(&host_buf, arg1) != 0) {
9595                 return -TARGET_EFAULT;
9596             }
9597             ret = get_errno(adjtimex(&host_buf));
9598             if (!is_error(ret)) {
9599                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9600                     return -TARGET_EFAULT;
9601                 }
9602             }
9603         }
9604         return ret;
9605 #endif
9606 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9607     case TARGET_NR_clock_adjtime:
9608         {
9609             struct timex htx, *phtx = &htx;
9610 
9611             if (target_to_host_timex(phtx, arg2) != 0) {
9612                 return -TARGET_EFAULT;
9613             }
9614             ret = get_errno(clock_adjtime(arg1, phtx));
9615             if (!is_error(ret) && phtx) {
9616                 if (host_to_target_timex(arg2, phtx) != 0) {
9617                     return -TARGET_EFAULT;
9618                 }
9619             }
9620         }
9621         return ret;
9622 #endif
9623     case TARGET_NR_getpgid:
9624         return get_errno(getpgid(arg1));
9625     case TARGET_NR_fchdir:
9626         return get_errno(fchdir(arg1));
9627     case TARGET_NR_personality:
9628         return get_errno(personality(arg1));
9629 #ifdef TARGET_NR__llseek /* Not on alpha */
9630     case TARGET_NR__llseek:
9631         {
9632             int64_t res;
9633 #if !defined(__NR_llseek)
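                 /* Hosts without _llseek: build the 64-bit offset from the
                  * high/low guest words and call lseek() directly. */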
9634             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9635             if (res == -1) {
9636                 ret = get_errno(res);
9637             } else {
9638                 ret = 0;
9639             }
9640 #else
9641             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9642 #endif
9643             if ((ret == 0) && put_user_s64(res, arg4)) {
9644                 return -TARGET_EFAULT;
9645             }
9646         }
9647         return ret;
9648 #endif
9649 #ifdef TARGET_NR_getdents
9650     case TARGET_NR_getdents:
9651 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9652 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
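             /* The host linux_dirent has wider ino/off fields than the 32-bit
              * target_dirent, so read into a bounce buffer and repack each
              * record into the guest buffer. */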
9653         {
9654             struct target_dirent *target_dirp;
9655             struct linux_dirent *dirp;
9656             abi_long count = arg3;
9657 
9658             dirp = g_try_malloc(count);
9659             if (!dirp) {
9660                 return -TARGET_ENOMEM;
9661             }
9662 
9663             ret = get_errno(sys_getdents(arg1, dirp, count));
9664             if (!is_error(ret)) {
9665                 struct linux_dirent *de;
9666                 struct target_dirent *tde;
9667                 int len = ret;
9668                 int reclen, treclen;
9669                 int count1, tnamelen;
9670 
9671                 count1 = 0;
9672                 de = dirp;
9673                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9674                     return -TARGET_EFAULT;
9675                 tde = target_dirp;
9676                 while (len > 0) {
9677                     reclen = de->d_reclen;
9678                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9679                     assert(tnamelen >= 0);
9680                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9681                     assert(count1 + treclen <= count);
9682                     tde->d_reclen = tswap16(treclen);
9683                     tde->d_ino = tswapal(de->d_ino);
9684                     tde->d_off = tswapal(de->d_off);
9685                     memcpy(tde->d_name, de->d_name, tnamelen);
9686                     de = (struct linux_dirent *)((char *)de + reclen);
9687                     len -= reclen;
9688                     tde = (struct target_dirent *)((char *)tde + treclen);
9689                     count1 += treclen;
9690                 }
9691                 ret = count1;
9692                 unlock_user(target_dirp, arg2, ret);
9693             }
9694             g_free(dirp);
9695         }
9696 #else
9697         {
9698             struct linux_dirent *dirp;
9699             abi_long count = arg3;
9700 
9701             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9702                 return -TARGET_EFAULT;
9703             ret = get_errno(sys_getdents(arg1, dirp, count));
9704             if (!is_error(ret)) {
9705                 struct linux_dirent *de;
9706                 int len = ret;
9707                 int reclen;
9708                 de = dirp;
9709                 while (len > 0) {
9710                     reclen = de->d_reclen;
9711                     if (reclen > len)
9712                         break;
9713                     de->d_reclen = tswap16(reclen);
9714                     tswapls(&de->d_ino);
9715                     tswapls(&de->d_off);
9716                     de = (struct linux_dirent *)((char *)de + reclen);
9717                     len -= reclen;
9718                 }
9719             }
9720             unlock_user(dirp, arg2, ret);
9721         }
9722 #endif
9723 #else
9724         /* Implement getdents in terms of getdents64 */
9725         {
9726             struct linux_dirent64 *dirp;
9727             abi_long count = arg3;
9728 
9729             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9730             if (!dirp) {
9731                 return -TARGET_EFAULT;
9732             }
9733             ret = get_errno(sys_getdents64(arg1, dirp, count));
9734             if (!is_error(ret)) {
9735                 /* Convert the dirent64 structs to target dirent.  We do this
9736                  * in-place, since we can guarantee that a target_dirent is no
9737                  * larger than a dirent64; however this means we have to be
9738                  * careful to read everything before writing in the new format.
9739                  */
9740                 struct linux_dirent64 *de;
9741                 struct target_dirent *tde;
9742                 int len = ret;
9743                 int tlen = 0;
9744 
9745                 de = dirp;
9746                 tde = (struct target_dirent *)dirp;
9747                 while (len > 0) {
9748                     int namelen, treclen;
9749                     int reclen = de->d_reclen;
9750                     uint64_t ino = de->d_ino;
9751                     int64_t off = de->d_off;
9752                     uint8_t type = de->d_type;
9753 
9754                     namelen = strlen(de->d_name);
9755                     treclen = offsetof(struct target_dirent, d_name)
9756                         + namelen + 2; /* +2: NUL terminator and d_type byte */
9757                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9758 
9759                     memmove(tde->d_name, de->d_name, namelen + 1);
9760                     tde->d_ino = tswapal(ino);
9761                     tde->d_off = tswapal(off);
9762                     tde->d_reclen = tswap16(treclen);
9763                     /* The target_dirent type is in what was formerly a padding
9764                      * byte at the end of the structure:
9765                      */
9766                     *(((char *)tde) + treclen - 1) = type;
9767 
9768                     de = (struct linux_dirent64 *)((char *)de + reclen);
9769                     tde = (struct target_dirent *)((char *)tde + treclen);
9770                     len -= reclen;
9771                     tlen += treclen;
9772                 }
9773                 ret = tlen;
9774             }
9775             unlock_user(dirp, arg2, ret);
9776         }
9777 #endif
9778         return ret;
9779 #endif /* TARGET_NR_getdents */
9780 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9781     case TARGET_NR_getdents64:
9782         {
9783             struct linux_dirent64 *dirp;
9784             abi_long count = arg3;
9785             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9786                 return -TARGET_EFAULT;
9787             ret = get_errno(sys_getdents64(arg1, dirp, count));
9788             if (!is_error(ret)) {
9789                 struct linux_dirent64 *de;
9790                 int len = ret;
9791                 int reclen;
9792                 de = dirp;
9793                 while (len > 0) {
9794                     reclen = de->d_reclen;
9795                     if (reclen > len)
9796                         break;
9797                     de->d_reclen = tswap16(reclen);
9798                     tswap64s((uint64_t *)&de->d_ino);
9799                     tswap64s((uint64_t *)&de->d_off);
9800                     de = (struct linux_dirent64 *)((char *)de + reclen);
9801                     len -= reclen;
9802                 }
9803             }
9804             unlock_user(dirp, arg2, ret);
9805         }
9806         return ret;
9807 #endif /* TARGET_NR_getdents64 */
9808 #if defined(TARGET_NR__newselect)
9809     case TARGET_NR__newselect:
9810         return do_select(arg1, arg2, arg3, arg4, arg5);
9811 #endif
9812 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9813 # ifdef TARGET_NR_poll
9814     case TARGET_NR_poll:
9815 # endif
9816 # ifdef TARGET_NR_ppoll
9817     case TARGET_NR_ppoll:
9818 # endif
9819         {
9820             struct target_pollfd *target_pfd;
9821             unsigned int nfds = arg2;
9822             struct pollfd *pfd;
9823             unsigned int i;
9824 
9825             pfd = NULL;
9826             target_pfd = NULL;
9827             if (nfds) {
9828                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9829                     return -TARGET_EINVAL;
9830                 }
9831 
9832                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9833                                        sizeof(struct target_pollfd) * nfds, 1);
9834                 if (!target_pfd) {
9835                     return -TARGET_EFAULT;
9836                 }
9837 
9838                 pfd = alloca(sizeof(struct pollfd) * nfds);
9839                 for (i = 0; i < nfds; i++) {
9840                     pfd[i].fd = tswap32(target_pfd[i].fd);
9841                     pfd[i].events = tswap16(target_pfd[i].events);
9842                 }
9843             }
9844 
9845             switch (num) {
9846 # ifdef TARGET_NR_ppoll
9847             case TARGET_NR_ppoll:
9848             {
9849                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9850                 target_sigset_t *target_set;
9851                 sigset_t _set, *set = &_set;
9852 
9853                 if (arg3) {
9854                     if (target_to_host_timespec(timeout_ts, arg3)) {
9855                         unlock_user(target_pfd, arg1, 0);
9856                         return -TARGET_EFAULT;
9857                     }
9858                 } else {
9859                     timeout_ts = NULL;
9860                 }
9861 
9862                 if (arg4) {
9863                     if (arg5 != sizeof(target_sigset_t)) {
9864                         unlock_user(target_pfd, arg1, 0);
9865                         return -TARGET_EINVAL;
9866                     }
9867 
9868                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9869                     if (!target_set) {
9870                         unlock_user(target_pfd, arg1, 0);
9871                         return -TARGET_EFAULT;
9872                     }
9873                     target_to_host_sigset(set, target_set);
9874                 } else {
9875                     set = NULL;
9876                 }
9877 
9878                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9879                                            set, SIGSET_T_SIZE));
9880 
9881                 if (!is_error(ret) && arg3) {
9882                     host_to_target_timespec(arg3, timeout_ts);
9883                 }
9884                 if (arg4) {
9885                     unlock_user(target_set, arg4, 0);
9886                 }
9887                 break;
9888             }
9889 # endif
9890 # ifdef TARGET_NR_poll
9891             case TARGET_NR_poll:
9892             {
9893                 struct timespec ts, *pts;
9894 
9895                 if (arg3 >= 0) {
9896                     /* Convert ms to secs, ns */
9897                     ts.tv_sec = arg3 / 1000;
9898                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9899                     pts = &ts;
9900                 } else {
9901                     /* -ve poll() timeout means "infinite" */
9902                     pts = NULL;
9903                 }
9904                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9905                 break;
9906             }
9907 # endif
9908             default:
9909                 g_assert_not_reached();
9910             }
9911 
9912             if (!is_error(ret)) {
9913                 for(i = 0; i < nfds; i++) {
9914                     target_pfd[i].revents = tswap16(pfd[i].revents);
9915                 }
9916             }
9917             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9918         }
9919         return ret;
9920 #endif
9921     case TARGET_NR_flock:
9922         /* NOTE: the flock constant seems to be the same for every
9923            Linux platform */
9924         return get_errno(safe_flock(arg1, arg2));
9925     case TARGET_NR_readv:
9926         {
9927             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9928             if (vec != NULL) {
9929                 ret = get_errno(safe_readv(arg1, vec, arg3));
9930                 unlock_iovec(vec, arg2, arg3, 1);
9931             } else {
9932                 ret = -host_to_target_errno(errno);
9933             }
9934         }
9935         return ret;
9936     case TARGET_NR_writev:
9937         {
9938             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9939             if (vec != NULL) {
9940                 ret = get_errno(safe_writev(arg1, vec, arg3));
9941                 unlock_iovec(vec, arg2, arg3, 0);
9942             } else {
9943                 ret = -host_to_target_errno(errno);
9944             }
9945         }
9946         return ret;
9947 #if defined(TARGET_NR_preadv)
9948     case TARGET_NR_preadv:
9949         {
9950             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9951             if (vec != NULL) {
9952                 unsigned long low, high;
9953 
9954                 target_to_host_low_high(arg4, arg5, &low, &high);
9955                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9956                 unlock_iovec(vec, arg2, arg3, 1);
9957             } else {
9958                 ret = -host_to_target_errno(errno);
9959            }
9960         }
9961         return ret;
9962 #endif
9963 #if defined(TARGET_NR_pwritev)
9964     case TARGET_NR_pwritev:
9965         {
9966             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9967             if (vec != NULL) {
9968                 unsigned long low, high;
9969 
9970                 target_to_host_low_high(arg4, arg5, &low, &high);
9971                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9972                 unlock_iovec(vec, arg2, arg3, 0);
9973             } else {
9974                 ret = -host_to_target_errno(errno);
9975            }
9976         }
9977         return ret;
9978 #endif
9979     case TARGET_NR_getsid:
9980         return get_errno(getsid(arg1));
9981 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9982     case TARGET_NR_fdatasync:
9983         return get_errno(fdatasync(arg1));
9984 #endif
9985 #ifdef TARGET_NR__sysctl
9986     case TARGET_NR__sysctl:
9987         /* We don't implement this, but ENOTDIR is always a safe
9988            return value. */
9989         return -TARGET_ENOTDIR;
9990 #endif
9991     case TARGET_NR_sched_getaffinity:
9992         {
9993             unsigned int mask_size;
9994             unsigned long *mask;
9995 
9996             /*
9997              * sched_getaffinity needs multiples of ulong, so need to take
9998              * care of mismatches between target ulong and host ulong sizes.
9999              */
10000             if (arg2 & (sizeof(abi_ulong) - 1)) {
10001                 return -TARGET_EINVAL;
10002             }
10003             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10004 
10005             mask = alloca(mask_size);
10006             memset(mask, 0, mask_size);
10007             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10008 
10009             if (!is_error(ret)) {
10010                 if (ret > arg2) {
10011                     /* More data returned than the caller's buffer will fit.
10012                      * This only happens if sizeof(abi_long) < sizeof(long)
10013                      * and the caller passed us a buffer holding an odd number
10014                      * of abi_longs. If the host kernel is actually using the
10015                      * extra 4 bytes then fail EINVAL; otherwise we can just
10016                      * ignore them and only copy the interesting part.
10017                      */
10018                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10019                     if (numcpus > arg2 * 8) {
10020                         return -TARGET_EINVAL;
10021                     }
10022                     ret = arg2;
10023                 }
10024 
10025                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10026                     return -TARGET_EFAULT;
10027                 }
10028             }
10029         }
10030         return ret;
10031     case TARGET_NR_sched_setaffinity:
10032         {
10033             unsigned int mask_size;
10034             unsigned long *mask;
10035 
10036             /*
10037              * sched_setaffinity needs multiples of ulong, so need to take
10038              * care of mismatches between target ulong and host ulong sizes.
10039              */
10040             if (arg2 & (sizeof(abi_ulong) - 1)) {
10041                 return -TARGET_EINVAL;
10042             }
10043             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10044             mask = alloca(mask_size);
10045 
10046             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10047             if (ret) {
10048                 return ret;
10049             }
10050 
10051             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10052         }
10053     case TARGET_NR_getcpu:
10054         {
10055             unsigned cpu, node;
10056             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10057                                        arg2 ? &node : NULL,
10058                                        NULL));
10059             if (is_error(ret)) {
10060                 return ret;
10061             }
10062             if (arg1 && put_user_u32(cpu, arg1)) {
10063                 return -TARGET_EFAULT;
10064             }
10065             if (arg2 && put_user_u32(node, arg2)) {
10066                 return -TARGET_EFAULT;
10067             }
10068         }
10069         return ret;
10070     case TARGET_NR_sched_setparam:
10071         {
10072             struct sched_param *target_schp;
10073             struct sched_param schp;
10074 
10075             if (arg2 == 0) {
10076                 return -TARGET_EINVAL;
10077             }
10078             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10079                 return -TARGET_EFAULT;
10080             schp.sched_priority = tswap32(target_schp->sched_priority);
10081             unlock_user_struct(target_schp, arg2, 0);
10082             return get_errno(sched_setparam(arg1, &schp));
10083         }
10084     case TARGET_NR_sched_getparam:
10085         {
10086             struct sched_param *target_schp;
10087             struct sched_param schp;
10088 
10089             if (arg2 == 0) {
10090                 return -TARGET_EINVAL;
10091             }
10092             ret = get_errno(sched_getparam(arg1, &schp));
10093             if (!is_error(ret)) {
10094                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10095                     return -TARGET_EFAULT;
10096                 target_schp->sched_priority = tswap32(schp.sched_priority);
10097                 unlock_user_struct(target_schp, arg2, 1);
10098             }
10099         }
10100         return ret;
10101     case TARGET_NR_sched_setscheduler:
10102         {
10103             struct sched_param *target_schp;
10104             struct sched_param schp;
10105             if (arg3 == 0) {
10106                 return -TARGET_EINVAL;
10107             }
10108             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10109                 return -TARGET_EFAULT;
10110             schp.sched_priority = tswap32(target_schp->sched_priority);
10111             unlock_user_struct(target_schp, arg3, 0);
10112             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10113         }
10114     case TARGET_NR_sched_getscheduler:
10115         return get_errno(sched_getscheduler(arg1));
10116     case TARGET_NR_sched_yield:
10117         return get_errno(sched_yield());
10118     case TARGET_NR_sched_get_priority_max:
10119         return get_errno(sched_get_priority_max(arg1));
10120     case TARGET_NR_sched_get_priority_min:
10121         return get_errno(sched_get_priority_min(arg1));
10122 #ifdef TARGET_NR_sched_rr_get_interval
10123     case TARGET_NR_sched_rr_get_interval:
10124         {
10125             struct timespec ts;
10126             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10127             if (!is_error(ret)) {
10128                 ret = host_to_target_timespec(arg2, &ts);
10129             }
10130         }
10131         return ret;
10132 #endif
10133 #if defined(TARGET_NR_nanosleep)
10134     case TARGET_NR_nanosleep:
10135         {
10136             struct timespec req, rem;
10137             target_to_host_timespec(&req, arg1);
10138             ret = get_errno(safe_nanosleep(&req, &rem));
10139             if (is_error(ret) && arg2) {
10140                 host_to_target_timespec(arg2, &rem);
10141             }
10142         }
10143         return ret;
10144 #endif
10145     case TARGET_NR_prctl:
10146         switch (arg1) {
10147         case PR_GET_PDEATHSIG:
10148         {
10149             int deathsig;
10150             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10151             if (!is_error(ret) && arg2
10152                 && put_user_ual(deathsig, arg2)) {
10153                 return -TARGET_EFAULT;
10154             }
10155             return ret;
10156         }
10157 #ifdef PR_GET_NAME
10158         case PR_GET_NAME:
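              /* The kernel thread name (comm) is at most 16 bytes, including the NUL. */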
10159         {
10160             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10161             if (!name) {
10162                 return -TARGET_EFAULT;
10163             }
10164             ret = get_errno(prctl(arg1, (unsigned long)name,
10165                                   arg3, arg4, arg5));
10166             unlock_user(name, arg2, 16);
10167             return ret;
10168         }
10169         case PR_SET_NAME:
10170         {
10171             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10172             if (!name) {
10173                 return -TARGET_EFAULT;
10174             }
10175             ret = get_errno(prctl(arg1, (unsigned long)name,
10176                                   arg3, arg4, arg5));
10177             unlock_user(name, arg2, 0);
10178             return ret;
10179         }
10180 #endif
10181 #ifdef TARGET_MIPS
10182         case TARGET_PR_GET_FP_MODE:
10183         {
10184             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10185             ret = 0;
10186             if (env->CP0_Status & (1 << CP0St_FR)) {
10187                 ret |= TARGET_PR_FP_MODE_FR;
10188             }
10189             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10190                 ret |= TARGET_PR_FP_MODE_FRE;
10191             }
10192             return ret;
10193         }
10194         case TARGET_PR_SET_FP_MODE:
10195         {
10196             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10197             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10198             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10199             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10200             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10201 
10202             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10203                                             TARGET_PR_FP_MODE_FRE;
10204 
10205             /* If nothing to change, return right away, successfully.  */
10206             if (old_fr == new_fr && old_fre == new_fre) {
10207                 return 0;
10208             }
10209             /* Check the value is valid */
10210             if (arg2 & ~known_bits) {
10211                 return -TARGET_EOPNOTSUPP;
10212             }
10213             /* Setting FRE without FR is not supported.  */
10214             if (new_fre && !new_fr) {
10215                 return -TARGET_EOPNOTSUPP;
10216             }
10217             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10218                 /* FR1 is not supported */
10219                 return -TARGET_EOPNOTSUPP;
10220             }
10221             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10222                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10223                 /* cannot set FR=0 */
10224                 return -TARGET_EOPNOTSUPP;
10225             }
10226             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10227                 /* Cannot set FRE=1 */
10228                 return -TARGET_EOPNOTSUPP;
10229             }
10230 
10231             int i;
10232             fpr_t *fpr = env->active_fpu.fpr;
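                  /* Preserve double values across the mode switch: with FR=0 the
                   * upper half of a double lives in the odd register of the pair,
                   * with FR=1 it lives in the upper half of the even register. */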
10233             for (i = 0; i < 32 ; i += 2) {
10234                 if (!old_fr && new_fr) {
10235                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10236                 } else if (old_fr && !new_fr) {
10237                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10238                 }
10239             }
10240 
10241             if (new_fr) {
10242                 env->CP0_Status |= (1 << CP0St_FR);
10243                 env->hflags |= MIPS_HFLAG_F64;
10244             } else {
10245                 env->CP0_Status &= ~(1 << CP0St_FR);
10246                 env->hflags &= ~MIPS_HFLAG_F64;
10247             }
10248             if (new_fre) {
10249                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10250                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10251                     env->hflags |= MIPS_HFLAG_FRE;
10252                 }
10253             } else {
10254                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10255                 env->hflags &= ~MIPS_HFLAG_FRE;
10256             }
10257 
10258             return 0;
10259         }
10260 #endif /* MIPS */
10261 #ifdef TARGET_AARCH64
10262         case TARGET_PR_SVE_SET_VL:
10263             /*
10264              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10265              * PR_SVE_VL_INHERIT.  Note the kernel definition
10266              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10267              * even though the current architectural maximum is VQ=16.
10268              */
10269             ret = -TARGET_EINVAL;
10270             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10271                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10272                 CPUARMState *env = cpu_env;
10273                 ARMCPU *cpu = env_archcpu(env);
10274                 uint32_t vq, old_vq;
10275 
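                      /* The low 4 bits of ZCR_EL1 hold (vq - 1), where vq = VL / 16. */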
10276                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10277                 vq = MAX(arg2 / 16, 1);
10278                 vq = MIN(vq, cpu->sve_max_vq);
10279 
10280                 if (vq < old_vq) {
10281                     aarch64_sve_narrow_vq(env, vq);
10282                 }
10283                 env->vfp.zcr_el[1] = vq - 1;
10284                 arm_rebuild_hflags(env);
10285                 ret = vq * 16;
10286             }
10287             return ret;
10288         case TARGET_PR_SVE_GET_VL:
10289             ret = -TARGET_EINVAL;
10290             {
10291                 ARMCPU *cpu = env_archcpu(cpu_env);
10292                 if (cpu_isar_feature(aa64_sve, cpu)) {
10293                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10294                 }
10295             }
10296             return ret;
10297         case TARGET_PR_PAC_RESET_KEYS:
10298             {
10299                 CPUARMState *env = cpu_env;
10300                 ARMCPU *cpu = env_archcpu(env);
10301 
10302                 if (arg3 || arg4 || arg5) {
10303                     return -TARGET_EINVAL;
10304                 }
10305                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10306                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10307                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10308                                TARGET_PR_PAC_APGAKEY);
10309                     int ret = 0;
10310                     Error *err = NULL;
10311 
10312                     if (arg2 == 0) {
10313                         arg2 = all;
10314                     } else if (arg2 & ~all) {
10315                         return -TARGET_EINVAL;
10316                     }
10317                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10318                         ret |= qemu_guest_getrandom(&env->keys.apia,
10319                                                     sizeof(ARMPACKey), &err);
10320                     }
10321                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10322                         ret |= qemu_guest_getrandom(&env->keys.apib,
10323                                                     sizeof(ARMPACKey), &err);
10324                     }
10325                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10326                         ret |= qemu_guest_getrandom(&env->keys.apda,
10327                                                     sizeof(ARMPACKey), &err);
10328                     }
10329                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10330                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10331                                                     sizeof(ARMPACKey), &err);
10332                     }
10333                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10334                         ret |= qemu_guest_getrandom(&env->keys.apga,
10335                                                     sizeof(ARMPACKey), &err);
10336                     }
10337                     if (ret != 0) {
10338                         /*
10339                          * Some unknown failure in the crypto.  The best
10340                          * we can do is log it and fail the syscall.
10341                          * The real syscall cannot fail this way.
10342                          */
10343                         qemu_log_mask(LOG_UNIMP,
10344                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10345                                       error_get_pretty(err));
10346                         error_free(err);
10347                         return -TARGET_EIO;
10348                     }
10349                     return 0;
10350                 }
10351             }
10352             return -TARGET_EINVAL;
10353 #endif /* AARCH64 */
10354         case PR_GET_SECCOMP:
10355         case PR_SET_SECCOMP:
10356             /* Disable seccomp to prevent the target disabling syscalls we
10357              * need. */
10358             return -TARGET_EINVAL;
10359         default:
10360             /* Most prctl options have no pointer arguments */
10361             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10362         }
10363         break;
10364 #ifdef TARGET_NR_arch_prctl
10365     case TARGET_NR_arch_prctl:
10366         return do_arch_prctl(cpu_env, arg1, arg2);
10367 #endif
10368 #ifdef TARGET_NR_pread64
10369     case TARGET_NR_pread64:
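              /* Some ABIs pass 64-bit values in aligned register pairs, which
               * inserts a padding argument before the offset words. */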
10370         if (regpairs_aligned(cpu_env, num)) {
10371             arg4 = arg5;
10372             arg5 = arg6;
10373         }
10374         if (arg2 == 0 && arg3 == 0) {
10375             /* Special-case NULL buffer and zero length, which should succeed */
10376             p = 0;
10377         } else {
10378             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10379             if (!p) {
10380                 return -TARGET_EFAULT;
10381             }
10382         }
10383         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10384         unlock_user(p, arg2, ret);
10385         return ret;
10386     case TARGET_NR_pwrite64:
10387         if (regpairs_aligned(cpu_env, num)) {
10388             arg4 = arg5;
10389             arg5 = arg6;
10390         }
10391         if (arg2 == 0 && arg3 == 0) {
10392             /* Special-case NULL buffer and zero length, which should succeed */
10393             p = 0;
10394         } else {
10395             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10396             if (!p) {
10397                 return -TARGET_EFAULT;
10398             }
10399         }
10400         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10401         unlock_user(p, arg2, 0);
10402         return ret;
10403 #endif
10404     case TARGET_NR_getcwd:
10405         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10406             return -TARGET_EFAULT;
10407         ret = get_errno(sys_getcwd1(p, arg2));
10408         unlock_user(p, arg1, ret);
10409         return ret;
10410     case TARGET_NR_capget:
10411     case TARGET_NR_capset:
10412     {
10413         struct target_user_cap_header *target_header;
10414         struct target_user_cap_data *target_data = NULL;
10415         struct __user_cap_header_struct header;
10416         struct __user_cap_data_struct data[2];
10417         struct __user_cap_data_struct *dataptr = NULL;
10418         int i, target_datalen;
10419         int data_items = 1;
10420 
10421         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10422             return -TARGET_EFAULT;
10423         }
10424         header.version = tswap32(target_header->version);
10425         header.pid = tswap32(target_header->pid);
10426 
10427         if (header.version != _LINUX_CAPABILITY_VERSION) {
10428             /* Version 2 and up takes pointer to two user_data structs */
10429             data_items = 2;
10430         }
10431 
10432         target_datalen = sizeof(*target_data) * data_items;
10433 
10434         if (arg2) {
10435             if (num == TARGET_NR_capget) {
10436                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10437             } else {
10438                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10439             }
10440             if (!target_data) {
10441                 unlock_user_struct(target_header, arg1, 0);
10442                 return -TARGET_EFAULT;
10443             }
10444 
10445             if (num == TARGET_NR_capset) {
10446                 for (i = 0; i < data_items; i++) {
10447                     data[i].effective = tswap32(target_data[i].effective);
10448                     data[i].permitted = tswap32(target_data[i].permitted);
10449                     data[i].inheritable = tswap32(target_data[i].inheritable);
10450                 }
10451             }
10452 
10453             dataptr = data;
10454         }
10455 
10456         if (num == TARGET_NR_capget) {
10457             ret = get_errno(capget(&header, dataptr));
10458         } else {
10459             ret = get_errno(capset(&header, dataptr));
10460         }
10461 
10462         /* The kernel always updates version for both capget and capset */
10463         target_header->version = tswap32(header.version);
10464         unlock_user_struct(target_header, arg1, 1);
10465 
10466         if (arg2) {
10467             if (num == TARGET_NR_capget) {
10468                 for (i = 0; i < data_items; i++) {
10469                     target_data[i].effective = tswap32(data[i].effective);
10470                     target_data[i].permitted = tswap32(data[i].permitted);
10471                     target_data[i].inheritable = tswap32(data[i].inheritable);
10472                 }
10473                 unlock_user(target_data, arg2, target_datalen);
10474             } else {
10475                 unlock_user(target_data, arg2, 0);
10476             }
10477         }
10478         return ret;
10479     }
10480     case TARGET_NR_sigaltstack:
10481         return do_sigaltstack(arg1, arg2,
10482                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10483 
10484 #ifdef CONFIG_SENDFILE
10485 #ifdef TARGET_NR_sendfile
10486     case TARGET_NR_sendfile:
10487     {
10488         off_t *offp = NULL;
10489         off_t off;
10490         if (arg3) {
10491             ret = get_user_sal(off, arg3);
10492             if (is_error(ret)) {
10493                 return ret;
10494             }
10495             offp = &off;
10496         }
10497         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10498         if (!is_error(ret) && arg3) {
10499             abi_long ret2 = put_user_sal(off, arg3);
10500             if (is_error(ret2)) {
10501                 ret = ret2;
10502             }
10503         }
10504         return ret;
10505     }
10506 #endif
10507 #ifdef TARGET_NR_sendfile64
10508     case TARGET_NR_sendfile64:
10509     {
10510         off_t *offp = NULL;
10511         off_t off;
10512         if (arg3) {
10513             ret = get_user_s64(off, arg3);
10514             if (is_error(ret)) {
10515                 return ret;
10516             }
10517             offp = &off;
10518         }
10519         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10520         if (!is_error(ret) && arg3) {
10521             abi_long ret2 = put_user_s64(off, arg3);
10522             if (is_error(ret2)) {
10523                 ret = ret2;
10524             }
10525         }
10526         return ret;
10527     }
10528 #endif
10529 #endif
10530 #ifdef TARGET_NR_vfork
10531     case TARGET_NR_vfork:
10532         return get_errno(do_fork(cpu_env,
10533                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10534                          0, 0, 0, 0));
10535 #endif
10536 #ifdef TARGET_NR_ugetrlimit
10537     case TARGET_NR_ugetrlimit:
10538     {
10539         struct rlimit rlim;
10540         int resource = target_to_host_resource(arg1);
10541         ret = get_errno(getrlimit(resource, &rlim));
10542         if (!is_error(ret)) {
10543             struct target_rlimit *target_rlim;
10544             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10545                 return -TARGET_EFAULT;
10546             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10547             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10548             unlock_user_struct(target_rlim, arg2, 1);
10549         }
10550         return ret;
10551     }
10552 #endif
10553 #ifdef TARGET_NR_truncate64
10554     case TARGET_NR_truncate64:
10555         if (!(p = lock_user_string(arg1)))
10556             return -TARGET_EFAULT;
10557         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10558         unlock_user(p, arg1, 0);
10559         return ret;
10560 #endif
10561 #ifdef TARGET_NR_ftruncate64
10562     case TARGET_NR_ftruncate64:
10563         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10564 #endif
10565 #ifdef TARGET_NR_stat64
10566     case TARGET_NR_stat64:
10567         if (!(p = lock_user_string(arg1))) {
10568             return -TARGET_EFAULT;
10569         }
10570         ret = get_errno(stat(path(p), &st));
10571         unlock_user(p, arg1, 0);
10572         if (!is_error(ret))
10573             ret = host_to_target_stat64(cpu_env, arg2, &st);
10574         return ret;
10575 #endif
10576 #ifdef TARGET_NR_lstat64
10577     case TARGET_NR_lstat64:
10578         if (!(p = lock_user_string(arg1))) {
10579             return -TARGET_EFAULT;
10580         }
10581         ret = get_errno(lstat(path(p), &st));
10582         unlock_user(p, arg1, 0);
10583         if (!is_error(ret))
10584             ret = host_to_target_stat64(cpu_env, arg2, &st);
10585         return ret;
10586 #endif
10587 #ifdef TARGET_NR_fstat64
10588     case TARGET_NR_fstat64:
10589         ret = get_errno(fstat(arg1, &st));
10590         if (!is_error(ret))
10591             ret = host_to_target_stat64(cpu_env, arg2, &st);
10592         return ret;
10593 #endif
10594 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10595 #ifdef TARGET_NR_fstatat64
10596     case TARGET_NR_fstatat64:
10597 #endif
10598 #ifdef TARGET_NR_newfstatat
10599     case TARGET_NR_newfstatat:
10600 #endif
10601         if (!(p = lock_user_string(arg2))) {
10602             return -TARGET_EFAULT;
10603         }
10604         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10605         unlock_user(p, arg2, 0);
10606         if (!is_error(ret))
10607             ret = host_to_target_stat64(cpu_env, arg3, &st);
10608         return ret;
10609 #endif
10610 #if defined(TARGET_NR_statx)
10611     case TARGET_NR_statx:
10612         {
10613             struct target_statx *target_stx;
10614             int dirfd = arg1;
10615             int flags = arg3;
10616 
10617             p = lock_user_string(arg2);
10618             if (p == NULL) {
10619                 return -TARGET_EFAULT;
10620             }
10621 #if defined(__NR_statx)
10622             {
10623                 /*
10624                  * It is assumed that struct statx is architecture independent.
10625                  */
10626                 struct target_statx host_stx;
10627                 int mask = arg4;
10628 
10629                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10630                 if (!is_error(ret)) {
10631                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10632                         unlock_user(p, arg2, 0);
10633                         return -TARGET_EFAULT;
10634                     }
10635                 }
10636 
10637                 if (ret != -TARGET_ENOSYS) {
10638                     unlock_user(p, arg2, 0);
10639                     return ret;
10640                 }
10641             }
10642 #endif
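            /*
             * Either the host has no statx() or it reported ENOSYS: fall
             * back to fstatat() and build the guest's statx structure by
             * hand from the ordinary stat result.
             */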
10643             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10644             unlock_user(p, arg2, 0);
10645 
10646             if (!is_error(ret)) {
10647                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10648                     return -TARGET_EFAULT;
10649                 }
10650                 memset(target_stx, 0, sizeof(*target_stx));
10651                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10652                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10653                 __put_user(st.st_ino, &target_stx->stx_ino);
10654                 __put_user(st.st_mode, &target_stx->stx_mode);
10655                 __put_user(st.st_uid, &target_stx->stx_uid);
10656                 __put_user(st.st_gid, &target_stx->stx_gid);
10657                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10658                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10659                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10660                 __put_user(st.st_size, &target_stx->stx_size);
10661                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10662                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10663                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10664                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10665                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10666                 unlock_user_struct(target_stx, arg5, 1);
10667             }
10668         }
10669         return ret;
10670 #endif
10671 #ifdef TARGET_NR_lchown
10672     case TARGET_NR_lchown:
10673         if (!(p = lock_user_string(arg1)))
10674             return -TARGET_EFAULT;
10675         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10676         unlock_user(p, arg1, 0);
10677         return ret;
10678 #endif
10679 #ifdef TARGET_NR_getuid
10680     case TARGET_NR_getuid:
10681         return get_errno(high2lowuid(getuid()));
10682 #endif
10683 #ifdef TARGET_NR_getgid
10684     case TARGET_NR_getgid:
10685         return get_errno(high2lowgid(getgid()));
10686 #endif
10687 #ifdef TARGET_NR_geteuid
10688     case TARGET_NR_geteuid:
10689         return get_errno(high2lowuid(geteuid()));
10690 #endif
10691 #ifdef TARGET_NR_getegid
10692     case TARGET_NR_getegid:
10693         return get_errno(high2lowgid(getegid()));
10694 #endif
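    /*
     * The legacy uid/gid syscalls below go through low2highuid(),
     * high2lowuid() and their gid counterparts, which map between the
     * guest's (possibly 16-bit) ids and the host's full-width ones.
     */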
10695     case TARGET_NR_setreuid:
10696         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10697     case TARGET_NR_setregid:
10698         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10699     case TARGET_NR_getgroups:
10700         {
10701             int gidsetsize = arg1;
10702             target_id *target_grouplist;
10703             gid_t *grouplist;
10704             int i;
10705 
10706             grouplist = alloca(gidsetsize * sizeof(gid_t));
10707             ret = get_errno(getgroups(gidsetsize, grouplist));
10708             if (gidsetsize == 0)
10709                 return ret;
10710             if (!is_error(ret)) {
10711                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10712                 if (!target_grouplist)
10713                     return -TARGET_EFAULT;
10714                 for (i = 0; i < ret; i++)
10715                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10716                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10717             }
10718         }
10719         return ret;
10720     case TARGET_NR_setgroups:
10721         {
10722             int gidsetsize = arg1;
10723             target_id *target_grouplist;
10724             gid_t *grouplist = NULL;
10725             int i;
10726             if (gidsetsize) {
10727                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10728                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10729                 if (!target_grouplist) {
10730                     return -TARGET_EFAULT;
10731                 }
10732                 for (i = 0; i < gidsetsize; i++) {
10733                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10734                 }
10735                 unlock_user(target_grouplist, arg2, 0);
10736             }
10737             return get_errno(setgroups(gidsetsize, grouplist));
10738         }
10739     case TARGET_NR_fchown:
10740         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10741 #if defined(TARGET_NR_fchownat)
10742     case TARGET_NR_fchownat:
10743         if (!(p = lock_user_string(arg2)))
10744             return -TARGET_EFAULT;
10745         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10746                                  low2highgid(arg4), arg5));
10747         unlock_user(p, arg2, 0);
10748         return ret;
10749 #endif
10750 #ifdef TARGET_NR_setresuid
10751     case TARGET_NR_setresuid:
10752         return get_errno(sys_setresuid(low2highuid(arg1),
10753                                        low2highuid(arg2),
10754                                        low2highuid(arg3)));
10755 #endif
10756 #ifdef TARGET_NR_getresuid
10757     case TARGET_NR_getresuid:
10758         {
10759             uid_t ruid, euid, suid;
10760             ret = get_errno(getresuid(&ruid, &euid, &suid));
10761             if (!is_error(ret)) {
10762                 if (put_user_id(high2lowuid(ruid), arg1)
10763                     || put_user_id(high2lowuid(euid), arg2)
10764                     || put_user_id(high2lowuid(suid), arg3))
10765                     return -TARGET_EFAULT;
10766             }
10767         }
10768         return ret;
10769 #endif
10770 #ifdef TARGET_NR_getresgid
10771     case TARGET_NR_setresgid:
10772         return get_errno(sys_setresgid(low2highgid(arg1),
10773                                        low2highgid(arg2),
10774                                        low2highgid(arg3)));
10775 #endif
10776 #ifdef TARGET_NR_getresgid
10777     case TARGET_NR_getresgid:
10778         {
10779             gid_t rgid, egid, sgid;
10780             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10781             if (!is_error(ret)) {
10782                 if (put_user_id(high2lowgid(rgid), arg1)
10783                     || put_user_id(high2lowgid(egid), arg2)
10784                     || put_user_id(high2lowgid(sgid), arg3))
10785                     return -TARGET_EFAULT;
10786             }
10787         }
10788         return ret;
10789 #endif
10790 #ifdef TARGET_NR_chown
10791     case TARGET_NR_chown:
10792         if (!(p = lock_user_string(arg1)))
10793             return -TARGET_EFAULT;
10794         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10795         unlock_user(p, arg1, 0);
10796         return ret;
10797 #endif
10798     case TARGET_NR_setuid:
10799         return get_errno(sys_setuid(low2highuid(arg1)));
10800     case TARGET_NR_setgid:
10801         return get_errno(sys_setgid(low2highgid(arg1)));
10802     case TARGET_NR_setfsuid:
10803         return get_errno(setfsuid(arg1));
10804     case TARGET_NR_setfsgid:
10805         return get_errno(setfsgid(arg1));
10806 
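    /*
     * The *32 syscall variants take full-width uids/gids, so the values
     * are handed to the host unchanged, without the low2high/high2low
     * mapping used by the legacy calls above.
     */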
10807 #ifdef TARGET_NR_lchown32
10808     case TARGET_NR_lchown32:
10809         if (!(p = lock_user_string(arg1)))
10810             return -TARGET_EFAULT;
10811         ret = get_errno(lchown(p, arg2, arg3));
10812         unlock_user(p, arg1, 0);
10813         return ret;
10814 #endif
10815 #ifdef TARGET_NR_getuid32
10816     case TARGET_NR_getuid32:
10817         return get_errno(getuid());
10818 #endif
10819 
10820 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10821     /* Alpha specific */
10822     case TARGET_NR_getxuid:
10823         {
10824             uid_t euid;
10825             euid = geteuid();
10826             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10827         }
10828         return get_errno(getuid());
10829 #endif
10830 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10831     /* Alpha specific */
10832     case TARGET_NR_getxgid:
10833         {
10834             gid_t egid;
10835             egid = getegid();
10836             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10837         }
10838         return get_errno(getgid());
10839 #endif
10840 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10841     /* Alpha specific */
10842     case TARGET_NR_osf_getsysinfo:
10843         ret = -TARGET_EOPNOTSUPP;
10844         switch (arg1) {
10845           case TARGET_GSI_IEEE_FP_CONTROL:
10846             {
10847                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10848                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10849 
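                /*
                 * Report the guest's software control word, but with its
                 * status bits refreshed from the hardware fpcr (the status
                 * field lives at bit 35 and up of the fpcr).
                 */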
10850                 swcr &= ~SWCR_STATUS_MASK;
10851                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10852 
10853                 if (put_user_u64(swcr, arg2))
10854                     return -TARGET_EFAULT;
10855                 ret = 0;
10856             }
10857             break;
10858 
10859           /* case GSI_IEEE_STATE_AT_SIGNAL:
10860              -- Not implemented in linux kernel.
10861              case GSI_UACPROC:
10862              -- Retrieves current unaligned access state; not much used.
10863              case GSI_PROC_TYPE:
10864              -- Retrieves implver information; surely not used.
10865              case GSI_GET_HWRPB:
10866              -- Grabs a copy of the HWRPB; surely not used.
10867           */
10868         }
10869         return ret;
10870 #endif
10871 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10872     /* Alpha specific */
10873     case TARGET_NR_osf_setsysinfo:
10874         ret = -TARGET_EOPNOTSUPP;
10875         switch (arg1) {
10876           case TARGET_SSI_IEEE_FP_CONTROL:
10877             {
10878                 uint64_t swcr, fpcr;
10879 
10880                 if (get_user_u64(swcr, arg2)) {
10881                     return -TARGET_EFAULT;
10882                 }
10883 
10884                 /*
10885                  * The kernel calls swcr_update_status to update the
10886                  * status bits from the fpcr at every point that it
10887                  * could be queried.  Therefore, we store the status
10888                  * bits only in FPCR.
10889                  */
10890                 ((CPUAlphaState *)cpu_env)->swcr
10891                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10892 
10893                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10894                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10895                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10896                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10897                 ret = 0;
10898             }
10899             break;
10900 
10901           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10902             {
10903                 uint64_t exc, fpcr, fex;
10904 
10905                 if (get_user_u64(exc, arg2)) {
10906                     return -TARGET_EFAULT;
10907                 }
10908                 exc &= SWCR_STATUS_MASK;
10909                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10910 
10911                 /* Old exceptions are not signaled.  */
10912                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10913                 fex = exc & ~fex;
10914                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10915                 fex &= ((CPUArchState *)cpu_env)->swcr;
10916 
10917                 /* Update the hardware fpcr.  */
10918                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10919                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10920 
10921                 if (fex) {
10922                     int si_code = TARGET_FPE_FLTUNK;
10923                     target_siginfo_t info;
10924 
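                    /*
                     * Choose an si_code for the SIGFPE.  The checks below
                     * run in order, so when several exception bits are set
                     * the last match (invalid operation) takes precedence.
                     */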
10925                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10926                         si_code = TARGET_FPE_FLTUND;
10927                     }
10928                     if (fex & SWCR_TRAP_ENABLE_INE) {
10929                         si_code = TARGET_FPE_FLTRES;
10930                     }
10931                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10932                         si_code = TARGET_FPE_FLTUND;
10933                     }
10934                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10935                         si_code = TARGET_FPE_FLTOVF;
10936                     }
10937                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10938                         si_code = TARGET_FPE_FLTDIV;
10939                     }
10940                     if (fex & SWCR_TRAP_ENABLE_INV) {
10941                         si_code = TARGET_FPE_FLTINV;
10942                     }
10943 
10944                     info.si_signo = SIGFPE;
10945                     info.si_errno = 0;
10946                     info.si_code = si_code;
10947                     info._sifields._sigfault._addr
10948                         = ((CPUArchState *)cpu_env)->pc;
10949                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10950                                  QEMU_SI_FAULT, &info);
10951                 }
10952                 ret = 0;
10953             }
10954             break;
10955 
10956           /* case SSI_NVPAIRS:
10957              -- Used with SSIN_UACPROC to enable unaligned accesses.
10958              case SSI_IEEE_STATE_AT_SIGNAL:
10959              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10960              -- Not implemented in linux kernel
10961           */
10962         }
10963         return ret;
10964 #endif
10965 #ifdef TARGET_NR_osf_sigprocmask
10966     /* Alpha specific.  */
10967     case TARGET_NR_osf_sigprocmask:
10968         {
10969             abi_ulong mask;
10970             int how;
10971             sigset_t set, oldset;
10972 
10973             switch (arg1) {
10974             case TARGET_SIG_BLOCK:
10975                 how = SIG_BLOCK;
10976                 break;
10977             case TARGET_SIG_UNBLOCK:
10978                 how = SIG_UNBLOCK;
10979                 break;
10980             case TARGET_SIG_SETMASK:
10981                 how = SIG_SETMASK;
10982                 break;
10983             default:
10984                 return -TARGET_EINVAL;
10985             }
10986             mask = arg2;
10987             target_to_host_old_sigset(&set, &mask);
10988             ret = do_sigprocmask(how, &set, &oldset);
10989             if (!ret) {
10990                 host_to_target_old_sigset(&mask, &oldset);
10991                 ret = mask;
10992             }
10993         }
10994         return ret;
10995 #endif
10996 
10997 #ifdef TARGET_NR_getgid32
10998     case TARGET_NR_getgid32:
10999         return get_errno(getgid());
11000 #endif
11001 #ifdef TARGET_NR_geteuid32
11002     case TARGET_NR_geteuid32:
11003         return get_errno(geteuid());
11004 #endif
11005 #ifdef TARGET_NR_getegid32
11006     case TARGET_NR_getegid32:
11007         return get_errno(getegid());
11008 #endif
11009 #ifdef TARGET_NR_setreuid32
11010     case TARGET_NR_setreuid32:
11011         return get_errno(setreuid(arg1, arg2));
11012 #endif
11013 #ifdef TARGET_NR_setregid32
11014     case TARGET_NR_setregid32:
11015         return get_errno(setregid(arg1, arg2));
11016 #endif
11017 #ifdef TARGET_NR_getgroups32
11018     case TARGET_NR_getgroups32:
11019         {
11020             int gidsetsize = arg1;
11021             uint32_t *target_grouplist;
11022             gid_t *grouplist;
11023             int i;
11024 
11025             grouplist = alloca(gidsetsize * sizeof(gid_t));
11026             ret = get_errno(getgroups(gidsetsize, grouplist));
11027             if (gidsetsize == 0)
11028                 return ret;
11029             if (!is_error(ret)) {
11030                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11031                 if (!target_grouplist) {
11032                     return -TARGET_EFAULT;
11033                 }
11034                 for (i = 0; i < ret; i++)
11035                     target_grouplist[i] = tswap32(grouplist[i]);
11036                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11037             }
11038         }
11039         return ret;
11040 #endif
11041 #ifdef TARGET_NR_setgroups32
11042     case TARGET_NR_setgroups32:
11043         {
11044             int gidsetsize = arg1;
11045             uint32_t *target_grouplist;
11046             gid_t *grouplist;
11047             int i;
11048 
11049             grouplist = alloca(gidsetsize * sizeof(gid_t));
11050             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11051             if (!target_grouplist) {
11052                 return -TARGET_EFAULT;
11053             }
11054             for (i = 0; i < gidsetsize; i++)
11055                 grouplist[i] = tswap32(target_grouplist[i]);
11056             unlock_user(target_grouplist, arg2, 0);
11057             return get_errno(setgroups(gidsetsize, grouplist));
11058         }
11059 #endif
11060 #ifdef TARGET_NR_fchown32
11061     case TARGET_NR_fchown32:
11062         return get_errno(fchown(arg1, arg2, arg3));
11063 #endif
11064 #ifdef TARGET_NR_setresuid32
11065     case TARGET_NR_setresuid32:
11066         return get_errno(sys_setresuid(arg1, arg2, arg3));
11067 #endif
11068 #ifdef TARGET_NR_getresuid32
11069     case TARGET_NR_getresuid32:
11070         {
11071             uid_t ruid, euid, suid;
11072             ret = get_errno(getresuid(&ruid, &euid, &suid));
11073             if (!is_error(ret)) {
11074                 if (put_user_u32(ruid, arg1)
11075                     || put_user_u32(euid, arg2)
11076                     || put_user_u32(suid, arg3))
11077                     return -TARGET_EFAULT;
11078             }
11079         }
11080         return ret;
11081 #endif
11082 #ifdef TARGET_NR_setresgid32
11083     case TARGET_NR_setresgid32:
11084         return get_errno(sys_setresgid(arg1, arg2, arg3));
11085 #endif
11086 #ifdef TARGET_NR_getresgid32
11087     case TARGET_NR_getresgid32:
11088         {
11089             gid_t rgid, egid, sgid;
11090             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11091             if (!is_error(ret)) {
11092                 if (put_user_u32(rgid, arg1)
11093                     || put_user_u32(egid, arg2)
11094                     || put_user_u32(sgid, arg3))
11095                     return -TARGET_EFAULT;
11096             }
11097         }
11098         return ret;
11099 #endif
11100 #ifdef TARGET_NR_chown32
11101     case TARGET_NR_chown32:
11102         if (!(p = lock_user_string(arg1)))
11103             return -TARGET_EFAULT;
11104         ret = get_errno(chown(p, arg2, arg3));
11105         unlock_user(p, arg1, 0);
11106         return ret;
11107 #endif
11108 #ifdef TARGET_NR_setuid32
11109     case TARGET_NR_setuid32:
11110         return get_errno(sys_setuid(arg1));
11111 #endif
11112 #ifdef TARGET_NR_setgid32
11113     case TARGET_NR_setgid32:
11114         return get_errno(sys_setgid(arg1));
11115 #endif
11116 #ifdef TARGET_NR_setfsuid32
11117     case TARGET_NR_setfsuid32:
11118         return get_errno(setfsuid(arg1));
11119 #endif
11120 #ifdef TARGET_NR_setfsgid32
11121     case TARGET_NR_setfsgid32:
11122         return get_errno(setfsgid(arg1));
11123 #endif
11124 #ifdef TARGET_NR_mincore
11125     case TARGET_NR_mincore:
11126         {
11127             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11128             if (!a) {
11129                 return -TARGET_ENOMEM;
11130             }
11131             p = lock_user_string(arg3);
11132             if (!p) {
11133                 ret = -TARGET_EFAULT;
11134             } else {
11135                 ret = get_errno(mincore(a, arg2, p));
11136                 unlock_user(p, arg3, ret);
11137             }
11138             unlock_user(a, arg1, 0);
11139         }
11140         return ret;
11141 #endif
11142 #ifdef TARGET_NR_arm_fadvise64_64
11143     case TARGET_NR_arm_fadvise64_64:
11144         /* arm_fadvise64_64 looks like fadvise64_64 but
11145          * with different argument order: fd, advice, offset, len
11146          * rather than the usual fd, offset, len, advice.
11147          * Note that offset and len are both 64-bit so appear as
11148          * pairs of 32-bit registers.
11149          */
11150         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11151                             target_offset64(arg5, arg6), arg2);
11152         return -host_to_target_errno(ret);
11153 #endif
11154 
11155 #if TARGET_ABI_BITS == 32
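    /*
     * On 32-bit ABIs the 64-bit offset and length arguments arrive as
     * pairs of registers.  regpairs_aligned() reports whether this target
     * starts such pairs on an even-numbered register, in which case an
     * alignment slot is skipped and the arguments are shifted down first.
     */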
11156 
11157 #ifdef TARGET_NR_fadvise64_64
11158     case TARGET_NR_fadvise64_64:
11159 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11160         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11161         ret = arg2;
11162         arg2 = arg3;
11163         arg3 = arg4;
11164         arg4 = arg5;
11165         arg5 = arg6;
11166         arg6 = ret;
11167 #else
11168         /* 6 args: fd, offset (high, low), len (high, low), advice */
11169         if (regpairs_aligned(cpu_env, num)) {
11170             /* offset is in (3,4), len in (5,6) and advice in 7 */
11171             arg2 = arg3;
11172             arg3 = arg4;
11173             arg4 = arg5;
11174             arg5 = arg6;
11175             arg6 = arg7;
11176         }
11177 #endif
11178         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11179                             target_offset64(arg4, arg5), arg6);
11180         return -host_to_target_errno(ret);
11181 #endif
11182 
11183 #ifdef TARGET_NR_fadvise64
11184     case TARGET_NR_fadvise64:
11185         /* 5 args: fd, offset (high, low), len, advice */
11186         if (regpairs_aligned(cpu_env, num)) {
11187             /* offset is in (3,4), len in 5 and advice in 6 */
11188             arg2 = arg3;
11189             arg3 = arg4;
11190             arg4 = arg5;
11191             arg5 = arg6;
11192         }
11193         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11194         return -host_to_target_errno(ret);
11195 #endif
11196 
11197 #else /* not a 32-bit ABI */
11198 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11199 #ifdef TARGET_NR_fadvise64_64
11200     case TARGET_NR_fadvise64_64:
11201 #endif
11202 #ifdef TARGET_NR_fadvise64
11203     case TARGET_NR_fadvise64:
11204 #endif
11205 #ifdef TARGET_S390X
11206         switch (arg4) {
11207         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11208         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11209         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11210         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11211         default: break;
11212         }
11213 #endif
11214         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11215 #endif
11216 #endif /* end of 64-bit ABI fadvise handling */
11217 
11218 #ifdef TARGET_NR_madvise
11219     case TARGET_NR_madvise:
11220         /* A straight passthrough may not be safe because qemu sometimes
11221            turns private file-backed mappings into anonymous mappings.
11222            This will break MADV_DONTNEED.
11223            This is a hint, so ignoring and returning success is ok.  */
11224         return 0;
11225 #endif
11226 #if TARGET_ABI_BITS == 32
11227     case TARGET_NR_fcntl64:
11228     {
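        /*
         * fcntl64: the F_*LK64 commands need the guest struct flock64
         * translated to the host layout and back.  Old-ABI ARM uses a
         * differently laid-out flock64, hence the alternative copy helpers
         * chosen below for non-EABI binaries.
         */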
11229         int cmd;
11230         struct flock64 fl;
11231         from_flock64_fn *copyfrom = copy_from_user_flock64;
11232         to_flock64_fn *copyto = copy_to_user_flock64;
11233 
11234 #ifdef TARGET_ARM
11235         if (!((CPUARMState *)cpu_env)->eabi) {
11236             copyfrom = copy_from_user_oabi_flock64;
11237             copyto = copy_to_user_oabi_flock64;
11238         }
11239 #endif
11240 
11241         cmd = target_to_host_fcntl_cmd(arg2);
11242         if (cmd == -TARGET_EINVAL) {
11243             return cmd;
11244         }
11245 
11246         switch (arg2) {
11247         case TARGET_F_GETLK64:
11248             ret = copyfrom(&fl, arg3);
11249             if (ret) {
11250                 break;
11251             }
11252             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11253             if (ret == 0) {
11254                 ret = copyto(arg3, &fl);
11255             }
11256             break;
11257 
11258         case TARGET_F_SETLK64:
11259         case TARGET_F_SETLKW64:
11260             ret = copyfrom(&fl, arg3);
11261             if (ret) {
11262                 break;
11263             }
11264             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11265             break;
11266         default:
11267             ret = do_fcntl(arg1, arg2, arg3);
11268             break;
11269         }
11270         return ret;
11271     }
11272 #endif
11273 #ifdef TARGET_NR_cacheflush
11274     case TARGET_NR_cacheflush:
11275         /* self-modifying code is handled automatically, so nothing needed */
11276         return 0;
11277 #endif
11278 #ifdef TARGET_NR_getpagesize
11279     case TARGET_NR_getpagesize:
11280         return TARGET_PAGE_SIZE;
11281 #endif
11282     case TARGET_NR_gettid:
11283         return get_errno(sys_gettid());
11284 #ifdef TARGET_NR_readahead
11285     case TARGET_NR_readahead:
11286 #if TARGET_ABI_BITS == 32
11287         if (regpairs_aligned(cpu_env, num)) {
11288             arg2 = arg3;
11289             arg3 = arg4;
11290             arg4 = arg5;
11291         }
11292         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11293 #else
11294         ret = get_errno(readahead(arg1, arg2, arg3));
11295 #endif
11296         return ret;
11297 #endif
11298 #ifdef CONFIG_ATTR
11299 #ifdef TARGET_NR_setxattr
11300     case TARGET_NR_listxattr:
11301     case TARGET_NR_llistxattr:
11302     {
11303         void *p, *b = 0;
11304         if (arg2) {
11305             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11306             if (!b) {
11307                 return -TARGET_EFAULT;
11308             }
11309         }
11310         p = lock_user_string(arg1);
11311         if (p) {
11312             if (num == TARGET_NR_listxattr) {
11313                 ret = get_errno(listxattr(p, b, arg3));
11314             } else {
11315                 ret = get_errno(llistxattr(p, b, arg3));
11316             }
11317         } else {
11318             ret = -TARGET_EFAULT;
11319         }
11320         unlock_user(p, arg1, 0);
11321         unlock_user(b, arg2, arg3);
11322         return ret;
11323     }
11324     case TARGET_NR_flistxattr:
11325     {
11326         void *b = 0;
11327         if (arg2) {
11328             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11329             if (!b) {
11330                 return -TARGET_EFAULT;
11331             }
11332         }
11333         ret = get_errno(flistxattr(arg1, b, arg3));
11334         unlock_user(b, arg2, arg3);
11335         return ret;
11336     }
11337     case TARGET_NR_setxattr:
11338     case TARGET_NR_lsetxattr:
11339         {
11340             void *p, *n, *v = 0;
11341             if (arg3) {
11342                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11343                 if (!v) {
11344                     return -TARGET_EFAULT;
11345                 }
11346             }
11347             p = lock_user_string(arg1);
11348             n = lock_user_string(arg2);
11349             if (p && n) {
11350                 if (num == TARGET_NR_setxattr) {
11351                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11352                 } else {
11353                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11354                 }
11355             } else {
11356                 ret = -TARGET_EFAULT;
11357             }
11358             unlock_user(p, arg1, 0);
11359             unlock_user(n, arg2, 0);
11360             unlock_user(v, arg3, 0);
11361         }
11362         return ret;
11363     case TARGET_NR_fsetxattr:
11364         {
11365             void *n, *v = 0;
11366             if (arg3) {
11367                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11368                 if (!v) {
11369                     return -TARGET_EFAULT;
11370                 }
11371             }
11372             n = lock_user_string(arg2);
11373             if (n) {
11374                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11375             } else {
11376                 ret = -TARGET_EFAULT;
11377             }
11378             unlock_user(n, arg2, 0);
11379             unlock_user(v, arg3, 0);
11380         }
11381         return ret;
11382     case TARGET_NR_getxattr:
11383     case TARGET_NR_lgetxattr:
11384         {
11385             void *p, *n, *v = 0;
11386             if (arg3) {
11387                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11388                 if (!v) {
11389                     return -TARGET_EFAULT;
11390                 }
11391             }
11392             p = lock_user_string(arg1);
11393             n = lock_user_string(arg2);
11394             if (p && n) {
11395                 if (num == TARGET_NR_getxattr) {
11396                     ret = get_errno(getxattr(p, n, v, arg4));
11397                 } else {
11398                     ret = get_errno(lgetxattr(p, n, v, arg4));
11399                 }
11400             } else {
11401                 ret = -TARGET_EFAULT;
11402             }
11403             unlock_user(p, arg1, 0);
11404             unlock_user(n, arg2, 0);
11405             unlock_user(v, arg3, arg4);
11406         }
11407         return ret;
11408     case TARGET_NR_fgetxattr:
11409         {
11410             void *n, *v = 0;
11411             if (arg3) {
11412                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11413                 if (!v) {
11414                     return -TARGET_EFAULT;
11415                 }
11416             }
11417             n = lock_user_string(arg2);
11418             if (n) {
11419                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11420             } else {
11421                 ret = -TARGET_EFAULT;
11422             }
11423             unlock_user(n, arg2, 0);
11424             unlock_user(v, arg3, arg4);
11425         }
11426         return ret;
11427     case TARGET_NR_removexattr:
11428     case TARGET_NR_lremovexattr:
11429         {
11430             void *p, *n;
11431             p = lock_user_string(arg1);
11432             n = lock_user_string(arg2);
11433             if (p && n) {
11434                 if (num == TARGET_NR_removexattr) {
11435                     ret = get_errno(removexattr(p, n));
11436                 } else {
11437                     ret = get_errno(lremovexattr(p, n));
11438                 }
11439             } else {
11440                 ret = -TARGET_EFAULT;
11441             }
11442             unlock_user(p, arg1, 0);
11443             unlock_user(n, arg2, 0);
11444         }
11445         return ret;
11446     case TARGET_NR_fremovexattr:
11447         {
11448             void *n;
11449             n = lock_user_string(arg2);
11450             if (n) {
11451                 ret = get_errno(fremovexattr(arg1, n));
11452             } else {
11453                 ret = -TARGET_EFAULT;
11454             }
11455             unlock_user(n, arg2, 0);
11456         }
11457         return ret;
11458 #endif
11459 #endif /* CONFIG_ATTR */
11460 #ifdef TARGET_NR_set_thread_area
11461     case TARGET_NR_set_thread_area:
11462 #if defined(TARGET_MIPS)
11463       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11464       return 0;
11465 #elif defined(TARGET_CRIS)
11466       if (arg1 & 0xff)
11467           ret = -TARGET_EINVAL;
11468       else {
11469           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11470           ret = 0;
11471       }
11472       return ret;
11473 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11474       return do_set_thread_area(cpu_env, arg1);
11475 #elif defined(TARGET_M68K)
11476       {
11477           TaskState *ts = cpu->opaque;
11478           ts->tp_value = arg1;
11479           return 0;
11480       }
11481 #else
11482       return -TARGET_ENOSYS;
11483 #endif
11484 #endif
11485 #ifdef TARGET_NR_get_thread_area
11486     case TARGET_NR_get_thread_area:
11487 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11488         return do_get_thread_area(cpu_env, arg1);
11489 #elif defined(TARGET_M68K)
11490         {
11491             TaskState *ts = cpu->opaque;
11492             return ts->tp_value;
11493         }
11494 #else
11495         return -TARGET_ENOSYS;
11496 #endif
11497 #endif
11498 #ifdef TARGET_NR_getdomainname
11499     case TARGET_NR_getdomainname:
11500         return -TARGET_ENOSYS;
11501 #endif
11502 
11503 #ifdef TARGET_NR_clock_settime
11504     case TARGET_NR_clock_settime:
11505     {
11506         struct timespec ts;
11507 
11508         ret = target_to_host_timespec(&ts, arg2);
11509         if (!is_error(ret)) {
11510             ret = get_errno(clock_settime(arg1, &ts));
11511         }
11512         return ret;
11513     }
11514 #endif
11515 #ifdef TARGET_NR_clock_settime64
11516     case TARGET_NR_clock_settime64:
11517     {
11518         struct timespec ts;
11519 
11520         ret = target_to_host_timespec64(&ts, arg2);
11521         if (!is_error(ret)) {
11522             ret = get_errno(clock_settime(arg1, &ts));
11523         }
11524         return ret;
11525     }
11526 #endif
11527 #ifdef TARGET_NR_clock_gettime
11528     case TARGET_NR_clock_gettime:
11529     {
11530         struct timespec ts;
11531         ret = get_errno(clock_gettime(arg1, &ts));
11532         if (!is_error(ret)) {
11533             ret = host_to_target_timespec(arg2, &ts);
11534         }
11535         return ret;
11536     }
11537 #endif
11538 #ifdef TARGET_NR_clock_gettime64
11539     case TARGET_NR_clock_gettime64:
11540     {
11541         struct timespec ts;
11542         ret = get_errno(clock_gettime(arg1, &ts));
11543         if (!is_error(ret)) {
11544             ret = host_to_target_timespec64(arg2, &ts);
11545         }
11546         return ret;
11547     }
11548 #endif
11549 #ifdef TARGET_NR_clock_getres
11550     case TARGET_NR_clock_getres:
11551     {
11552         struct timespec ts;
11553         ret = get_errno(clock_getres(arg1, &ts));
11554         if (!is_error(ret)) {
11555             host_to_target_timespec(arg2, &ts);
11556         }
11557         return ret;
11558     }
11559 #endif
11560 #ifdef TARGET_NR_clock_nanosleep
11561     case TARGET_NR_clock_nanosleep:
11562     {
11563         struct timespec ts;
11564         target_to_host_timespec(&ts, arg3);
11565         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11566                                              &ts, arg4 ? &ts : NULL));
11567         if (arg4)
11568             host_to_target_timespec(arg4, &ts);
11569 
11570 #if defined(TARGET_PPC)
11571         /* clock_nanosleep is odd in that it returns positive errno values.
11572          * On PPC, CR0 bit 3 should be set in such a situation. */
11573         if (ret && ret != -TARGET_ERESTARTSYS) {
11574             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11575         }
11576 #endif
11577         return ret;
11578     }
11579 #endif
11580 
11581 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11582     case TARGET_NR_set_tid_address:
11583         return get_errno(set_tid_address((int *)g2h(arg1)));
11584 #endif
11585 
11586     case TARGET_NR_tkill:
11587         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11588 
11589     case TARGET_NR_tgkill:
11590         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11591                          target_to_host_signal(arg3)));
11592 
11593 #ifdef TARGET_NR_set_robust_list
11594     case TARGET_NR_set_robust_list:
11595     case TARGET_NR_get_robust_list:
11596         /* The ABI for supporting robust futexes has userspace pass
11597          * the kernel a pointer to a linked list which is updated by
11598          * userspace after the syscall; the list is walked by the kernel
11599          * when the thread exits. Since the linked list in QEMU guest
11600          * memory isn't a valid linked list for the host and we have
11601          * no way to reliably intercept the thread-death event, we can't
11602          * support these. Silently return ENOSYS so that guest userspace
11603          * falls back to a non-robust futex implementation (which should
11604          * be OK except in the corner case of the guest crashing while
11605          * holding a mutex that is shared with another process via
11606          * shared memory).
11607          */
11608         return -TARGET_ENOSYS;
11609 #endif
11610 
11611 #if defined(TARGET_NR_utimensat)
11612     case TARGET_NR_utimensat:
11613         {
11614             struct timespec *tsp, ts[2];
11615             if (!arg3) {
11616                 tsp = NULL;
11617             } else {
11618                 target_to_host_timespec(ts, arg3);
11619                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11620                 tsp = ts;
11621             }
11622             if (!arg2)
11623                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11624             else {
11625                 if (!(p = lock_user_string(arg2))) {
11626                     return -TARGET_EFAULT;
11627                 }
11628                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11629                 unlock_user(p, arg2, 0);
11630             }
11631         }
11632         return ret;
11633 #endif
11634 #ifdef TARGET_NR_futex
11635     case TARGET_NR_futex:
11636         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11637 #endif
11638 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11639     case TARGET_NR_inotify_init:
11640         ret = get_errno(sys_inotify_init());
11641         if (ret >= 0) {
11642             fd_trans_register(ret, &target_inotify_trans);
11643         }
11644         return ret;
11645 #endif
11646 #ifdef CONFIG_INOTIFY1
11647 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11648     case TARGET_NR_inotify_init1:
11649         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11650                                           fcntl_flags_tbl)));
11651         if (ret >= 0) {
11652             fd_trans_register(ret, &target_inotify_trans);
11653         }
11654         return ret;
11655 #endif
11656 #endif
11657 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11658     case TARGET_NR_inotify_add_watch:
11659         p = lock_user_string(arg2);
11660         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11661         unlock_user(p, arg2, 0);
11662         return ret;
11663 #endif
11664 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11665     case TARGET_NR_inotify_rm_watch:
11666         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11667 #endif
11668 
11669 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11670     case TARGET_NR_mq_open:
11671         {
11672             struct mq_attr posix_mq_attr;
11673             struct mq_attr *pposix_mq_attr;
11674             int host_flags;
11675 
11676             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11677             pposix_mq_attr = NULL;
11678             if (arg4) {
11679                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11680                     return -TARGET_EFAULT;
11681                 }
11682                 pposix_mq_attr = &posix_mq_attr;
11683             }
11684             p = lock_user_string(arg1 - 1);
11685             if (!p) {
11686                 return -TARGET_EFAULT;
11687             }
11688             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11689             unlock_user (p, arg1, 0);
11690         }
11691         return ret;
11692 
11693     case TARGET_NR_mq_unlink:
11694         p = lock_user_string(arg1 - 1);
11695         if (!p) {
11696             return -TARGET_EFAULT;
11697         }
11698         ret = get_errno(mq_unlink(p));
11699         unlock_user (p, arg1, 0);
11700         return ret;
11701 
11702 #ifdef TARGET_NR_mq_timedsend
11703     case TARGET_NR_mq_timedsend:
11704         {
11705             struct timespec ts;
11706 
11707             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11708             if (arg5 != 0) {
11709                 target_to_host_timespec(&ts, arg5);
11710                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11711                 host_to_target_timespec(arg5, &ts);
11712             } else {
11713                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11714             }
11715             unlock_user (p, arg2, arg3);
11716         }
11717         return ret;
11718 #endif
11719 
11720 #ifdef TARGET_NR_mq_timedreceive
11721     case TARGET_NR_mq_timedreceive:
11722         {
11723             struct timespec ts;
11724             unsigned int prio;
11725 
11726             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11727             if (arg5 != 0) {
11728                 target_to_host_timespec(&ts, arg5);
11729                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11730                                                      &prio, &ts));
11731                 host_to_target_timespec(arg5, &ts);
11732             } else {
11733                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11734                                                      &prio, NULL));
11735             }
11736             unlock_user (p, arg2, arg3);
11737             if (arg4 != 0)
11738                 put_user_u32(prio, arg4);
11739         }
11740         return ret;
11741 #endif
11742 
11743     /* Not implemented for now... */
11744 /*     case TARGET_NR_mq_notify: */
11745 /*         break; */
11746 
11747     case TARGET_NR_mq_getsetattr:
11748         {
11749             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11750             ret = 0;
11751             if (arg2 != 0) {
11752                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11753                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11754                                            &posix_mq_attr_out));
11755             } else if (arg3 != 0) {
11756                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11757             }
11758             if (ret == 0 && arg3 != 0) {
11759                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11760             }
11761         }
11762         return ret;
11763 #endif
11764 
11765 #ifdef CONFIG_SPLICE
11766 #ifdef TARGET_NR_tee
11767     case TARGET_NR_tee:
11768         {
11769             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11770         }
11771         return ret;
11772 #endif
11773 #ifdef TARGET_NR_splice
11774     case TARGET_NR_splice:
11775         {
11776             loff_t loff_in, loff_out;
11777             loff_t *ploff_in = NULL, *ploff_out = NULL;
11778             if (arg2) {
11779                 if (get_user_u64(loff_in, arg2)) {
11780                     return -TARGET_EFAULT;
11781                 }
11782                 ploff_in = &loff_in;
11783             }
11784             if (arg4) {
11785                 if (get_user_u64(loff_out, arg4)) {
11786                     return -TARGET_EFAULT;
11787                 }
11788                 ploff_out = &loff_out;
11789             }
11790             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11791             if (arg2) {
11792                 if (put_user_u64(loff_in, arg2)) {
11793                     return -TARGET_EFAULT;
11794                 }
11795             }
11796             if (arg4) {
11797                 if (put_user_u64(loff_out, arg4)) {
11798                     return -TARGET_EFAULT;
11799                 }
11800             }
11801         }
11802         return ret;
11803 #endif
11804 #ifdef TARGET_NR_vmsplice
11805     case TARGET_NR_vmsplice:
11806         {
11807             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11808             if (vec != NULL) {
11809                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11810                 unlock_iovec(vec, arg2, arg3, 0);
11811             } else {
11812                 ret = -host_to_target_errno(errno);
11813             }
11814         }
11815         return ret;
11816 #endif
11817 #endif /* CONFIG_SPLICE */
11818 #ifdef CONFIG_EVENTFD
11819 #if defined(TARGET_NR_eventfd)
11820     case TARGET_NR_eventfd:
11821         ret = get_errno(eventfd(arg1, 0));
11822         if (ret >= 0) {
11823             fd_trans_register(ret, &target_eventfd_trans);
11824         }
11825         return ret;
11826 #endif
11827 #if defined(TARGET_NR_eventfd2)
11828     case TARGET_NR_eventfd2:
11829     {
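        /*
         * The eventfd2 flags reuse the O_NONBLOCK/O_CLOEXEC values, which
         * differ between guest and host, so translate the two bits by hand
         * rather than passing arg2 straight through.
         */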
11830         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11831         if (arg2 & TARGET_O_NONBLOCK) {
11832             host_flags |= O_NONBLOCK;
11833         }
11834         if (arg2 & TARGET_O_CLOEXEC) {
11835             host_flags |= O_CLOEXEC;
11836         }
11837         ret = get_errno(eventfd(arg1, host_flags));
11838         if (ret >= 0) {
11839             fd_trans_register(ret, &target_eventfd_trans);
11840         }
11841         return ret;
11842     }
11843 #endif
11844 #endif /* CONFIG_EVENTFD  */
11845 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11846     case TARGET_NR_fallocate:
11847 #if TARGET_ABI_BITS == 32
11848         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11849                                   target_offset64(arg5, arg6)));
11850 #else
11851         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11852 #endif
11853         return ret;
11854 #endif
11855 #if defined(CONFIG_SYNC_FILE_RANGE)
11856 #if defined(TARGET_NR_sync_file_range)
11857     case TARGET_NR_sync_file_range:
11858 #if TARGET_ABI_BITS == 32
11859 #if defined(TARGET_MIPS)
11860         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11861                                         target_offset64(arg5, arg6), arg7));
11862 #else
11863         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11864                                         target_offset64(arg4, arg5), arg6));
11865 #endif /* !TARGET_MIPS */
11866 #else
11867         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11868 #endif
11869         return ret;
11870 #endif
11871 #if defined(TARGET_NR_sync_file_range2) || \
11872     defined(TARGET_NR_arm_sync_file_range)
11873 #if defined(TARGET_NR_sync_file_range2)
11874     case TARGET_NR_sync_file_range2:
11875 #endif
11876 #if defined(TARGET_NR_arm_sync_file_range)
11877     case TARGET_NR_arm_sync_file_range:
11878 #endif
11879         /* This is like sync_file_range but the arguments are reordered */
11880 #if TARGET_ABI_BITS == 32
11881         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11882                                         target_offset64(arg5, arg6), arg2));
11883 #else
11884         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11885 #endif
11886         return ret;
11887 #endif
11888 #endif
11889 #if defined(TARGET_NR_signalfd4)
11890     case TARGET_NR_signalfd4:
11891         return do_signalfd4(arg1, arg2, arg4);
11892 #endif
11893 #if defined(TARGET_NR_signalfd)
11894     case TARGET_NR_signalfd:
11895         return do_signalfd4(arg1, arg2, 0);
11896 #endif
11897 #if defined(CONFIG_EPOLL)
11898 #if defined(TARGET_NR_epoll_create)
11899     case TARGET_NR_epoll_create:
11900         return get_errno(epoll_create(arg1));
11901 #endif
11902 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11903     case TARGET_NR_epoll_create1:
11904         return get_errno(epoll_create1(arg1));
11905 #endif
11906 #if defined(TARGET_NR_epoll_ctl)
11907     case TARGET_NR_epoll_ctl:
11908     {
11909         struct epoll_event ep;
11910         struct epoll_event *epp = 0;
11911         if (arg4) {
11912             struct target_epoll_event *target_ep;
11913             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11914                 return -TARGET_EFAULT;
11915             }
11916             ep.events = tswap32(target_ep->events);
11917             /* The epoll_data_t union is just opaque data to the kernel,
11918              * so we transfer all 64 bits across and need not worry what
11919              * actual data type it is.
11920              */
11921             ep.data.u64 = tswap64(target_ep->data.u64);
11922             unlock_user_struct(target_ep, arg4, 0);
11923             epp = &ep;
11924         }
11925         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11926     }
11927 #endif
11928 
11929 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11930 #if defined(TARGET_NR_epoll_wait)
11931     case TARGET_NR_epoll_wait:
11932 #endif
11933 #if defined(TARGET_NR_epoll_pwait)
11934     case TARGET_NR_epoll_pwait:
11935 #endif
11936     {
11937         struct target_epoll_event *target_ep;
11938         struct epoll_event *ep;
11939         int epfd = arg1;
11940         int maxevents = arg3;
11941         int timeout = arg4;
11942 
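        /*
         * epoll_wait and epoll_pwait share one implementation built on the
         * host epoll_pwait(): events are collected in a host-side buffer
         * and copied back to the guest with the required byte swapping.
         */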
11943         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11944             return -TARGET_EINVAL;
11945         }
11946 
11947         target_ep = lock_user(VERIFY_WRITE, arg2,
11948                               maxevents * sizeof(struct target_epoll_event), 1);
11949         if (!target_ep) {
11950             return -TARGET_EFAULT;
11951         }
11952 
11953         ep = g_try_new(struct epoll_event, maxevents);
11954         if (!ep) {
11955             unlock_user(target_ep, arg2, 0);
11956             return -TARGET_ENOMEM;
11957         }
11958 
11959         switch (num) {
11960 #if defined(TARGET_NR_epoll_pwait)
11961         case TARGET_NR_epoll_pwait:
11962         {
11963             target_sigset_t *target_set;
11964             sigset_t _set, *set = &_set;
11965 
11966             if (arg5) {
11967                 if (arg6 != sizeof(target_sigset_t)) {
11968                     ret = -TARGET_EINVAL;
11969                     break;
11970                 }
11971 
11972                 target_set = lock_user(VERIFY_READ, arg5,
11973                                        sizeof(target_sigset_t), 1);
11974                 if (!target_set) {
11975                     ret = -TARGET_EFAULT;
11976                     break;
11977                 }
11978                 target_to_host_sigset(set, target_set);
11979                 unlock_user(target_set, arg5, 0);
11980             } else {
11981                 set = NULL;
11982             }
11983 
11984             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11985                                              set, SIGSET_T_SIZE));
11986             break;
11987         }
11988 #endif
11989 #if defined(TARGET_NR_epoll_wait)
11990         case TARGET_NR_epoll_wait:
11991             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11992                                              NULL, 0));
11993             break;
11994 #endif
11995         default:
11996             ret = -TARGET_ENOSYS;
11997         }
11998         if (!is_error(ret)) {
11999             int i;
12000             for (i = 0; i < ret; i++) {
12001                 target_ep[i].events = tswap32(ep[i].events);
12002                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12003             }
12004             unlock_user(target_ep, arg2,
12005                         ret * sizeof(struct target_epoll_event));
12006         } else {
12007             unlock_user(target_ep, arg2, 0);
12008         }
12009         g_free(ep);
12010         return ret;
12011     }
12012 #endif
12013 #endif
12014 #ifdef TARGET_NR_prlimit64
12015     case TARGET_NR_prlimit64:
12016     {
12017         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12018         struct target_rlimit64 *target_rnew, *target_rold;
12019         struct host_rlimit64 rnew, rold, *rnewp = 0;
12020         int resource = target_to_host_resource(arg2);
12021 
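        /*
         * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
         * forwarded to the host (rnewp stays NULL for them), presumably so
         * a guest cannot constrain QEMU's own address space; the old limits
         * are still read back for the guest when requested.
         */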
12022         if (arg3 && (resource != RLIMIT_AS &&
12023                      resource != RLIMIT_DATA &&
12024                      resource != RLIMIT_STACK)) {
12025             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12026                 return -TARGET_EFAULT;
12027             }
12028             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12029             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12030             unlock_user_struct(target_rnew, arg3, 0);
12031             rnewp = &rnew;
12032         }
12033 
12034         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12035         if (!is_error(ret) && arg4) {
12036             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12037                 return -TARGET_EFAULT;
12038             }
12039             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12040             target_rold->rlim_max = tswap64(rold.rlim_max);
12041             unlock_user_struct(target_rold, arg4, 1);
12042         }
12043         return ret;
12044     }
12045 #endif
12046 #ifdef TARGET_NR_gethostname
12047     case TARGET_NR_gethostname:
12048     {
12049         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12050         if (name) {
12051             ret = get_errno(gethostname(name, arg2));
12052             unlock_user(name, arg1, arg2);
12053         } else {
12054             ret = -TARGET_EFAULT;
12055         }
12056         return ret;
12057     }
12058 #endif
12059 #ifdef TARGET_NR_atomic_cmpxchg_32
12060     case TARGET_NR_atomic_cmpxchg_32:
12061     {
12062         /* should use start_exclusive from main.c */
12063         abi_ulong mem_value;
12064         if (get_user_u32(mem_value, arg6)) {
12065             target_siginfo_t info;
12066             info.si_signo = SIGSEGV;
12067             info.si_errno = 0;
12068             info.si_code = TARGET_SEGV_MAPERR;
12069             info._sifields._sigfault._addr = arg6;
12070             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12071                          QEMU_SI_FAULT, &info);
12072             ret = 0xdeadbeef;
12073 
12074         }
12075         if (mem_value == arg2)
12076             put_user_u32(arg1, arg6);
12077         return mem_value;
12078     }
12079 #endif
12080 #ifdef TARGET_NR_atomic_barrier
12081     case TARGET_NR_atomic_barrier:
12082         /* Like the kernel implementation and the
12083            qemu arm barrier, this is a no-op. */
12084         return 0;
12085 #endif
12086 
12087 #ifdef TARGET_NR_timer_create
12088     case TARGET_NR_timer_create:
12089     {
12090         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12091 
12092         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12093 
12094         int clkid = arg1;
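              /*
               * Reserve a slot in the host timer table; the id handed back to
               * the guest encodes this index as TIMER_MAGIC | timer_index.
               */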
12095         int timer_index = next_free_host_timer();
12096 
12097         if (timer_index < 0) {
12098             ret = -TARGET_EAGAIN;
12099         } else {
12100             timer_t *phtimer = g_posix_timers + timer_index;
12101 
12102             if (arg2) {
12103                 phost_sevp = &host_sevp;
12104                 ret = target_to_host_sigevent(phost_sevp, arg2);
12105                 if (ret != 0) {
12106                     return ret;
12107                 }
12108             }
12109 
12110             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12111             if (ret) {
12112                 phtimer = NULL;
12113             } else {
12114                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12115                     return -TARGET_EFAULT;
12116                 }
12117             }
12118         }
12119         return ret;
12120     }
12121 #endif
12122 
12123 #ifdef TARGET_NR_timer_settime
12124     case TARGET_NR_timer_settime:
12125     {
12126         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12127          * struct itimerspec * old_value */
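              /* get_timer_id() checks the TIMER_MAGIC encoding and returns the
                 host table index, or a negative errno for a malformed id. */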
12128         target_timer_t timerid = get_timer_id(arg1);
12129 
12130         if (timerid < 0) {
12131             ret = timerid;
12132         } else if (arg3 == 0) {
12133             ret = -TARGET_EINVAL;
12134         } else {
12135             timer_t htimer = g_posix_timers[timerid];
12136             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12137 
12138             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12139                 return -TARGET_EFAULT;
12140             }
12141             ret = get_errno(
12142                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12143             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12144                 return -TARGET_EFAULT;
12145             }
12146         }
12147         return ret;
12148     }
12149 #endif
12150 
12151 #ifdef TARGET_NR_timer_gettime
12152     case TARGET_NR_timer_gettime:
12153     {
12154         /* args: timer_t timerid, struct itimerspec *curr_value */
12155         target_timer_t timerid = get_timer_id(arg1);
12156 
12157         if (timerid < 0) {
12158             ret = timerid;
12159         } else if (!arg2) {
12160             ret = -TARGET_EFAULT;
12161         } else {
12162             timer_t htimer = g_posix_timers[timerid];
12163             struct itimerspec hspec;
12164             ret = get_errno(timer_gettime(htimer, &hspec));
12165 
12166             if (host_to_target_itimerspec(arg2, &hspec)) {
12167                 ret = -TARGET_EFAULT;
12168             }
12169         }
12170         return ret;
12171     }
12172 #endif
12173 
12174 #ifdef TARGET_NR_timer_getoverrun
12175     case TARGET_NR_timer_getoverrun:
12176     {
12177         /* args: timer_t timerid */
12178         target_timer_t timerid = get_timer_id(arg1);
12179 
12180         if (timerid < 0) {
12181             ret = timerid;
12182         } else {
12183             timer_t htimer = g_posix_timers[timerid];
12184             ret = get_errno(timer_getoverrun(htimer));
12185         }
12186         return ret;
12187     }
12188 #endif
12189 
12190 #ifdef TARGET_NR_timer_delete
12191     case TARGET_NR_timer_delete:
12192     {
12193         /* args: timer_t timerid */
12194         target_timer_t timerid = get_timer_id(arg1);
12195 
12196         if (timerid < 0) {
12197             ret = timerid;
12198         } else {
12199             timer_t htimer = g_posix_timers[timerid];
12200             ret = get_errno(timer_delete(htimer));
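                  /* Clear the slot so next_free_host_timer() can reuse it. */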
12201             g_posix_timers[timerid] = 0;
12202         }
12203         return ret;
12204     }
12205 #endif
12206 
12207 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12208     case TARGET_NR_timerfd_create:
12209         return get_errno(timerfd_create(arg1,
12210                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12211 #endif
12212 
12213 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12214     case TARGET_NR_timerfd_gettime:
12215         {
12216             struct itimerspec its_curr;
12217 
12218             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12219 
12220             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12221                 return -TARGET_EFAULT;
12222             }
12223         }
12224         return ret;
12225 #endif
12226 
12227 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12228     case TARGET_NR_timerfd_settime:
12229         {
12230             struct itimerspec its_new, its_old, *p_new;
12231 
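                  /* If the guest passed a NULL new_value, pass NULL through to
                     the host as well and let it report the error natively. */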
12232             if (arg3) {
12233                 if (target_to_host_itimerspec(&its_new, arg3)) {
12234                     return -TARGET_EFAULT;
12235                 }
12236                 p_new = &its_new;
12237             } else {
12238                 p_new = NULL;
12239             }
12240 
12241             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12242 
12243             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12244                 return -TARGET_EFAULT;
12245             }
12246         }
12247         return ret;
12248 #endif
12249 
12250 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12251     case TARGET_NR_ioprio_get:
12252         return get_errno(ioprio_get(arg1, arg2));
12253 #endif
12254 
12255 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12256     case TARGET_NR_ioprio_set:
12257         return get_errno(ioprio_set(arg1, arg2, arg3));
12258 #endif
12259 
12260 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12261     case TARGET_NR_setns:
12262         return get_errno(setns(arg1, arg2));
12263 #endif
12264 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12265     case TARGET_NR_unshare:
12266         return get_errno(unshare(arg1));
12267 #endif
12268 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12269     case TARGET_NR_kcmp:
12270         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12271 #endif
12272 #ifdef TARGET_NR_swapcontext
12273     case TARGET_NR_swapcontext:
12274         /* PowerPC specific.  */
12275         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12276 #endif
12277 #ifdef TARGET_NR_memfd_create
12278     case TARGET_NR_memfd_create:
12279         p = lock_user_string(arg1);
12280         if (!p) {
12281             return -TARGET_EFAULT;
12282         }
12283         ret = get_errno(memfd_create(p, arg2));
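              /* Drop any fd translator left over from a previously closed
                 descriptor that happened to use the same fd number. */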
12284         fd_trans_unregister(ret);
12285         unlock_user(p, arg1, 0);
12286         return ret;
12287 #endif
12288 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12289     case TARGET_NR_membarrier:
12290         return get_errno(membarrier(arg1, arg2));
12291 #endif
12292 
12293     default:
12294         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12295         return -TARGET_ENOSYS;
12296     }
12297     return ret;
12298 }
12299 
12300 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12301                     abi_long arg2, abi_long arg3, abi_long arg4,
12302                     abi_long arg5, abi_long arg6, abi_long arg7,
12303                     abi_long arg8)
12304 {
12305     CPUState *cpu = env_cpu(cpu_env);
12306     abi_long ret;
12307 
12308 #ifdef DEBUG_ERESTARTSYS
12309     /* Debug-only code for exercising the syscall-restart code paths
12310      * in the per-architecture cpu main loops: restart every syscall
12311      * the guest makes once before letting it through.
12312      */
12313     {
12314         static bool flag;
12315         flag = !flag;
12316         if (flag) {
12317             return -TARGET_ERESTARTSYS;
12318         }
12319     }
12320 #endif
12321 
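          /* Let the tracing and plugin hooks observe the syscall before it is dispatched. */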
12322     record_syscall_start(cpu, num, arg1,
12323                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12324 
12325     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12326         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12327     }
12328 
12329     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12330                       arg5, arg6, arg7, arg8);
12331 
12332     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12333         print_syscall_ret(num, ret);
12334     }
12335 
12336     record_syscall_return(cpu, num, ret);
12337     return ret;
12338 }
12339