xref: /openbmc/qemu/linux-user/syscall.c (revision 8a49b300)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #include "linux_loop.h"
116 #include "uname.h"
117 
118 #include "qemu.h"
119 #include "qemu/guest-random.h"
120 #include "qemu/selfmap.h"
121 #include "user/syscall-trace.h"
122 #include "qapi/error.h"
123 #include "fd-trans.h"
124 #include "tcg/tcg.h"
125 
126 #ifndef CLONE_IO
127 #define CLONE_IO                0x80000000      /* Clone io context */
128 #endif
129 
130 /* We can't directly call the host clone syscall, because this will
131  * badly confuse libc (breaking mutexes, for example). So we must
132  * divide clone flags into:
133  *  * flag combinations that look like pthread_create()
134  *  * flag combinations that look like fork()
135  *  * flags we can implement within QEMU itself
136  *  * flags we can't support and will return an error for
137  */
138 /* For thread creation, all these flags must be present; for
139  * fork, none must be present.
140  */
141 #define CLONE_THREAD_FLAGS                              \
142     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
143      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
144 
145 /* These flags are ignored:
146  * CLONE_DETACHED is now ignored by the kernel;
147  * CLONE_IO is just an optimisation hint to the I/O scheduler
148  */
149 #define CLONE_IGNORED_FLAGS                     \
150     (CLONE_DETACHED | CLONE_IO)
151 
152 /* Flags for fork which we can implement within QEMU itself */
153 #define CLONE_OPTIONAL_FORK_FLAGS               \
154     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
155      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
156 
157 /* Flags for thread creation which we can implement within QEMU itself */
158 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
159     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
160      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
161 
162 #define CLONE_INVALID_FORK_FLAGS                                        \
163     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
164 
165 #define CLONE_INVALID_THREAD_FLAGS                                      \
166     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
167        CLONE_IGNORED_FLAGS))
168 
169 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
170  * have almost all been allocated. We cannot support any of
171  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
172  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
173  * The checks against the invalid thread masks above will catch these.
174  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
175  */
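
/*
 * Illustrative sketch only (not part of the code below): roughly how the
 * masks above partition a guest clone flag word.  The helper name and the
 * return strings are hypothetical; the real checks live in do_fork()
 * further down in this file.
 */
#if 0
static const char *example_classify_clone_flags(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
        /* looks like pthread_create(): reject any unsupported bits */
        return (flags & CLONE_INVALID_THREAD_FLAGS) ? "EINVAL" : "thread";
    }
    if (!(flags & CLONE_THREAD_FLAGS)) {
        /* looks like fork(): only optional/ignored bits may remain */
        return (flags & CLONE_INVALID_FORK_FLAGS) ? "EINVAL" : "fork";
    }
    /* a partial set of the thread flags is not supported */
    return "EINVAL";
}
#endif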
176 
177 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
178  * once. This exercises the codepaths for restart.
179  */
180 //#define DEBUG_ERESTARTSYS
181 
182 //#include <linux/msdos_fs.h>
183 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
184 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
185 
186 #undef _syscall0
187 #undef _syscall1
188 #undef _syscall2
189 #undef _syscall3
190 #undef _syscall4
191 #undef _syscall5
192 #undef _syscall6
193 
194 #define _syscall0(type,name)		\
195 static type name (void)			\
196 {					\
197 	return syscall(__NR_##name);	\
198 }
199 
200 #define _syscall1(type,name,type1,arg1)		\
201 static type name (type1 arg1)			\
202 {						\
203 	return syscall(__NR_##name, arg1);	\
204 }
205 
206 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
207 static type name (type1 arg1,type2 arg2)		\
208 {							\
209 	return syscall(__NR_##name, arg1, arg2);	\
210 }
211 
212 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
213 static type name (type1 arg1,type2 arg2,type3 arg3)		\
214 {								\
215 	return syscall(__NR_##name, arg1, arg2, arg3);		\
216 }
217 
218 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
219 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
220 {										\
221 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
222 }
223 
224 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
225 		  type5,arg5)							\
226 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
227 {										\
228 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
229 }
230 
231 
232 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
233 		  type5,arg5,type6,arg6)					\
234 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
235                   type6 arg6)							\
236 {										\
237 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
238 }
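
/*
 * For reference, an illustration of what these templates expand to:
 * _syscall0(int, sys_gettid), used a little further down, becomes
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 *
 * i.e. a thin static wrapper around the raw host syscall() which reports
 * failure via -1/errno, exactly like the libc syscall() it is built on.
 */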
239 
240 
241 #define __NR_sys_uname __NR_uname
242 #define __NR_sys_getcwd1 __NR_getcwd
243 #define __NR_sys_getdents __NR_getdents
244 #define __NR_sys_getdents64 __NR_getdents64
245 #define __NR_sys_getpriority __NR_getpriority
246 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
247 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
248 #define __NR_sys_syslog __NR_syslog
249 #if defined(__NR_futex)
250 # define __NR_sys_futex __NR_futex
251 #endif
252 #if defined(__NR_futex_time64)
253 # define __NR_sys_futex_time64 __NR_futex_time64
254 #endif
255 #define __NR_sys_inotify_init __NR_inotify_init
256 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
257 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
258 #define __NR_sys_statx __NR_statx
259 
260 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
261 #define __NR__llseek __NR_lseek
262 #endif
263 
264 /* Newer kernel ports have llseek() instead of _llseek() */
265 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
266 #define TARGET_NR__llseek TARGET_NR_llseek
267 #endif
268 
269 #define __NR_sys_gettid __NR_gettid
270 _syscall0(int, sys_gettid)
271 
272 /* For the 64-bit guest on 32-bit host case we must emulate
273  * getdents using getdents64, because otherwise the host
274  * might hand us back more dirent records than we can fit
275  * into the guest buffer after structure format conversion.
276  * Otherwise we emulate getdents using the host getdents if it is available.
277  */
278 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
279 #define EMULATE_GETDENTS_WITH_GETDENTS
280 #endif
281 
282 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
283 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
284 #endif
285 #if (defined(TARGET_NR_getdents) && \
286       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
287     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
288 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
289 #endif
290 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
291 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
292           loff_t *, res, uint, wh);
293 #endif
294 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
295 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
296           siginfo_t *, uinfo)
297 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
298 #ifdef __NR_exit_group
299 _syscall1(int,exit_group,int,error_code)
300 #endif
301 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
302 _syscall1(int,set_tid_address,int *,tidptr)
303 #endif
304 #if defined(__NR_futex)
305 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
306           const struct timespec *,timeout,int *,uaddr2,int,val3)
307 #endif
308 #if defined(__NR_futex_time64)
309 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
310           const struct timespec *,timeout,int *,uaddr2,int,val3)
311 #endif
312 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
313 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
314           unsigned long *, user_mask_ptr);
315 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
316 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
317           unsigned long *, user_mask_ptr);
318 #define __NR_sys_getcpu __NR_getcpu
319 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
320 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
321           void *, arg);
322 _syscall2(int, capget, struct __user_cap_header_struct *, header,
323           struct __user_cap_data_struct *, data);
324 _syscall2(int, capset, struct __user_cap_header_struct *, header,
325           struct __user_cap_data_struct *, data);
326 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
327 _syscall2(int, ioprio_get, int, which, int, who)
328 #endif
329 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
330 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
331 #endif
332 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
333 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
334 #endif
335 
336 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
337 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
338           unsigned long, idx1, unsigned long, idx2)
339 #endif
340 
341 /*
342  * It is assumed that struct statx is architecture independent.
343  */
344 #if defined(TARGET_NR_statx) && defined(__NR_statx)
345 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
346           unsigned int, mask, struct target_statx *, statxbuf)
347 #endif
348 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
349 _syscall2(int, membarrier, int, cmd, int, flags)
350 #endif
351 
352 static bitmask_transtbl fcntl_flags_tbl[] = {
353   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
354   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
355   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
356   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
357   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
358   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
359   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
360   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
361   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
362   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
363   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
364   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
365   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
366 #if defined(O_DIRECT)
367   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
368 #endif
369 #if defined(O_NOATIME)
370   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
371 #endif
372 #if defined(O_CLOEXEC)
373   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
374 #endif
375 #if defined(O_PATH)
376   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
377 #endif
378 #if defined(O_TMPFILE)
379   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
380 #endif
381   /* Don't terminate the list prematurely on 64-bit host+guest.  */
382 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
383   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
384 #endif
385   { 0, 0, 0, 0 }
386 };
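
/*
 * Each row maps (target_mask, target_bits) to (host_mask, host_bits); the
 * table is consumed by target_to_host_bitmask()/host_to_target_bitmask()
 * from the open/fcntl paths further down.  An illustrative call would be
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *
 * which rewrites e.g. a guest O_NONBLOCK into the host's O_NONBLOCK value
 * even when the two ABIs encode that flag differently.
 */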
387 
388 static int sys_getcwd1(char *buf, size_t size)
389 {
390   if (getcwd(buf, size) == NULL) {
391       /* getcwd() sets errno */
392       return (-1);
393   }
394   return strlen(buf)+1;
395 }
396 
397 #ifdef TARGET_NR_utimensat
398 #if defined(__NR_utimensat)
399 #define __NR_sys_utimensat __NR_utimensat
400 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
401           const struct timespec *,tsp,int,flags)
402 #else
403 static int sys_utimensat(int dirfd, const char *pathname,
404                          const struct timespec times[2], int flags)
405 {
406     errno = ENOSYS;
407     return -1;
408 }
409 #endif
410 #endif /* TARGET_NR_utimensat */
411 
412 #ifdef TARGET_NR_renameat2
413 #if defined(__NR_renameat2)
414 #define __NR_sys_renameat2 __NR_renameat2
415 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
416           const char *, new, unsigned int, flags)
417 #else
418 static int sys_renameat2(int oldfd, const char *old,
419                          int newfd, const char *new, int flags)
420 {
421     if (flags == 0) {
422         return renameat(oldfd, old, newfd, new);
423     }
424     errno = ENOSYS;
425     return -1;
426 }
427 #endif
428 #endif /* TARGET_NR_renameat2 */
429 
430 #ifdef CONFIG_INOTIFY
431 #include <sys/inotify.h>
432 
433 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
434 static int sys_inotify_init(void)
435 {
436   return (inotify_init());
437 }
438 #endif
439 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
440 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
441 {
442   return (inotify_add_watch(fd, pathname, mask));
443 }
444 #endif
445 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
446 static int sys_inotify_rm_watch(int fd, int32_t wd)
447 {
448   return (inotify_rm_watch(fd, wd));
449 }
450 #endif
451 #ifdef CONFIG_INOTIFY1
452 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
453 static int sys_inotify_init1(int flags)
454 {
455   return (inotify_init1(flags));
456 }
457 #endif
458 #endif
459 #else
460 /* Userspace can usually survive at runtime without inotify */
461 #undef TARGET_NR_inotify_init
462 #undef TARGET_NR_inotify_init1
463 #undef TARGET_NR_inotify_add_watch
464 #undef TARGET_NR_inotify_rm_watch
465 #endif /* CONFIG_INOTIFY  */
466 
467 #if defined(TARGET_NR_prlimit64)
468 #ifndef __NR_prlimit64
469 # define __NR_prlimit64 -1
470 #endif
471 #define __NR_sys_prlimit64 __NR_prlimit64
472 /* The glibc rlimit structure may not be that used by the underlying syscall */
473 struct host_rlimit64 {
474     uint64_t rlim_cur;
475     uint64_t rlim_max;
476 };
477 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
478           const struct host_rlimit64 *, new_limit,
479           struct host_rlimit64 *, old_limit)
480 #endif
481 
482 
483 #if defined(TARGET_NR_timer_create)
484 /* Maximum of 32 active POSIX timers allowed at any one time. */
485 static timer_t g_posix_timers[32] = { 0, } ;
486 
487 static inline int next_free_host_timer(void)
488 {
489     int k ;
490     /* FIXME: Does finding the next free slot require a lock? */
491     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
492         if (g_posix_timers[k] == 0) {
493             g_posix_timers[k] = (timer_t) 1;
494             return k;
495         }
496     }
497     return -1;
498 }
499 #endif
500 
501 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
502 #ifdef TARGET_ARM
503 static inline int regpairs_aligned(void *cpu_env, int num)
504 {
505     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
506 }
507 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
508 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
509 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
510 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
511  * of registers which translates to the same as ARM/MIPS, because we start with
512  * r3 as arg1 */
513 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
514 #elif defined(TARGET_SH4)
515 /* SH4 doesn't align register pairs, except for p{read,write}64 */
516 static inline int regpairs_aligned(void *cpu_env, int num)
517 {
518     switch (num) {
519     case TARGET_NR_pread64:
520     case TARGET_NR_pwrite64:
521         return 1;
522 
523     default:
524         return 0;
525     }
526 }
527 #elif defined(TARGET_XTENSA)
528 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
529 #else
530 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
531 #endif
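
/*
 * Illustration of how regpairs_aligned() is consumed (the real adjustment
 * is in the syscall dispatch code further down): when it returns 1 for e.g.
 * pread64 on a 32-bit ABI, the 64-bit offset starts one register later, so
 * the handler shifts its arguments roughly as
 *
 *     if (regpairs_aligned(cpu_env, num)) {
 *         arg4 = arg5;
 *         arg5 = arg6;
 *     }
 *
 * before combining the low/high halves into a single 64-bit offset.
 */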
532 
533 #define ERRNO_TABLE_SIZE 1200
534 
535 /* target_to_host_errno_table[] is initialized from
536  * host_to_target_errno_table[] in syscall_init(). */
537 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
538 };
539 
540 /*
541  * This list is the union of errno values overridden in asm-<arch>/errno.h
542  * minus the errnos that are not actually generic to all archs.
543  */
544 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
545     [EAGAIN]		= TARGET_EAGAIN,
546     [EIDRM]		= TARGET_EIDRM,
547     [ECHRNG]		= TARGET_ECHRNG,
548     [EL2NSYNC]		= TARGET_EL2NSYNC,
549     [EL3HLT]		= TARGET_EL3HLT,
550     [EL3RST]		= TARGET_EL3RST,
551     [ELNRNG]		= TARGET_ELNRNG,
552     [EUNATCH]		= TARGET_EUNATCH,
553     [ENOCSI]		= TARGET_ENOCSI,
554     [EL2HLT]		= TARGET_EL2HLT,
555     [EDEADLK]		= TARGET_EDEADLK,
556     [ENOLCK]		= TARGET_ENOLCK,
557     [EBADE]		= TARGET_EBADE,
558     [EBADR]		= TARGET_EBADR,
559     [EXFULL]		= TARGET_EXFULL,
560     [ENOANO]		= TARGET_ENOANO,
561     [EBADRQC]		= TARGET_EBADRQC,
562     [EBADSLT]		= TARGET_EBADSLT,
563     [EBFONT]		= TARGET_EBFONT,
564     [ENOSTR]		= TARGET_ENOSTR,
565     [ENODATA]		= TARGET_ENODATA,
566     [ETIME]		= TARGET_ETIME,
567     [ENOSR]		= TARGET_ENOSR,
568     [ENONET]		= TARGET_ENONET,
569     [ENOPKG]		= TARGET_ENOPKG,
570     [EREMOTE]		= TARGET_EREMOTE,
571     [ENOLINK]		= TARGET_ENOLINK,
572     [EADV]		= TARGET_EADV,
573     [ESRMNT]		= TARGET_ESRMNT,
574     [ECOMM]		= TARGET_ECOMM,
575     [EPROTO]		= TARGET_EPROTO,
576     [EDOTDOT]		= TARGET_EDOTDOT,
577     [EMULTIHOP]		= TARGET_EMULTIHOP,
578     [EBADMSG]		= TARGET_EBADMSG,
579     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
580     [EOVERFLOW]		= TARGET_EOVERFLOW,
581     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
582     [EBADFD]		= TARGET_EBADFD,
583     [EREMCHG]		= TARGET_EREMCHG,
584     [ELIBACC]		= TARGET_ELIBACC,
585     [ELIBBAD]		= TARGET_ELIBBAD,
586     [ELIBSCN]		= TARGET_ELIBSCN,
587     [ELIBMAX]		= TARGET_ELIBMAX,
588     [ELIBEXEC]		= TARGET_ELIBEXEC,
589     [EILSEQ]		= TARGET_EILSEQ,
590     [ENOSYS]		= TARGET_ENOSYS,
591     [ELOOP]		= TARGET_ELOOP,
592     [ERESTART]		= TARGET_ERESTART,
593     [ESTRPIPE]		= TARGET_ESTRPIPE,
594     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
595     [EUSERS]		= TARGET_EUSERS,
596     [ENOTSOCK]		= TARGET_ENOTSOCK,
597     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
598     [EMSGSIZE]		= TARGET_EMSGSIZE,
599     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
600     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
601     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
602     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
603     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
604     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
605     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
606     [EADDRINUSE]	= TARGET_EADDRINUSE,
607     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
608     [ENETDOWN]		= TARGET_ENETDOWN,
609     [ENETUNREACH]	= TARGET_ENETUNREACH,
610     [ENETRESET]		= TARGET_ENETRESET,
611     [ECONNABORTED]	= TARGET_ECONNABORTED,
612     [ECONNRESET]	= TARGET_ECONNRESET,
613     [ENOBUFS]		= TARGET_ENOBUFS,
614     [EISCONN]		= TARGET_EISCONN,
615     [ENOTCONN]		= TARGET_ENOTCONN,
616     [EUCLEAN]		= TARGET_EUCLEAN,
617     [ENOTNAM]		= TARGET_ENOTNAM,
618     [ENAVAIL]		= TARGET_ENAVAIL,
619     [EISNAM]		= TARGET_EISNAM,
620     [EREMOTEIO]		= TARGET_EREMOTEIO,
621     [EDQUOT]            = TARGET_EDQUOT,
622     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
623     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
624     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
625     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
626     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
627     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
628     [EALREADY]		= TARGET_EALREADY,
629     [EINPROGRESS]	= TARGET_EINPROGRESS,
630     [ESTALE]		= TARGET_ESTALE,
631     [ECANCELED]		= TARGET_ECANCELED,
632     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
633     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
634 #ifdef ENOKEY
635     [ENOKEY]		= TARGET_ENOKEY,
636 #endif
637 #ifdef EKEYEXPIRED
638     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
639 #endif
640 #ifdef EKEYREVOKED
641     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
642 #endif
643 #ifdef EKEYREJECTED
644     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
645 #endif
646 #ifdef EOWNERDEAD
647     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
648 #endif
649 #ifdef ENOTRECOVERABLE
650     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
651 #endif
652 #ifdef ENOMSG
653     [ENOMSG]            = TARGET_ENOMSG,
654 #endif
655 #ifdef ERFKILL
656     [ERFKILL]           = TARGET_ERFKILL,
657 #endif
658 #ifdef EHWPOISON
659     [EHWPOISON]         = TARGET_EHWPOISON,
660 #endif
661 };
662 
663 static inline int host_to_target_errno(int err)
664 {
665     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
666         host_to_target_errno_table[err]) {
667         return host_to_target_errno_table[err];
668     }
669     return err;
670 }
671 
672 static inline int target_to_host_errno(int err)
673 {
674     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
675         target_to_host_errno_table[err]) {
676         return target_to_host_errno_table[err];
677     }
678     return err;
679 }
680 
681 static inline abi_long get_errno(abi_long ret)
682 {
683     if (ret == -1)
684         return -host_to_target_errno(errno);
685     else
686         return ret;
687 }
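
/*
 * Typical use of get_errno(): wrap a host call so that guest code sees a
 * target errno, e.g. (illustrative, mirroring the handlers in do_syscall())
 *
 *     ret = get_errno(close(arg1));    // -TARGET_EBADF on a bad fd
 *
 * Results other than -1 pass through unchanged.
 */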
688 
689 const char *target_strerror(int err)
690 {
691     if (err == TARGET_ERESTARTSYS) {
692         return "To be restarted";
693     }
694     if (err == TARGET_QEMU_ESIGRETURN) {
695         return "Successful exit from sigreturn";
696     }
697 
698     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
699         return NULL;
700     }
701     return strerror(target_to_host_errno(err));
702 }
703 
704 #define safe_syscall0(type, name) \
705 static type safe_##name(void) \
706 { \
707     return safe_syscall(__NR_##name); \
708 }
709 
710 #define safe_syscall1(type, name, type1, arg1) \
711 static type safe_##name(type1 arg1) \
712 { \
713     return safe_syscall(__NR_##name, arg1); \
714 }
715 
716 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
717 static type safe_##name(type1 arg1, type2 arg2) \
718 { \
719     return safe_syscall(__NR_##name, arg1, arg2); \
720 }
721 
722 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
723 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
724 { \
725     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
726 }
727 
728 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
729     type4, arg4) \
730 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
731 { \
732     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
733 }
734 
735 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
736     type4, arg4, type5, arg5) \
737 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
738     type5 arg5) \
739 { \
740     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
741 }
742 
743 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
744     type4, arg4, type5, arg5, type6, arg6) \
745 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
746     type5 arg5, type6 arg6) \
747 { \
748     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
749 }
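
/*
 * As with the _syscallN templates earlier, these expand to thin static
 * wrappers; e.g. safe_syscall2(int, kill, pid_t, pid, int, sig) below
 * becomes roughly
 *
 *     static int safe_kill(pid_t pid, int sig)
 *     {
 *         return safe_syscall(__NR_kill, pid, sig);
 *     }
 *
 * so each of the host calls wrapped below gets a safe_xxx() entry point.
 */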
750 
751 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
752 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
753 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
754               int, flags, mode_t, mode)
755 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
756 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
757               struct rusage *, rusage)
758 #endif
759 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
760               int, options, struct rusage *, rusage)
761 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
762 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
763     defined(TARGET_NR_pselect6)
764 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
765               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
766 #endif
767 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
768 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
769               struct timespec *, tsp, const sigset_t *, sigmask,
770               size_t, sigsetsize)
771 #endif
772 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
773               int, maxevents, int, timeout, const sigset_t *, sigmask,
774               size_t, sigsetsize)
775 #if defined(__NR_futex)
776 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
777               const struct timespec *,timeout,int *,uaddr2,int,val3)
778 #endif
779 #if defined(__NR_futex_time64)
780 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
781               const struct timespec *,timeout,int *,uaddr2,int,val3)
782 #endif
783 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
784 safe_syscall2(int, kill, pid_t, pid, int, sig)
785 safe_syscall2(int, tkill, int, tid, int, sig)
786 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
787 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
788 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
789 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
790               unsigned long, pos_l, unsigned long, pos_h)
791 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
792               unsigned long, pos_l, unsigned long, pos_h)
793 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
794               socklen_t, addrlen)
795 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
796               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
797 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
798               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
799 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
800 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
801 safe_syscall2(int, flock, int, fd, int, operation)
802 #ifdef TARGET_NR_rt_sigtimedwait
803 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
804               const struct timespec *, uts, size_t, sigsetsize)
805 #endif
806 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
807               int, flags)
808 #if defined(TARGET_NR_nanosleep)
809 safe_syscall2(int, nanosleep, const struct timespec *, req,
810               struct timespec *, rem)
811 #endif
812 #ifdef TARGET_NR_clock_nanosleep
813 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
814               const struct timespec *, req, struct timespec *, rem)
815 #endif
816 #ifdef __NR_ipc
817 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
818               void *, ptr, long, fifth)
819 #endif
820 #ifdef __NR_msgsnd
821 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
822               int, flags)
823 #endif
824 #ifdef __NR_msgrcv
825 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
826               long, msgtype, int, flags)
827 #endif
828 #ifdef __NR_semtimedop
829 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
830               unsigned, nsops, const struct timespec *, timeout)
831 #endif
832 #ifdef TARGET_NR_mq_timedsend
833 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
834               size_t, len, unsigned, prio, const struct timespec *, timeout)
835 #endif
836 #ifdef TARGET_NR_mq_timedreceive
837 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
838               size_t, len, unsigned *, prio, const struct timespec *, timeout)
839 #endif
840 /* We do ioctl like this rather than via safe_syscall3 to preserve the
841  * "third argument might be integer or pointer or not present" behaviour of
842  * the libc function.
843  */
844 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
845 /* Similarly for fcntl. Note that callers must always:
846  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
847  *  use the flock64 struct rather than unsuffixed flock
848  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
849  */
850 #ifdef __NR_fcntl64
851 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
852 #else
853 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
854 #endif
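
/*
 * Illustrative call (the conversion between the target flock layout and
 * struct flock64 is done by the fcntl handling further down):
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 */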
855 
856 static inline int host_to_target_sock_type(int host_type)
857 {
858     int target_type;
859 
860     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
861     case SOCK_DGRAM:
862         target_type = TARGET_SOCK_DGRAM;
863         break;
864     case SOCK_STREAM:
865         target_type = TARGET_SOCK_STREAM;
866         break;
867     default:
868         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
869         break;
870     }
871 
872 #if defined(SOCK_CLOEXEC)
873     if (host_type & SOCK_CLOEXEC) {
874         target_type |= TARGET_SOCK_CLOEXEC;
875     }
876 #endif
877 
878 #if defined(SOCK_NONBLOCK)
879     if (host_type & SOCK_NONBLOCK) {
880         target_type |= TARGET_SOCK_NONBLOCK;
881     }
882 #endif
883 
884     return target_type;
885 }
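
/* For example, a host (SOCK_STREAM | SOCK_NONBLOCK) socket type is reported
 * to the guest as (TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK). */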
886 
887 static abi_ulong target_brk;
888 static abi_ulong target_original_brk;
889 static abi_ulong brk_page;
890 
891 void target_set_brk(abi_ulong new_brk)
892 {
893     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
894     brk_page = HOST_PAGE_ALIGN(target_brk);
895 }
896 
897 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
898 #define DEBUGF_BRK(message, args...)
899 
900 /* do_brk() must return target values and target errnos. */
901 abi_long do_brk(abi_ulong new_brk)
902 {
903     abi_long mapped_addr;
904     abi_ulong new_alloc_size;
905 
906     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
907 
908     if (!new_brk) {
909         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
910         return target_brk;
911     }
912     if (new_brk < target_original_brk) {
913         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
914                    target_brk);
915         return target_brk;
916     }
917 
918     /* If the new brk is less than the highest page reserved to the
919      * target heap allocation, set it and we're almost done...  */
920     if (new_brk <= brk_page) {
921         /* Heap contents are initialized to zero, as for anonymous
922          * mapped pages.  */
923         if (new_brk > target_brk) {
924             memset(g2h(target_brk), 0, new_brk - target_brk);
925         }
926 	target_brk = new_brk;
927         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
928 	return target_brk;
929     }
930 
931     /* We need to allocate more memory after the brk... Note that
932      * we don't use MAP_FIXED because that will map over the top of
933      * any existing mapping (like the one with the host libc or qemu
934      * itself); instead we treat "mapped but at wrong address" as
935      * a failure and unmap again.
936      */
937     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
938     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
939                                         PROT_READ|PROT_WRITE,
940                                         MAP_ANON|MAP_PRIVATE, 0, 0));
941 
942     if (mapped_addr == brk_page) {
943         /* Heap contents are initialized to zero, as for anonymous
944          * mapped pages.  Technically the new pages are already
945          * initialized to zero since they *are* anonymous mapped
946          * pages, however we have to take care with the contents that
947          * come from the remaining part of the previous page: it may
948          * contain garbage data due to a previous heap usage (grown
949          * then shrunken).  */
950         memset(g2h(target_brk), 0, brk_page - target_brk);
951 
952         target_brk = new_brk;
953         brk_page = HOST_PAGE_ALIGN(target_brk);
954         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
955             target_brk);
956         return target_brk;
957     } else if (mapped_addr != -1) {
958         /* Mapped but at wrong address, meaning there wasn't actually
959          * enough space for this brk.
960          */
961         target_munmap(mapped_addr, new_alloc_size);
962         mapped_addr = -1;
963         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
964     }
965     else {
966         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
967     }
968 
969 #if defined(TARGET_ALPHA)
970     /* We (partially) emulate OSF/1 on Alpha, which requires we
971        return a proper errno, not an unchanged brk value.  */
972     return -TARGET_ENOMEM;
973 #endif
974     /* For everything else, return the previous break. */
975     return target_brk;
976 }
977 
978 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
979     defined(TARGET_NR_pselect6)
980 static inline abi_long copy_from_user_fdset(fd_set *fds,
981                                             abi_ulong target_fds_addr,
982                                             int n)
983 {
984     int i, nw, j, k;
985     abi_ulong b, *target_fds;
986 
987     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
988     if (!(target_fds = lock_user(VERIFY_READ,
989                                  target_fds_addr,
990                                  sizeof(abi_ulong) * nw,
991                                  1)))
992         return -TARGET_EFAULT;
993 
994     FD_ZERO(fds);
995     k = 0;
996     for (i = 0; i < nw; i++) {
997         /* grab the abi_ulong */
998         __get_user(b, &target_fds[i]);
999         for (j = 0; j < TARGET_ABI_BITS; j++) {
1000             /* check the bit inside the abi_ulong */
1001             if ((b >> j) & 1)
1002                 FD_SET(k, fds);
1003             k++;
1004         }
1005     }
1006 
1007     unlock_user(target_fds, target_fds_addr, 0);
1008 
1009     return 0;
1010 }
1011 
1012 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1013                                                  abi_ulong target_fds_addr,
1014                                                  int n)
1015 {
1016     if (target_fds_addr) {
1017         if (copy_from_user_fdset(fds, target_fds_addr, n))
1018             return -TARGET_EFAULT;
1019         *fds_ptr = fds;
1020     } else {
1021         *fds_ptr = NULL;
1022     }
1023     return 0;
1024 }
1025 
1026 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1027                                           const fd_set *fds,
1028                                           int n)
1029 {
1030     int i, nw, j, k;
1031     abi_long v;
1032     abi_ulong *target_fds;
1033 
1034     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1035     if (!(target_fds = lock_user(VERIFY_WRITE,
1036                                  target_fds_addr,
1037                                  sizeof(abi_ulong) * nw,
1038                                  0)))
1039         return -TARGET_EFAULT;
1040 
1041     k = 0;
1042     for (i = 0; i < nw; i++) {
1043         v = 0;
1044         for (j = 0; j < TARGET_ABI_BITS; j++) {
1045             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1046             k++;
1047         }
1048         __put_user(v, &target_fds[i]);
1049     }
1050 
1051     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1052 
1053     return 0;
1054 }
1055 #endif
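
/*
 * Worked example for the fd_set helpers above: with TARGET_ABI_BITS == 32,
 * guest descriptor k lives at bit (k % 32) of abi_ulong word (k / 32), so
 * guest fd 35 is bit 3 of target_fds[1].  The loops simply walk every bit,
 * byte-swapping each abi_ulong with __get_user()/__put_user().
 */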
1056 
1057 #if defined(__alpha__)
1058 #define HOST_HZ 1024
1059 #else
1060 #define HOST_HZ 100
1061 #endif
1062 
1063 static inline abi_long host_to_target_clock_t(long ticks)
1064 {
1065 #if HOST_HZ == TARGET_HZ
1066     return ticks;
1067 #else
1068     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1069 #endif
1070 }
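
/*
 * Example: an Alpha host has HOST_HZ == 1024, so for a target built with
 * TARGET_HZ == 100, 2048 host ticks are reported as
 * (2048 * 100) / 1024 = 200 target clock ticks.
 */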
1071 
1072 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1073                                              const struct rusage *rusage)
1074 {
1075     struct target_rusage *target_rusage;
1076 
1077     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1078         return -TARGET_EFAULT;
1079     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1080     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1081     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1082     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1083     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1084     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1085     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1086     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1087     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1088     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1089     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1090     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1091     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1092     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1093     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1094     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1095     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1096     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1097     unlock_user_struct(target_rusage, target_addr, 1);
1098 
1099     return 0;
1100 }
1101 
1102 #ifdef TARGET_NR_setrlimit
1103 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1104 {
1105     abi_ulong target_rlim_swap;
1106     rlim_t result;
1107 
1108     target_rlim_swap = tswapal(target_rlim);
1109     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1110         return RLIM_INFINITY;
1111 
1112     result = target_rlim_swap;
1113     if (target_rlim_swap != (rlim_t)result)
1114         return RLIM_INFINITY;
1115 
1116     return result;
1117 }
1118 #endif
1119 
1120 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1121 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1122 {
1123     abi_ulong target_rlim_swap;
1124     abi_ulong result;
1125 
1126     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1127         target_rlim_swap = TARGET_RLIM_INFINITY;
1128     else
1129         target_rlim_swap = rlim;
1130     result = tswapal(target_rlim_swap);
1131 
1132     return result;
1133 }
1134 #endif
1135 
1136 static inline int target_to_host_resource(int code)
1137 {
1138     switch (code) {
1139     case TARGET_RLIMIT_AS:
1140         return RLIMIT_AS;
1141     case TARGET_RLIMIT_CORE:
1142         return RLIMIT_CORE;
1143     case TARGET_RLIMIT_CPU:
1144         return RLIMIT_CPU;
1145     case TARGET_RLIMIT_DATA:
1146         return RLIMIT_DATA;
1147     case TARGET_RLIMIT_FSIZE:
1148         return RLIMIT_FSIZE;
1149     case TARGET_RLIMIT_LOCKS:
1150         return RLIMIT_LOCKS;
1151     case TARGET_RLIMIT_MEMLOCK:
1152         return RLIMIT_MEMLOCK;
1153     case TARGET_RLIMIT_MSGQUEUE:
1154         return RLIMIT_MSGQUEUE;
1155     case TARGET_RLIMIT_NICE:
1156         return RLIMIT_NICE;
1157     case TARGET_RLIMIT_NOFILE:
1158         return RLIMIT_NOFILE;
1159     case TARGET_RLIMIT_NPROC:
1160         return RLIMIT_NPROC;
1161     case TARGET_RLIMIT_RSS:
1162         return RLIMIT_RSS;
1163     case TARGET_RLIMIT_RTPRIO:
1164         return RLIMIT_RTPRIO;
1165     case TARGET_RLIMIT_SIGPENDING:
1166         return RLIMIT_SIGPENDING;
1167     case TARGET_RLIMIT_STACK:
1168         return RLIMIT_STACK;
1169     default:
1170         return code;
1171     }
1172 }
1173 
1174 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1175                                               abi_ulong target_tv_addr)
1176 {
1177     struct target_timeval *target_tv;
1178 
1179     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1180         return -TARGET_EFAULT;
1181     }
1182 
1183     __get_user(tv->tv_sec, &target_tv->tv_sec);
1184     __get_user(tv->tv_usec, &target_tv->tv_usec);
1185 
1186     unlock_user_struct(target_tv, target_tv_addr, 0);
1187 
1188     return 0;
1189 }
1190 
1191 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1192                                             const struct timeval *tv)
1193 {
1194     struct target_timeval *target_tv;
1195 
1196     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1197         return -TARGET_EFAULT;
1198     }
1199 
1200     __put_user(tv->tv_sec, &target_tv->tv_sec);
1201     __put_user(tv->tv_usec, &target_tv->tv_usec);
1202 
1203     unlock_user_struct(target_tv, target_tv_addr, 1);
1204 
1205     return 0;
1206 }
1207 
1208 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1209                                              const struct timeval *tv)
1210 {
1211     struct target__kernel_sock_timeval *target_tv;
1212 
1213     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1214         return -TARGET_EFAULT;
1215     }
1216 
1217     __put_user(tv->tv_sec, &target_tv->tv_sec);
1218     __put_user(tv->tv_usec, &target_tv->tv_usec);
1219 
1220     unlock_user_struct(target_tv, target_tv_addr, 1);
1221 
1222     return 0;
1223 }
1224 
1225 #if defined(TARGET_NR_futex) || \
1226     defined(TARGET_NR_rt_sigtimedwait) || \
1227     defined(TARGET_NR_pselect6) || \
1228     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1229     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1230     defined(TARGET_NR_mq_timedreceive)
1231 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1232                                                abi_ulong target_addr)
1233 {
1234     struct target_timespec *target_ts;
1235 
1236     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1237         return -TARGET_EFAULT;
1238     }
1239     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1240     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1241     unlock_user_struct(target_ts, target_addr, 0);
1242     return 0;
1243 }
1244 #endif
1245 
1246 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64)
1247 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1248                                                  abi_ulong target_addr)
1249 {
1250     struct target__kernel_timespec *target_ts;
1251 
1252     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1253         return -TARGET_EFAULT;
1254     }
1255     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1256     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1257     unlock_user_struct(target_ts, target_addr, 0);
1258     return 0;
1259 }
1260 #endif
1261 
1262 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1263                                                struct timespec *host_ts)
1264 {
1265     struct target_timespec *target_ts;
1266 
1267     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1268         return -TARGET_EFAULT;
1269     }
1270     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1271     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1272     unlock_user_struct(target_ts, target_addr, 1);
1273     return 0;
1274 }
1275 
1276 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1277                                                  struct timespec *host_ts)
1278 {
1279     struct target__kernel_timespec *target_ts;
1280 
1281     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1282         return -TARGET_EFAULT;
1283     }
1284     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1285     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1286     unlock_user_struct(target_ts, target_addr, 1);
1287     return 0;
1288 }
1289 
1290 #if defined(TARGET_NR_gettimeofday)
1291 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1292                                              struct timezone *tz)
1293 {
1294     struct target_timezone *target_tz;
1295 
1296     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1297         return -TARGET_EFAULT;
1298     }
1299 
1300     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1301     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1302 
1303     unlock_user_struct(target_tz, target_tz_addr, 1);
1304 
1305     return 0;
1306 }
1307 #endif
1308 
1309 #if defined(TARGET_NR_settimeofday)
1310 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1311                                                abi_ulong target_tz_addr)
1312 {
1313     struct target_timezone *target_tz;
1314 
1315     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1316         return -TARGET_EFAULT;
1317     }
1318 
1319     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1320     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1321 
1322     unlock_user_struct(target_tz, target_tz_addr, 0);
1323 
1324     return 0;
1325 }
1326 #endif
1327 
1328 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1329 #include <mqueue.h>
1330 
1331 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1332                                               abi_ulong target_mq_attr_addr)
1333 {
1334     struct target_mq_attr *target_mq_attr;
1335 
1336     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1337                           target_mq_attr_addr, 1))
1338         return -TARGET_EFAULT;
1339 
1340     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1341     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1342     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1343     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1344 
1345     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1346 
1347     return 0;
1348 }
1349 
1350 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1351                                             const struct mq_attr *attr)
1352 {
1353     struct target_mq_attr *target_mq_attr;
1354 
1355     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1356                           target_mq_attr_addr, 0))
1357         return -TARGET_EFAULT;
1358 
1359     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1360     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1361     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1362     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1363 
1364     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1365 
1366     return 0;
1367 }
1368 #endif
1369 
1370 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1371 /* do_select() must return target values and target errnos. */
1372 static abi_long do_select(int n,
1373                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1374                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1375 {
1376     fd_set rfds, wfds, efds;
1377     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1378     struct timeval tv;
1379     struct timespec ts, *ts_ptr;
1380     abi_long ret;
1381 
1382     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1383     if (ret) {
1384         return ret;
1385     }
1386     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1387     if (ret) {
1388         return ret;
1389     }
1390     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1391     if (ret) {
1392         return ret;
1393     }
1394 
1395     if (target_tv_addr) {
1396         if (copy_from_user_timeval(&tv, target_tv_addr))
1397             return -TARGET_EFAULT;
1398         ts.tv_sec = tv.tv_sec;
1399         ts.tv_nsec = tv.tv_usec * 1000;
1400         ts_ptr = &ts;
1401     } else {
1402         ts_ptr = NULL;
1403     }
1404 
1405     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1406                                   ts_ptr, NULL));
1407 
1408     if (!is_error(ret)) {
1409         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1410             return -TARGET_EFAULT;
1411         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1412             return -TARGET_EFAULT;
1413         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1414             return -TARGET_EFAULT;
1415 
1416         if (target_tv_addr) {
1417             tv.tv_sec = ts.tv_sec;
1418             tv.tv_usec = ts.tv_nsec / 1000;
1419             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1420                 return -TARGET_EFAULT;
1421             }
1422         }
1423     }
1424 
1425     return ret;
1426 }
1427 
1428 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1429 static abi_long do_old_select(abi_ulong arg1)
1430 {
1431     struct target_sel_arg_struct *sel;
1432     abi_ulong inp, outp, exp, tvp;
1433     long nsel;
1434 
1435     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1436         return -TARGET_EFAULT;
1437     }
1438 
1439     nsel = tswapal(sel->n);
1440     inp = tswapal(sel->inp);
1441     outp = tswapal(sel->outp);
1442     exp = tswapal(sel->exp);
1443     tvp = tswapal(sel->tvp);
1444 
1445     unlock_user_struct(sel, arg1, 0);
1446 
1447     return do_select(nsel, inp, outp, exp, tvp);
1448 }
1449 #endif
1450 #endif
1451 
1452 static abi_long do_pipe2(int host_pipe[], int flags)
1453 {
1454 #ifdef CONFIG_PIPE2
1455     return pipe2(host_pipe, flags);
1456 #else
1457     return -ENOSYS;
1458 #endif
1459 }
1460 
1461 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1462                         int flags, int is_pipe2)
1463 {
1464     int host_pipe[2];
1465     abi_long ret;
1466     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1467 
1468     if (is_error(ret))
1469         return get_errno(ret);
1470 
1471     /* Several targets have special calling conventions for the original
1472        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1473     if (!is_pipe2) {
1474 #if defined(TARGET_ALPHA)
1475         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1476         return host_pipe[0];
1477 #elif defined(TARGET_MIPS)
1478         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1479         return host_pipe[0];
1480 #elif defined(TARGET_SH4)
1481         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1482         return host_pipe[0];
1483 #elif defined(TARGET_SPARC)
1484         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1485         return host_pipe[0];
1486 #endif
1487     }
1488 
1489     if (put_user_s32(host_pipe[0], pipedes)
1490         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1491         return -TARGET_EFAULT;
1492     return get_errno(ret);
1493 }
1494 
1495 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1496                                               abi_ulong target_addr,
1497                                               socklen_t len)
1498 {
1499     struct target_ip_mreqn *target_smreqn;
1500 
1501     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1502     if (!target_smreqn)
1503         return -TARGET_EFAULT;
1504     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1505     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1506     if (len == sizeof(struct target_ip_mreqn))
1507         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1508     unlock_user(target_smreqn, target_addr, 0);
1509 
1510     return 0;
1511 }
1512 
1513 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1514                                                abi_ulong target_addr,
1515                                                socklen_t len)
1516 {
1517     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1518     sa_family_t sa_family;
1519     struct target_sockaddr *target_saddr;
1520 
1521     if (fd_trans_target_to_host_addr(fd)) {
1522         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1523     }
1524 
1525     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1526     if (!target_saddr)
1527         return -TARGET_EFAULT;
1528 
1529     sa_family = tswap16(target_saddr->sa_family);
1530 
1531     /* Oops. The caller might send an incomplete sun_path; sun_path
1532      * must be terminated by \0 (see the manual page), but
1533      * unfortunately it is quite common to specify sockaddr_un
1534      * length as "strlen(x->sun_path)" while it should be
1535      * "strlen(...) + 1". We'll fix that here if needed.
1536      * The Linux kernel has a similar fix-up.
1537      */
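
    /*
     * Example: a guest that sets the address length to cover the sun_path
     * bytes but not the trailing NUL gets len bumped by one below, provided
     * len is within sockaddr_un bounds and the byte just past it is zero.
     */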
1538 
1539     if (sa_family == AF_UNIX) {
1540         if (len < unix_maxlen && len > 0) {
1541             char *cp = (char*)target_saddr;
1542 
1543             if ( cp[len-1] && !cp[len] )
1544                 len++;
1545         }
1546         if (len > unix_maxlen)
1547             len = unix_maxlen;
1548     }
1549 
1550     memcpy(addr, target_saddr, len);
1551     addr->sa_family = sa_family;
1552     if (sa_family == AF_NETLINK) {
1553         struct sockaddr_nl *nladdr;
1554 
1555         nladdr = (struct sockaddr_nl *)addr;
1556         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1557         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1558     } else if (sa_family == AF_PACKET) {
1559         struct target_sockaddr_ll *lladdr;
1560 
1561         lladdr = (struct target_sockaddr_ll *)addr;
1562         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1563         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1564     }
1565     unlock_user(target_saddr, target_addr, 0);
1566 
1567     return 0;
1568 }
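
/*
 * Illustrative sketch of the guest pattern the AF_UNIX length fix-up above
 * is for: passing an address length that omits the terminating NUL of
 * sun_path.  The socket and path below are made up for the example.
 */
#if 0
static void guest_connect_unix_example(void)
{
    int s = socket(AF_UNIX, SOCK_STREAM, 0);
    struct sockaddr_un sun = { .sun_family = AF_UNIX };

    strcpy(sun.sun_path, "/tmp/example.sock");
    /* common but technically wrong: length omits the trailing '\0' */
    connect(s, (struct sockaddr *)&sun,
            offsetof(struct sockaddr_un, sun_path) + strlen(sun.sun_path));
}
#endif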
1569 
1570 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1571                                                struct sockaddr *addr,
1572                                                socklen_t len)
1573 {
1574     struct target_sockaddr *target_saddr;
1575 
1576     if (len == 0) {
1577         return 0;
1578     }
1579     assert(addr);
1580 
1581     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1582     if (!target_saddr)
1583         return -TARGET_EFAULT;
1584     memcpy(target_saddr, addr, len);
1585     if (len >= offsetof(struct target_sockaddr, sa_family) +
1586         sizeof(target_saddr->sa_family)) {
1587         target_saddr->sa_family = tswap16(addr->sa_family);
1588     }
1589     if (addr->sa_family == AF_NETLINK &&
1590         len >= sizeof(struct target_sockaddr_nl)) {
1591         struct target_sockaddr_nl *target_nl =
1592                (struct target_sockaddr_nl *)target_saddr;
1593         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1594         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1595     } else if (addr->sa_family == AF_PACKET) {
1596         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1597         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1598         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1599     } else if (addr->sa_family == AF_INET6 &&
1600                len >= sizeof(struct target_sockaddr_in6)) {
1601         struct target_sockaddr_in6 *target_in6 =
1602                (struct target_sockaddr_in6 *)target_saddr;
1603         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1604     }
1605     unlock_user(target_saddr, target_addr, len);
1606 
1607     return 0;
1608 }
1609 
1610 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1611                                            struct target_msghdr *target_msgh)
1612 {
1613     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1614     abi_long msg_controllen;
1615     abi_ulong target_cmsg_addr;
1616     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1617     socklen_t space = 0;
1618 
1619     msg_controllen = tswapal(target_msgh->msg_controllen);
1620     if (msg_controllen < sizeof (struct target_cmsghdr))
1621         goto the_end;
1622     target_cmsg_addr = tswapal(target_msgh->msg_control);
1623     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1624     target_cmsg_start = target_cmsg;
1625     if (!target_cmsg)
1626         return -TARGET_EFAULT;
1627 
1628     while (cmsg && target_cmsg) {
1629         void *data = CMSG_DATA(cmsg);
1630         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1631 
1632         int len = tswapal(target_cmsg->cmsg_len)
1633             - sizeof(struct target_cmsghdr);
1634 
1635         space += CMSG_SPACE(len);
1636         if (space > msgh->msg_controllen) {
1637             space -= CMSG_SPACE(len);
1638             /* This is a QEMU bug, since we allocated the payload
1639              * area ourselves (unlike overflow in host-to-target
1640              * conversion, which is just the guest giving us a buffer
1641              * that's too small). It can't happen for the payload types
1642              * we currently support; if it becomes an issue in future
1643              * we would need to improve our allocation strategy to
1644              * something more intelligent than "twice the size of the
1645              * target buffer we're reading from".
1646              */
1647             qemu_log_mask(LOG_UNIMP,
1648                           ("Unsupported ancillary data %d/%d: "
1649                            "unhandled msg size\n"),
1650                           tswap32(target_cmsg->cmsg_level),
1651                           tswap32(target_cmsg->cmsg_type));
1652             break;
1653         }
1654 
1655         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1656             cmsg->cmsg_level = SOL_SOCKET;
1657         } else {
1658             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1659         }
1660         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1661         cmsg->cmsg_len = CMSG_LEN(len);
1662 
1663         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1664             int *fd = (int *)data;
1665             int *target_fd = (int *)target_data;
1666             int i, numfds = len / sizeof(int);
1667 
1668             for (i = 0; i < numfds; i++) {
1669                 __get_user(fd[i], target_fd + i);
1670             }
1671         } else if (cmsg->cmsg_level == SOL_SOCKET
1672                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1673             struct ucred *cred = (struct ucred *)data;
1674             struct target_ucred *target_cred =
1675                 (struct target_ucred *)target_data;
1676 
1677             __get_user(cred->pid, &target_cred->pid);
1678             __get_user(cred->uid, &target_cred->uid);
1679             __get_user(cred->gid, &target_cred->gid);
1680         } else {
1681             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1682                           cmsg->cmsg_level, cmsg->cmsg_type);
1683             memcpy(data, target_data, len);
1684         }
1685 
1686         cmsg = CMSG_NXTHDR(msgh, cmsg);
1687         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1688                                          target_cmsg_start);
1689     }
1690     unlock_user(target_cmsg, target_cmsg_addr, 0);
1691  the_end:
1692     msgh->msg_controllen = space;
1693     return 0;
1694 }
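
/*
 * Illustrative sketch of the guest-side message that produces the
 * SCM_RIGHTS control block handled above; the payload is an array of ints,
 * which is why each descriptor is converted with __get_user() rather than
 * copied as raw bytes.  send_one_fd() is a hypothetical helper.
 */
#if 0
static void send_one_fd(int sock, int fd_to_pass)
{
    char dummy = 'x';
    struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
    union {
        struct cmsghdr hdr;
        char buf[CMSG_SPACE(sizeof(int))];
    } control;
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control.buf,
        .msg_controllen = sizeof(control.buf),
    };
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

    sendmsg(sock, &msg, 0);
}
#endif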
1695 
1696 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1697                                            struct msghdr *msgh)
1698 {
1699     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1700     abi_long msg_controllen;
1701     abi_ulong target_cmsg_addr;
1702     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1703     socklen_t space = 0;
1704 
1705     msg_controllen = tswapal(target_msgh->msg_controllen);
1706     if (msg_controllen < sizeof (struct target_cmsghdr))
1707         goto the_end;
1708     target_cmsg_addr = tswapal(target_msgh->msg_control);
1709     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1710     target_cmsg_start = target_cmsg;
1711     if (!target_cmsg)
1712         return -TARGET_EFAULT;
1713 
1714     while (cmsg && target_cmsg) {
1715         void *data = CMSG_DATA(cmsg);
1716         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1717 
1718         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1719         int tgt_len, tgt_space;
1720 
1721         /* We never copy a half-header but may copy half-data;
1722          * this is Linux's behaviour in put_cmsg(). Note that
1723          * truncation here is a guest problem (which we report
1724          * to the guest via the CTRUNC bit), unlike truncation
1725          * in target_to_host_cmsg, which is a QEMU bug.
1726          */
1727         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1728             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1729             break;
1730         }
1731 
1732         if (cmsg->cmsg_level == SOL_SOCKET) {
1733             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1734         } else {
1735             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1736         }
1737         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1738 
1739         /* Payload types which need a different size of payload on
1740          * the target must adjust tgt_len here.
1741          */
1742         tgt_len = len;
1743         switch (cmsg->cmsg_level) {
1744         case SOL_SOCKET:
1745             switch (cmsg->cmsg_type) {
1746             case SO_TIMESTAMP:
1747                 tgt_len = sizeof(struct target_timeval);
1748                 break;
1749             default:
1750                 break;
1751             }
1752             break;
1753         default:
1754             break;
1755         }
1756 
1757         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1758             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1759             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1760         }
1761 
1762         /* We must now copy-and-convert len bytes of payload
1763          * into tgt_len bytes of destination space. Bear in mind
1764          * that in both source and destination we may be dealing
1765          * with a truncated value!
1766          */
1767         switch (cmsg->cmsg_level) {
1768         case SOL_SOCKET:
1769             switch (cmsg->cmsg_type) {
1770             case SCM_RIGHTS:
1771             {
1772                 int *fd = (int *)data;
1773                 int *target_fd = (int *)target_data;
1774                 int i, numfds = tgt_len / sizeof(int);
1775 
1776                 for (i = 0; i < numfds; i++) {
1777                     __put_user(fd[i], target_fd + i);
1778                 }
1779                 break;
1780             }
1781             case SO_TIMESTAMP:
1782             {
1783                 struct timeval *tv = (struct timeval *)data;
1784                 struct target_timeval *target_tv =
1785                     (struct target_timeval *)target_data;
1786 
1787                 if (len != sizeof(struct timeval) ||
1788                     tgt_len != sizeof(struct target_timeval)) {
1789                     goto unimplemented;
1790                 }
1791 
1792                 /* copy struct timeval to target */
1793                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1794                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1795                 break;
1796             }
1797             case SCM_CREDENTIALS:
1798             {
1799                 struct ucred *cred = (struct ucred *)data;
1800                 struct target_ucred *target_cred =
1801                     (struct target_ucred *)target_data;
1802 
1803                 __put_user(cred->pid, &target_cred->pid);
1804                 __put_user(cred->uid, &target_cred->uid);
1805                 __put_user(cred->gid, &target_cred->gid);
1806                 break;
1807             }
1808             default:
1809                 goto unimplemented;
1810             }
1811             break;
1812 
1813         case SOL_IP:
1814             switch (cmsg->cmsg_type) {
1815             case IP_TTL:
1816             {
1817                 uint32_t *v = (uint32_t *)data;
1818                 uint32_t *t_int = (uint32_t *)target_data;
1819 
1820                 if (len != sizeof(uint32_t) ||
1821                     tgt_len != sizeof(uint32_t)) {
1822                     goto unimplemented;
1823                 }
1824                 __put_user(*v, t_int);
1825                 break;
1826             }
1827             case IP_RECVERR:
1828             {
1829                 struct errhdr_t {
1830                    struct sock_extended_err ee;
1831                    struct sockaddr_in offender;
1832                 };
1833                 struct errhdr_t *errh = (struct errhdr_t *)data;
1834                 struct errhdr_t *target_errh =
1835                     (struct errhdr_t *)target_data;
1836 
1837                 if (len != sizeof(struct errhdr_t) ||
1838                     tgt_len != sizeof(struct errhdr_t)) {
1839                     goto unimplemented;
1840                 }
1841                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1842                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1843                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1844                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1845                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1846                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1847                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1848                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1849                     (void *) &errh->offender, sizeof(errh->offender));
1850                 break;
1851             }
1852             default:
1853                 goto unimplemented;
1854             }
1855             break;
1856 
1857         case SOL_IPV6:
1858             switch (cmsg->cmsg_type) {
1859             case IPV6_HOPLIMIT:
1860             {
1861                 uint32_t *v = (uint32_t *)data;
1862                 uint32_t *t_int = (uint32_t *)target_data;
1863 
1864                 if (len != sizeof(uint32_t) ||
1865                     tgt_len != sizeof(uint32_t)) {
1866                     goto unimplemented;
1867                 }
1868                 __put_user(*v, t_int);
1869                 break;
1870             }
1871             case IPV6_RECVERR:
1872             {
1873                 struct errhdr6_t {
1874                    struct sock_extended_err ee;
1875                    struct sockaddr_in6 offender;
1876                 };
1877                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1878                 struct errhdr6_t *target_errh =
1879                     (struct errhdr6_t *)target_data;
1880 
1881                 if (len != sizeof(struct errhdr6_t) ||
1882                     tgt_len != sizeof(struct errhdr6_t)) {
1883                     goto unimplemented;
1884                 }
1885                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1886                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1887                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1888                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1889                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1890                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1891                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1892                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1893                     (void *) &errh->offender, sizeof(errh->offender));
1894                 break;
1895             }
1896             default:
1897                 goto unimplemented;
1898             }
1899             break;
1900 
1901         default:
1902         unimplemented:
1903             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1904                           cmsg->cmsg_level, cmsg->cmsg_type);
1905             memcpy(target_data, data, MIN(len, tgt_len));
1906             if (tgt_len > len) {
1907                 memset(target_data + len, 0, tgt_len - len);
1908             }
1909         }
1910 
1911         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1912         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1913         if (msg_controllen < tgt_space) {
1914             tgt_space = msg_controllen;
1915         }
1916         msg_controllen -= tgt_space;
1917         space += tgt_space;
1918         cmsg = CMSG_NXTHDR(msgh, cmsg);
1919         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1920                                          target_cmsg_start);
1921     }
1922     unlock_user(target_cmsg, target_cmsg_addr, space);
1923  the_end:
1924     target_msgh->msg_controllen = tswapal(space);
1925     return 0;
1926 }
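
/*
 * Worked example for the SO_TIMESTAMP resizing above: struct timeval is
 * laid out per-ABI, so a 64-bit host (two 64-bit fields, 16 bytes) serving
 * a 32-bit guest (two 32-bit fields, 8 bytes) has len == 16 but
 * tgt_len == 8.  The guest-side pattern that exercises this path is just
 * the sketch below; guest_enable_rx_timestamps() is a hypothetical helper.
 */
#if 0
static void guest_enable_rx_timestamps(int s)
{
    int on = 1;

    setsockopt(s, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
    /* every recvmsg() now carries an SCM_TIMESTAMP control message whose
     * payload must be a guest-layout struct timeval */
}
#endif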
1927 
1928 /* do_setsockopt() must return target values and target errnos. */
1929 static abi_long do_setsockopt(int sockfd, int level, int optname,
1930                               abi_ulong optval_addr, socklen_t optlen)
1931 {
1932     abi_long ret;
1933     int val;
1934     struct ip_mreqn *ip_mreq;
1935     struct ip_mreq_source *ip_mreq_source;
1936 
1937     switch(level) {
1938     case SOL_TCP:
1939         /* TCP options all take an 'int' value.  */
1940         if (optlen < sizeof(uint32_t))
1941             return -TARGET_EINVAL;
1942 
1943         if (get_user_u32(val, optval_addr))
1944             return -TARGET_EFAULT;
1945         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1946         break;
1947     case SOL_IP:
1948         switch(optname) {
1949         case IP_TOS:
1950         case IP_TTL:
1951         case IP_HDRINCL:
1952         case IP_ROUTER_ALERT:
1953         case IP_RECVOPTS:
1954         case IP_RETOPTS:
1955         case IP_PKTINFO:
1956         case IP_MTU_DISCOVER:
1957         case IP_RECVERR:
1958         case IP_RECVTTL:
1959         case IP_RECVTOS:
1960 #ifdef IP_FREEBIND
1961         case IP_FREEBIND:
1962 #endif
1963         case IP_MULTICAST_TTL:
1964         case IP_MULTICAST_LOOP:
1965             val = 0;
1966             if (optlen >= sizeof(uint32_t)) {
1967                 if (get_user_u32(val, optval_addr))
1968                     return -TARGET_EFAULT;
1969             } else if (optlen >= 1) {
1970                 if (get_user_u8(val, optval_addr))
1971                     return -TARGET_EFAULT;
1972             }
1973             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1974             break;
1975         case IP_ADD_MEMBERSHIP:
1976         case IP_DROP_MEMBERSHIP:
1977             if (optlen < sizeof (struct target_ip_mreq) ||
1978                 optlen > sizeof (struct target_ip_mreqn))
1979                 return -TARGET_EINVAL;
1980 
1981             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1982             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1983             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1984             break;
1985 
1986         case IP_BLOCK_SOURCE:
1987         case IP_UNBLOCK_SOURCE:
1988         case IP_ADD_SOURCE_MEMBERSHIP:
1989         case IP_DROP_SOURCE_MEMBERSHIP:
1990             if (optlen != sizeof (struct target_ip_mreq_source))
1991                 return -TARGET_EINVAL;
1992 
1993             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1994             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1995             unlock_user(ip_mreq_source, optval_addr, 0);
1996             break;
1997 
1998         default:
1999             goto unimplemented;
2000         }
2001         break;
2002     case SOL_IPV6:
2003         switch (optname) {
2004         case IPV6_MTU_DISCOVER:
2005         case IPV6_MTU:
2006         case IPV6_V6ONLY:
2007         case IPV6_RECVPKTINFO:
2008         case IPV6_UNICAST_HOPS:
2009         case IPV6_MULTICAST_HOPS:
2010         case IPV6_MULTICAST_LOOP:
2011         case IPV6_RECVERR:
2012         case IPV6_RECVHOPLIMIT:
2013         case IPV6_2292HOPLIMIT:
2014         case IPV6_CHECKSUM:
2015         case IPV6_ADDRFORM:
2016         case IPV6_2292PKTINFO:
2017         case IPV6_RECVTCLASS:
2018         case IPV6_RECVRTHDR:
2019         case IPV6_2292RTHDR:
2020         case IPV6_RECVHOPOPTS:
2021         case IPV6_2292HOPOPTS:
2022         case IPV6_RECVDSTOPTS:
2023         case IPV6_2292DSTOPTS:
2024         case IPV6_TCLASS:
2025 #ifdef IPV6_RECVPATHMTU
2026         case IPV6_RECVPATHMTU:
2027 #endif
2028 #ifdef IPV6_TRANSPARENT
2029         case IPV6_TRANSPARENT:
2030 #endif
2031 #ifdef IPV6_FREEBIND
2032         case IPV6_FREEBIND:
2033 #endif
2034 #ifdef IPV6_RECVORIGDSTADDR
2035         case IPV6_RECVORIGDSTADDR:
2036 #endif
2037             val = 0;
2038             if (optlen < sizeof(uint32_t)) {
2039                 return -TARGET_EINVAL;
2040             }
2041             if (get_user_u32(val, optval_addr)) {
2042                 return -TARGET_EFAULT;
2043             }
2044             ret = get_errno(setsockopt(sockfd, level, optname,
2045                                        &val, sizeof(val)));
2046             break;
2047         case IPV6_PKTINFO:
2048         {
2049             struct in6_pktinfo pki;
2050 
2051             if (optlen < sizeof(pki)) {
2052                 return -TARGET_EINVAL;
2053             }
2054 
2055             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2056                 return -TARGET_EFAULT;
2057             }
2058 
2059             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2060 
2061             ret = get_errno(setsockopt(sockfd, level, optname,
2062                                        &pki, sizeof(pki)));
2063             break;
2064         }
2065         case IPV6_ADD_MEMBERSHIP:
2066         case IPV6_DROP_MEMBERSHIP:
2067         {
2068             struct ipv6_mreq ipv6mreq;
2069 
2070             if (optlen < sizeof(ipv6mreq)) {
2071                 return -TARGET_EINVAL;
2072             }
2073 
2074             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2075                 return -TARGET_EFAULT;
2076             }
2077 
2078             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2079 
2080             ret = get_errno(setsockopt(sockfd, level, optname,
2081                                        &ipv6mreq, sizeof(ipv6mreq)));
2082             break;
2083         }
2084         default:
2085             goto unimplemented;
2086         }
2087         break;
2088     case SOL_ICMPV6:
2089         switch (optname) {
2090         case ICMPV6_FILTER:
2091         {
2092             struct icmp6_filter icmp6f;
2093 
2094             if (optlen > sizeof(icmp6f)) {
2095                 optlen = sizeof(icmp6f);
2096             }
2097 
2098             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2099                 return -TARGET_EFAULT;
2100             }
2101 
2102             for (val = 0; val < 8; val++) {
2103                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2104             }
2105 
2106             ret = get_errno(setsockopt(sockfd, level, optname,
2107                                        &icmp6f, optlen));
2108             break;
2109         }
2110         default:
2111             goto unimplemented;
2112         }
2113         break;
2114     case SOL_RAW:
2115         switch (optname) {
2116         case ICMP_FILTER:
2117         case IPV6_CHECKSUM:
2118             /* these take a u32 value */
2119             if (optlen < sizeof(uint32_t)) {
2120                 return -TARGET_EINVAL;
2121             }
2122 
2123             if (get_user_u32(val, optval_addr)) {
2124                 return -TARGET_EFAULT;
2125             }
2126             ret = get_errno(setsockopt(sockfd, level, optname,
2127                                        &val, sizeof(val)));
2128             break;
2129 
2130         default:
2131             goto unimplemented;
2132         }
2133         break;
2134 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2135     case SOL_ALG:
2136         switch (optname) {
2137         case ALG_SET_KEY:
2138         {
2139             char *alg_key = g_try_malloc(optlen);
2140 
2141             if (!alg_key) {
2142                 return -TARGET_ENOMEM;
2143             }
2144             if (copy_from_user(alg_key, optval_addr, optlen)) {
2145                 g_free(alg_key);
2146                 return -TARGET_EFAULT;
2147             }
2148             ret = get_errno(setsockopt(sockfd, level, optname,
2149                                        alg_key, optlen));
2150             g_free(alg_key);
2151             break;
2152         }
2153         case ALG_SET_AEAD_AUTHSIZE:
2154         {
2155             ret = get_errno(setsockopt(sockfd, level, optname,
2156                                        NULL, optlen));
2157             break;
2158         }
2159         default:
2160             goto unimplemented;
2161         }
2162         break;
2163 #endif
2164     case TARGET_SOL_SOCKET:
2165         switch (optname) {
2166         case TARGET_SO_RCVTIMEO:
2167         {
2168                 struct timeval tv;
2169 
2170                 optname = SO_RCVTIMEO;
2171 
2172 set_timeout:
2173                 if (optlen != sizeof(struct target_timeval)) {
2174                     return -TARGET_EINVAL;
2175                 }
2176 
2177                 if (copy_from_user_timeval(&tv, optval_addr)) {
2178                     return -TARGET_EFAULT;
2179                 }
2180 
2181                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2182                                 &tv, sizeof(tv)));
2183                 return ret;
2184         }
2185         case TARGET_SO_SNDTIMEO:
2186                 optname = SO_SNDTIMEO;
2187                 goto set_timeout;
2188         case TARGET_SO_ATTACH_FILTER:
2189         {
2190                 struct target_sock_fprog *tfprog;
2191                 struct target_sock_filter *tfilter;
2192                 struct sock_fprog fprog;
2193                 struct sock_filter *filter;
2194                 int i;
2195 
2196                 if (optlen != sizeof(*tfprog)) {
2197                     return -TARGET_EINVAL;
2198                 }
2199                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2200                     return -TARGET_EFAULT;
2201                 }
2202                 if (!lock_user_struct(VERIFY_READ, tfilter,
2203                                       tswapal(tfprog->filter), 0)) {
2204                     unlock_user_struct(tfprog, optval_addr, 1);
2205                     return -TARGET_EFAULT;
2206                 }
2207 
2208                 fprog.len = tswap16(tfprog->len);
2209                 filter = g_try_new(struct sock_filter, fprog.len);
2210                 if (filter == NULL) {
2211                     unlock_user_struct(tfilter, tfprog->filter, 1);
2212                     unlock_user_struct(tfprog, optval_addr, 1);
2213                     return -TARGET_ENOMEM;
2214                 }
2215                 for (i = 0; i < fprog.len; i++) {
2216                     filter[i].code = tswap16(tfilter[i].code);
2217                     filter[i].jt = tfilter[i].jt;
2218                     filter[i].jf = tfilter[i].jf;
2219                     filter[i].k = tswap32(tfilter[i].k);
2220                 }
2221                 fprog.filter = filter;
2222 
2223                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2224                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2225                 g_free(filter);
2226 
2227                 unlock_user_struct(tfilter, tfprog->filter, 1);
2228                 unlock_user_struct(tfprog, optval_addr, 1);
2229                 return ret;
2230         }
2231         case TARGET_SO_BINDTODEVICE:
2232         {
2233                 char *dev_ifname, *addr_ifname;
2234 
2235                 if (optlen > IFNAMSIZ - 1) {
2236                     optlen = IFNAMSIZ - 1;
2237                 }
2238                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2239                 if (!dev_ifname) {
2240                     return -TARGET_EFAULT;
2241                 }
2242                 optname = SO_BINDTODEVICE;
2243                 addr_ifname = alloca(IFNAMSIZ);
2244                 memcpy(addr_ifname, dev_ifname, optlen);
2245                 addr_ifname[optlen] = 0;
2246                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2247                                            addr_ifname, optlen));
2248                 unlock_user(dev_ifname, optval_addr, 0);
2249                 return ret;
2250         }
2251         case TARGET_SO_LINGER:
2252         {
2253                 struct linger lg;
2254                 struct target_linger *tlg;
2255 
2256                 if (optlen != sizeof(struct target_linger)) {
2257                     return -TARGET_EINVAL;
2258                 }
2259                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2260                     return -TARGET_EFAULT;
2261                 }
2262                 __get_user(lg.l_onoff, &tlg->l_onoff);
2263                 __get_user(lg.l_linger, &tlg->l_linger);
2264                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2265                                 &lg, sizeof(lg)));
2266                 unlock_user_struct(tlg, optval_addr, 0);
2267                 return ret;
2268         }
2269             /* Options with 'int' argument.  */
2270         case TARGET_SO_DEBUG:
2271                 optname = SO_DEBUG;
2272                 break;
2273         case TARGET_SO_REUSEADDR:
2274                 optname = SO_REUSEADDR;
2275                 break;
2276 #ifdef SO_REUSEPORT
2277         case TARGET_SO_REUSEPORT:
2278                 optname = SO_REUSEPORT;
2279                 break;
2280 #endif
2281         case TARGET_SO_TYPE:
2282                 optname = SO_TYPE;
2283                 break;
2284         case TARGET_SO_ERROR:
2285                 optname = SO_ERROR;
2286                 break;
2287         case TARGET_SO_DONTROUTE:
2288                 optname = SO_DONTROUTE;
2289                 break;
2290         case TARGET_SO_BROADCAST:
2291                 optname = SO_BROADCAST;
2292                 break;
2293         case TARGET_SO_SNDBUF:
2294                 optname = SO_SNDBUF;
2295                 break;
2296         case TARGET_SO_SNDBUFFORCE:
2297                 optname = SO_SNDBUFFORCE;
2298                 break;
2299         case TARGET_SO_RCVBUF:
2300                 optname = SO_RCVBUF;
2301                 break;
2302         case TARGET_SO_RCVBUFFORCE:
2303                 optname = SO_RCVBUFFORCE;
2304                 break;
2305         case TARGET_SO_KEEPALIVE:
2306                 optname = SO_KEEPALIVE;
2307                 break;
2308         case TARGET_SO_OOBINLINE:
2309                 optname = SO_OOBINLINE;
2310                 break;
2311         case TARGET_SO_NO_CHECK:
2312                 optname = SO_NO_CHECK;
2313                 break;
2314         case TARGET_SO_PRIORITY:
2315                 optname = SO_PRIORITY;
2316                 break;
2317 #ifdef SO_BSDCOMPAT
2318         case TARGET_SO_BSDCOMPAT:
2319                 optname = SO_BSDCOMPAT;
2320                 break;
2321 #endif
2322         case TARGET_SO_PASSCRED:
2323                 optname = SO_PASSCRED;
2324                 break;
2325         case TARGET_SO_PASSSEC:
2326                 optname = SO_PASSSEC;
2327                 break;
2328         case TARGET_SO_TIMESTAMP:
2329                 optname = SO_TIMESTAMP;
2330                 break;
2331         case TARGET_SO_RCVLOWAT:
2332                 optname = SO_RCVLOWAT;
2333                 break;
2334         default:
2335             goto unimplemented;
2336         }
2337         if (optlen < sizeof(uint32_t))
2338             return -TARGET_EINVAL;
2339 
2340         if (get_user_u32(val, optval_addr))
2341             return -TARGET_EFAULT;
2342         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2343         break;
2344 #ifdef SOL_NETLINK
2345     case SOL_NETLINK:
2346         switch (optname) {
2347         case NETLINK_PKTINFO:
2348         case NETLINK_ADD_MEMBERSHIP:
2349         case NETLINK_DROP_MEMBERSHIP:
2350         case NETLINK_BROADCAST_ERROR:
2351         case NETLINK_NO_ENOBUFS:
2352 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2353         case NETLINK_LISTEN_ALL_NSID:
2354         case NETLINK_CAP_ACK:
2355 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2356 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2357         case NETLINK_EXT_ACK:
2358 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2359 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2360         case NETLINK_GET_STRICT_CHK:
2361 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2362             break;
2363         default:
2364             goto unimplemented;
2365         }
2366         val = 0;
2367         if (optlen < sizeof(uint32_t)) {
2368             return -TARGET_EINVAL;
2369         }
2370         if (get_user_u32(val, optval_addr)) {
2371             return -TARGET_EFAULT;
2372         }
2373         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2374                                    sizeof(val)));
2375         break;
2376 #endif /* SOL_NETLINK */
2377     default:
2378     unimplemented:
2379         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2380                       level, optname);
2381         ret = -TARGET_ENOPROTOOPT;
2382     }
2383     return ret;
2384 }
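
/*
 * Illustrative sketch of a guest filter program for the
 * TARGET_SO_ATTACH_FILTER case above; the 16/32-bit fields of each
 * struct sock_filter are what gets byte-swapped before the host
 * setsockopt().  The single-instruction "accept everything" program and
 * the socket s are assumptions made up for the example.
 */
#if 0
static void guest_attach_accept_all_filter(int s)
{
    struct sock_filter insns[] = {
        /* return the whole packet, i.e. accept it */
        BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
    };
    struct sock_fprog prog = {
        .len = 1,
        .filter = insns,
    };

    setsockopt(s, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
}
#endif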
2385 
2386 /* do_getsockopt() must return target values and target errnos. */
2387 static abi_long do_getsockopt(int sockfd, int level, int optname,
2388                               abi_ulong optval_addr, abi_ulong optlen)
2389 {
2390     abi_long ret;
2391     int len, val;
2392     socklen_t lv;
2393 
2394     switch(level) {
2395     case TARGET_SOL_SOCKET:
2396         level = SOL_SOCKET;
2397         switch (optname) {
2398         /* These don't just return a single integer */
2399         case TARGET_SO_PEERNAME:
2400             goto unimplemented;
2401         case TARGET_SO_RCVTIMEO: {
2402             struct timeval tv;
2403             socklen_t tvlen;
2404 
2405             optname = SO_RCVTIMEO;
2406 
2407 get_timeout:
2408             if (get_user_u32(len, optlen)) {
2409                 return -TARGET_EFAULT;
2410             }
2411             if (len < 0) {
2412                 return -TARGET_EINVAL;
2413             }
2414 
2415             tvlen = sizeof(tv);
2416             ret = get_errno(getsockopt(sockfd, level, optname,
2417                                        &tv, &tvlen));
2418             if (ret < 0) {
2419                 return ret;
2420             }
2421             if (len > sizeof(struct target_timeval)) {
2422                 len = sizeof(struct target_timeval);
2423             }
2424             if (copy_to_user_timeval(optval_addr, &tv)) {
2425                 return -TARGET_EFAULT;
2426             }
2427             if (put_user_u32(len, optlen)) {
2428                 return -TARGET_EFAULT;
2429             }
2430             break;
2431         }
2432         case TARGET_SO_SNDTIMEO:
2433             optname = SO_SNDTIMEO;
2434             goto get_timeout;
2435         case TARGET_SO_PEERCRED: {
2436             struct ucred cr;
2437             socklen_t crlen;
2438             struct target_ucred *tcr;
2439 
2440             if (get_user_u32(len, optlen)) {
2441                 return -TARGET_EFAULT;
2442             }
2443             if (len < 0) {
2444                 return -TARGET_EINVAL;
2445             }
2446 
2447             crlen = sizeof(cr);
2448             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2449                                        &cr, &crlen));
2450             if (ret < 0) {
2451                 return ret;
2452             }
2453             if (len > crlen) {
2454                 len = crlen;
2455             }
2456             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2457                 return -TARGET_EFAULT;
2458             }
2459             __put_user(cr.pid, &tcr->pid);
2460             __put_user(cr.uid, &tcr->uid);
2461             __put_user(cr.gid, &tcr->gid);
2462             unlock_user_struct(tcr, optval_addr, 1);
2463             if (put_user_u32(len, optlen)) {
2464                 return -TARGET_EFAULT;
2465             }
2466             break;
2467         }
2468         case TARGET_SO_PEERSEC: {
2469             char *name;
2470 
2471             if (get_user_u32(len, optlen)) {
2472                 return -TARGET_EFAULT;
2473             }
2474             if (len < 0) {
2475                 return -TARGET_EINVAL;
2476             }
2477             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2478             if (!name) {
2479                 return -TARGET_EFAULT;
2480             }
2481             lv = len;
2482             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2483                                        name, &lv));
2484             if (put_user_u32(lv, optlen)) {
2485                 ret = -TARGET_EFAULT;
2486             }
2487             unlock_user(name, optval_addr, lv);
2488             break;
2489         }
2490         case TARGET_SO_LINGER:
2491         {
2492             struct linger lg;
2493             socklen_t lglen;
2494             struct target_linger *tlg;
2495 
2496             if (get_user_u32(len, optlen)) {
2497                 return -TARGET_EFAULT;
2498             }
2499             if (len < 0) {
2500                 return -TARGET_EINVAL;
2501             }
2502 
2503             lglen = sizeof(lg);
2504             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2505                                        &lg, &lglen));
2506             if (ret < 0) {
2507                 return ret;
2508             }
2509             if (len > lglen) {
2510                 len = lglen;
2511             }
2512             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2513                 return -TARGET_EFAULT;
2514             }
2515             __put_user(lg.l_onoff, &tlg->l_onoff);
2516             __put_user(lg.l_linger, &tlg->l_linger);
2517             unlock_user_struct(tlg, optval_addr, 1);
2518             if (put_user_u32(len, optlen)) {
2519                 return -TARGET_EFAULT;
2520             }
2521             break;
2522         }
2523         /* Options with 'int' argument.  */
2524         case TARGET_SO_DEBUG:
2525             optname = SO_DEBUG;
2526             goto int_case;
2527         case TARGET_SO_REUSEADDR:
2528             optname = SO_REUSEADDR;
2529             goto int_case;
2530 #ifdef SO_REUSEPORT
2531         case TARGET_SO_REUSEPORT:
2532             optname = SO_REUSEPORT;
2533             goto int_case;
2534 #endif
2535         case TARGET_SO_TYPE:
2536             optname = SO_TYPE;
2537             goto int_case;
2538         case TARGET_SO_ERROR:
2539             optname = SO_ERROR;
2540             goto int_case;
2541         case TARGET_SO_DONTROUTE:
2542             optname = SO_DONTROUTE;
2543             goto int_case;
2544         case TARGET_SO_BROADCAST:
2545             optname = SO_BROADCAST;
2546             goto int_case;
2547         case TARGET_SO_SNDBUF:
2548             optname = SO_SNDBUF;
2549             goto int_case;
2550         case TARGET_SO_RCVBUF:
2551             optname = SO_RCVBUF;
2552             goto int_case;
2553         case TARGET_SO_KEEPALIVE:
2554             optname = SO_KEEPALIVE;
2555             goto int_case;
2556         case TARGET_SO_OOBINLINE:
2557             optname = SO_OOBINLINE;
2558             goto int_case;
2559         case TARGET_SO_NO_CHECK:
2560             optname = SO_NO_CHECK;
2561             goto int_case;
2562         case TARGET_SO_PRIORITY:
2563             optname = SO_PRIORITY;
2564             goto int_case;
2565 #ifdef SO_BSDCOMPAT
2566         case TARGET_SO_BSDCOMPAT:
2567             optname = SO_BSDCOMPAT;
2568             goto int_case;
2569 #endif
2570         case TARGET_SO_PASSCRED:
2571             optname = SO_PASSCRED;
2572             goto int_case;
2573         case TARGET_SO_TIMESTAMP:
2574             optname = SO_TIMESTAMP;
2575             goto int_case;
2576         case TARGET_SO_RCVLOWAT:
2577             optname = SO_RCVLOWAT;
2578             goto int_case;
2579         case TARGET_SO_ACCEPTCONN:
2580             optname = SO_ACCEPTCONN;
2581             goto int_case;
2582         default:
2583             goto int_case;
2584         }
2585         break;
2586     case SOL_TCP:
2587         /* TCP options all take an 'int' value.  */
2588     int_case:
2589         if (get_user_u32(len, optlen))
2590             return -TARGET_EFAULT;
2591         if (len < 0)
2592             return -TARGET_EINVAL;
2593         lv = sizeof(lv);
2594         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2595         if (ret < 0)
2596             return ret;
2597         if (optname == SO_TYPE) {
2598             val = host_to_target_sock_type(val);
2599         }
2600         if (len > lv)
2601             len = lv;
2602         if (len == 4) {
2603             if (put_user_u32(val, optval_addr))
2604                 return -TARGET_EFAULT;
2605         } else {
2606             if (put_user_u8(val, optval_addr))
2607                 return -TARGET_EFAULT;
2608         }
2609         if (put_user_u32(len, optlen))
2610             return -TARGET_EFAULT;
2611         break;
2612     case SOL_IP:
2613         switch(optname) {
2614         case IP_TOS:
2615         case IP_TTL:
2616         case IP_HDRINCL:
2617         case IP_ROUTER_ALERT:
2618         case IP_RECVOPTS:
2619         case IP_RETOPTS:
2620         case IP_PKTINFO:
2621         case IP_MTU_DISCOVER:
2622         case IP_RECVERR:
2623         case IP_RECVTOS:
2624 #ifdef IP_FREEBIND
2625         case IP_FREEBIND:
2626 #endif
2627         case IP_MULTICAST_TTL:
2628         case IP_MULTICAST_LOOP:
2629             if (get_user_u32(len, optlen))
2630                 return -TARGET_EFAULT;
2631             if (len < 0)
2632                 return -TARGET_EINVAL;
2633             lv = sizeof(lv);
2634             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2635             if (ret < 0)
2636                 return ret;
2637             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2638                 len = 1;
2639                 if (put_user_u32(len, optlen)
2640                     || put_user_u8(val, optval_addr))
2641                     return -TARGET_EFAULT;
2642             } else {
2643                 if (len > sizeof(int))
2644                     len = sizeof(int);
2645                 if (put_user_u32(len, optlen)
2646                     || put_user_u32(val, optval_addr))
2647                     return -TARGET_EFAULT;
2648             }
2649             break;
2650         default:
2651             ret = -TARGET_ENOPROTOOPT;
2652             break;
2653         }
2654         break;
2655     case SOL_IPV6:
2656         switch (optname) {
2657         case IPV6_MTU_DISCOVER:
2658         case IPV6_MTU:
2659         case IPV6_V6ONLY:
2660         case IPV6_RECVPKTINFO:
2661         case IPV6_UNICAST_HOPS:
2662         case IPV6_MULTICAST_HOPS:
2663         case IPV6_MULTICAST_LOOP:
2664         case IPV6_RECVERR:
2665         case IPV6_RECVHOPLIMIT:
2666         case IPV6_2292HOPLIMIT:
2667         case IPV6_CHECKSUM:
2668         case IPV6_ADDRFORM:
2669         case IPV6_2292PKTINFO:
2670         case IPV6_RECVTCLASS:
2671         case IPV6_RECVRTHDR:
2672         case IPV6_2292RTHDR:
2673         case IPV6_RECVHOPOPTS:
2674         case IPV6_2292HOPOPTS:
2675         case IPV6_RECVDSTOPTS:
2676         case IPV6_2292DSTOPTS:
2677         case IPV6_TCLASS:
2678 #ifdef IPV6_RECVPATHMTU
2679         case IPV6_RECVPATHMTU:
2680 #endif
2681 #ifdef IPV6_TRANSPARENT
2682         case IPV6_TRANSPARENT:
2683 #endif
2684 #ifdef IPV6_FREEBIND
2685         case IPV6_FREEBIND:
2686 #endif
2687 #ifdef IPV6_RECVORIGDSTADDR
2688         case IPV6_RECVORIGDSTADDR:
2689 #endif
2690             if (get_user_u32(len, optlen))
2691                 return -TARGET_EFAULT;
2692             if (len < 0)
2693                 return -TARGET_EINVAL;
2694             lv = sizeof(lv);
2695             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2696             if (ret < 0)
2697                 return ret;
2698             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2699                 len = 1;
2700                 if (put_user_u32(len, optlen)
2701                     || put_user_u8(val, optval_addr))
2702                     return -TARGET_EFAULT;
2703             } else {
2704                 if (len > sizeof(int))
2705                     len = sizeof(int);
2706                 if (put_user_u32(len, optlen)
2707                     || put_user_u32(val, optval_addr))
2708                     return -TARGET_EFAULT;
2709             }
2710             break;
2711         default:
2712             ret = -TARGET_ENOPROTOOPT;
2713             break;
2714         }
2715         break;
2716 #ifdef SOL_NETLINK
2717     case SOL_NETLINK:
2718         switch (optname) {
2719         case NETLINK_PKTINFO:
2720         case NETLINK_BROADCAST_ERROR:
2721         case NETLINK_NO_ENOBUFS:
2722 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2723         case NETLINK_LISTEN_ALL_NSID:
2724         case NETLINK_CAP_ACK:
2725 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2726 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2727         case NETLINK_EXT_ACK:
2728 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2729 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2730         case NETLINK_GET_STRICT_CHK:
2731 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2732             if (get_user_u32(len, optlen)) {
2733                 return -TARGET_EFAULT;
2734             }
2735             if (len != sizeof(val)) {
2736                 return -TARGET_EINVAL;
2737             }
2738             lv = len;
2739             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2740             if (ret < 0) {
2741                 return ret;
2742             }
2743             if (put_user_u32(lv, optlen)
2744                 || put_user_u32(val, optval_addr)) {
2745                 return -TARGET_EFAULT;
2746             }
2747             break;
2748 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2749         case NETLINK_LIST_MEMBERSHIPS:
2750         {
2751             uint32_t *results;
2752             int i;
2753             if (get_user_u32(len, optlen)) {
2754                 return -TARGET_EFAULT;
2755             }
2756             if (len < 0) {
2757                 return -TARGET_EINVAL;
2758             }
2759             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2760             if (!results) {
2761                 return -TARGET_EFAULT;
2762             }
2763             lv = len;
2764             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2765             if (ret < 0) {
2766                 unlock_user(results, optval_addr, 0);
2767                 return ret;
2768             }
2769             /* Swap host endianness to target endianness. */
2770             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2771                 results[i] = tswap32(results[i]);
2772             }
2773             unlock_user(results, optval_addr, 0);
2774             if (put_user_u32(lv, optlen)) {
2775                 return -TARGET_EFAULT;
2776             }
2777             break;
2778         }
2779 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2780         default:
2781             goto unimplemented;
2782         }
2783         break;
2784 #endif /* SOL_NETLINK */
2785     default:
2786     unimplemented:
2787         qemu_log_mask(LOG_UNIMP,
2788                       "getsockopt level=%d optname=%d not yet supported\n",
2789                       level, optname);
2790         ret = -TARGET_EOPNOTSUPP;
2791         break;
2792     }
2793     return ret;
2794 }
2795 
2796 /* Convert target low/high pair representing file offset into the host
2797  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2798  * as the kernel doesn't handle them either.
2799  */
2800 static void target_to_host_low_high(abi_ulong tlow,
2801                                     abi_ulong thigh,
2802                                     unsigned long *hlow,
2803                                     unsigned long *hhigh)
2804 {
2805     uint64_t off = tlow |
2806         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2807         TARGET_LONG_BITS / 2;
2808 
2809     *hlow = off;
2810     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2811 }
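
/*
 * Worked example for target_to_host_low_high(): with a 32-bit target and a
 * 64-bit host, tlow = 0x89abcdef and thigh = 0x01234567 combine into
 * off = 0x0123456789abcdef, so *hlow gets the whole value and *hhigh is 0;
 * on a 32-bit host the same inputs give *hlow = 0x89abcdef and
 * *hhigh = 0x01234567.  The shifts are done in two halves because shifting
 * a value by its full width is undefined behaviour in C.
 */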
2812 
2813 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2814                                 abi_ulong count, int copy)
2815 {
2816     struct target_iovec *target_vec;
2817     struct iovec *vec;
2818     abi_ulong total_len, max_len;
2819     int i;
2820     int err = 0;
2821     bool bad_address = false;
2822 
2823     if (count == 0) {
2824         errno = 0;
2825         return NULL;
2826     }
2827     if (count > IOV_MAX) {
2828         errno = EINVAL;
2829         return NULL;
2830     }
2831 
2832     vec = g_try_new0(struct iovec, count);
2833     if (vec == NULL) {
2834         errno = ENOMEM;
2835         return NULL;
2836     }
2837 
2838     target_vec = lock_user(VERIFY_READ, target_addr,
2839                            count * sizeof(struct target_iovec), 1);
2840     if (target_vec == NULL) {
2841         err = EFAULT;
2842         goto fail2;
2843     }
2844 
2845     /* ??? If host page size > target page size, this will result in a
2846        value larger than what we can actually support.  */
2847     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2848     total_len = 0;
2849 
2850     for (i = 0; i < count; i++) {
2851         abi_ulong base = tswapal(target_vec[i].iov_base);
2852         abi_long len = tswapal(target_vec[i].iov_len);
2853 
2854         if (len < 0) {
2855             err = EINVAL;
2856             goto fail;
2857         } else if (len == 0) {
2858             /* Zero length pointer is ignored.  */
2859             vec[i].iov_base = 0;
2860         } else {
2861             vec[i].iov_base = lock_user(type, base, len, copy);
2862             /* If the first buffer pointer is bad, this is a fault.  But
2863              * subsequent bad buffers will result in a partial write; this
2864              * is realized by filling the vector with null pointers and
2865              * zero lengths. */
2866             if (!vec[i].iov_base) {
2867                 if (i == 0) {
2868                     err = EFAULT;
2869                     goto fail;
2870                 } else {
2871                     bad_address = true;
2872                 }
2873             }
2874             if (bad_address) {
2875                 len = 0;
2876             }
2877             if (len > max_len - total_len) {
2878                 len = max_len - total_len;
2879             }
2880         }
2881         vec[i].iov_len = len;
2882         total_len += len;
2883     }
2884 
2885     unlock_user(target_vec, target_addr, 0);
2886     return vec;
2887 
2888  fail:
2889     while (--i >= 0) {
2890         if (tswapal(target_vec[i].iov_len) > 0) {
2891             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2892         }
2893     }
2894     unlock_user(target_vec, target_addr, 0);
2895  fail2:
2896     g_free(vec);
2897     errno = err;
2898     return NULL;
2899 }
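
/*
 * Worked example of the partial-write behaviour implemented above: for a
 * three-entry vector whose second iov_base is an unmapped guest address,
 * entry 0 is locked normally while entries 1 and 2 are returned with a
 * NULL base and zero length, so the host writev()/sendmsg() performs a
 * short transfer covering entry 0 only -- the same result the guest would
 * see from the Linux kernel.  Only a bad first entry fails the whole call
 * with EFAULT.
 */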
2900 
2901 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2902                          abi_ulong count, int copy)
2903 {
2904     struct target_iovec *target_vec;
2905     int i;
2906 
2907     target_vec = lock_user(VERIFY_READ, target_addr,
2908                            count * sizeof(struct target_iovec), 1);
2909     if (target_vec) {
2910         for (i = 0; i < count; i++) {
2911             abi_ulong base = tswapal(target_vec[i].iov_base);
2912             abi_long len = tswapal(target_vec[i].iov_len);
2913             if (len < 0) {
2914                 break;
2915             }
2916             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2917         }
2918         unlock_user(target_vec, target_addr, 0);
2919     }
2920 
2921     g_free(vec);
2922 }
2923 
2924 static inline int target_to_host_sock_type(int *type)
2925 {
2926     int host_type = 0;
2927     int target_type = *type;
2928 
2929     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2930     case TARGET_SOCK_DGRAM:
2931         host_type = SOCK_DGRAM;
2932         break;
2933     case TARGET_SOCK_STREAM:
2934         host_type = SOCK_STREAM;
2935         break;
2936     default:
2937         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2938         break;
2939     }
2940     if (target_type & TARGET_SOCK_CLOEXEC) {
2941 #if defined(SOCK_CLOEXEC)
2942         host_type |= SOCK_CLOEXEC;
2943 #else
2944         return -TARGET_EINVAL;
2945 #endif
2946     }
2947     if (target_type & TARGET_SOCK_NONBLOCK) {
2948 #if defined(SOCK_NONBLOCK)
2949         host_type |= SOCK_NONBLOCK;
2950 #elif !defined(O_NONBLOCK)
2951         return -TARGET_EINVAL;
2952 #endif
2953     }
2954     *type = host_type;
2955     return 0;
2956 }
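
/*
 * Illustrative sketch of the flag translation above; the helper name is
 * hypothetical and the flags shown are just one possible combination.
 */
#if 0
static void sock_type_translation_example(void)
{
    int type = TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK | TARGET_SOCK_CLOEXEC;

    if (target_to_host_sock_type(&type) == 0) {
        /* type now holds SOCK_STREAM plus the host's SOCK_NONBLOCK and
         * SOCK_CLOEXEC where available; sock_flags_fixup() below emulates
         * SOCK_NONBLOCK with fcntl(O_NONBLOCK) on hosts that lack it. */
    }
}
#endif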
2957 
2958 /* Try to emulate socket type flags after socket creation.  */
2959 static int sock_flags_fixup(int fd, int target_type)
2960 {
2961 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2962     if (target_type & TARGET_SOCK_NONBLOCK) {
2963         int flags = fcntl(fd, F_GETFL);
2964         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2965             close(fd);
2966             return -TARGET_EINVAL;
2967         }
2968     }
2969 #endif
2970     return fd;
2971 }
2972 
2973 /* do_socket() must return target values and target errnos. */
2974 static abi_long do_socket(int domain, int type, int protocol)
2975 {
2976     int target_type = type;
2977     int ret;
2978 
2979     ret = target_to_host_sock_type(&type);
2980     if (ret) {
2981         return ret;
2982     }
2983 
2984     if (domain == PF_NETLINK && !(
2985 #ifdef CONFIG_RTNETLINK
2986          protocol == NETLINK_ROUTE ||
2987 #endif
2988          protocol == NETLINK_KOBJECT_UEVENT ||
2989          protocol == NETLINK_AUDIT)) {
2990         return -TARGET_EPFNOSUPPORT;
2991     }
2992 
2993     if (domain == AF_PACKET ||
2994         (domain == AF_INET && type == SOCK_PACKET)) {
2995         protocol = tswap16(protocol);
2996     }
2997 
2998     ret = get_errno(socket(domain, type, protocol));
2999     if (ret >= 0) {
3000         ret = sock_flags_fixup(ret, target_type);
3001         if (type == SOCK_PACKET) {
3002             /* Handle an obsolete case:
3003              * if the socket type is SOCK_PACKET, bind by name.
3004              */
3005             fd_trans_register(ret, &target_packet_trans);
3006         } else if (domain == PF_NETLINK) {
3007             switch (protocol) {
3008 #ifdef CONFIG_RTNETLINK
3009             case NETLINK_ROUTE:
3010                 fd_trans_register(ret, &target_netlink_route_trans);
3011                 break;
3012 #endif
3013             case NETLINK_KOBJECT_UEVENT:
3014                 /* nothing to do: messages are strings */
3015                 break;
3016             case NETLINK_AUDIT:
3017                 fd_trans_register(ret, &target_netlink_audit_trans);
3018                 break;
3019             default:
3020                 g_assert_not_reached();
3021             }
3022         }
3023     }
3024     return ret;
3025 }
3026 
3027 /* do_bind() must return target values and target errnos. */
3028 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3029                         socklen_t addrlen)
3030 {
3031     void *addr;
3032     abi_long ret;
3033 
3034     if ((int)addrlen < 0) {
3035         return -TARGET_EINVAL;
3036     }
3037 
3038     addr = alloca(addrlen+1);
3039 
3040     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3041     if (ret)
3042         return ret;
3043 
3044     return get_errno(bind(sockfd, addr, addrlen));
3045 }
3046 
3047 /* do_connect() must return target values and target errnos. */
3048 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3049                            socklen_t addrlen)
3050 {
3051     void *addr;
3052     abi_long ret;
3053 
3054     if ((int)addrlen < 0) {
3055         return -TARGET_EINVAL;
3056     }
3057 
3058     addr = alloca(addrlen+1);
3059 
3060     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3061     if (ret)
3062         return ret;
3063 
3064     return get_errno(safe_connect(sockfd, addr, addrlen));
3065 }
3066 
3067 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3068 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3069                                       int flags, int send)
3070 {
3071     abi_long ret, len;
3072     struct msghdr msg;
3073     abi_ulong count;
3074     struct iovec *vec;
3075     abi_ulong target_vec;
3076 
3077     if (msgp->msg_name) {
3078         msg.msg_namelen = tswap32(msgp->msg_namelen);
3079         msg.msg_name = alloca(msg.msg_namelen+1);
3080         ret = target_to_host_sockaddr(fd, msg.msg_name,
3081                                       tswapal(msgp->msg_name),
3082                                       msg.msg_namelen);
3083         if (ret == -TARGET_EFAULT) {
3084             /* For connected sockets msg_name and msg_namelen must
3085              * be ignored, so returning EFAULT immediately is wrong.
3086              * Instead, pass a bad msg_name to the host kernel, and
3087              * let it decide whether to return EFAULT or not.
3088              */
3089             msg.msg_name = (void *)-1;
3090         } else if (ret) {
3091             goto out2;
3092         }
3093     } else {
3094         msg.msg_name = NULL;
3095         msg.msg_namelen = 0;
3096     }
3097     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3098     msg.msg_control = alloca(msg.msg_controllen);
3099     memset(msg.msg_control, 0, msg.msg_controllen);
3100 
3101     msg.msg_flags = tswap32(msgp->msg_flags);
3102 
3103     count = tswapal(msgp->msg_iovlen);
3104     target_vec = tswapal(msgp->msg_iov);
3105 
3106     if (count > IOV_MAX) {
3107         /* sendmsg/recvmsg return a different errno for this condition than
3108          * readv/writev, so we must catch it here before lock_iovec() does.
3109          */
3110         ret = -TARGET_EMSGSIZE;
3111         goto out2;
3112     }
3113 
3114     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3115                      target_vec, count, send);
3116     if (vec == NULL) {
3117         ret = -host_to_target_errno(errno);
3118         goto out2;
3119     }
3120     msg.msg_iovlen = count;
3121     msg.msg_iov = vec;
3122 
3123     if (send) {
3124         if (fd_trans_target_to_host_data(fd)) {
3125             void *host_msg;
3126 
3127             host_msg = g_malloc(msg.msg_iov->iov_len);
3128             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3129             ret = fd_trans_target_to_host_data(fd)(host_msg,
3130                                                    msg.msg_iov->iov_len);
3131             if (ret >= 0) {
3132                 msg.msg_iov->iov_base = host_msg;
3133                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3134             }
3135             g_free(host_msg);
3136         } else {
3137             ret = target_to_host_cmsg(&msg, msgp);
3138             if (ret == 0) {
3139                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3140             }
3141         }
3142     } else {
3143         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3144         if (!is_error(ret)) {
3145             len = ret;
3146             if (fd_trans_host_to_target_data(fd)) {
3147                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3148                                                MIN(msg.msg_iov->iov_len, len));
3149             } else {
3150                 ret = host_to_target_cmsg(msgp, &msg);
3151             }
3152             if (!is_error(ret)) {
3153                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3154                 msgp->msg_flags = tswap32(msg.msg_flags);
3155                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3156                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3157                                     msg.msg_name, msg.msg_namelen);
3158                     if (ret) {
3159                         goto out;
3160                     }
3161                 }
3162 
3163                 ret = len;
3164             }
3165         }
3166     }
3167 
3168 out:
3169     unlock_iovec(vec, target_vec, count, !send);
3170 out2:
3171     return ret;
3172 }
3173 
3174 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3175                                int flags, int send)
3176 {
3177     abi_long ret;
3178     struct target_msghdr *msgp;
3179 
3180     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3181                           msgp,
3182                           target_msg,
3183                           send ? 1 : 0)) {
3184         return -TARGET_EFAULT;
3185     }
3186     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3187     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3188     return ret;
3189 }
3190 
3191 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3192  * so it might not have this *mmsg-specific flag either.
3193  */
3194 #ifndef MSG_WAITFORONE
3195 #define MSG_WAITFORONE 0x10000
3196 #endif
3197 
3198 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3199                                 unsigned int vlen, unsigned int flags,
3200                                 int send)
3201 {
3202     struct target_mmsghdr *mmsgp;
3203     abi_long ret = 0;
3204     int i;
3205 
3206     if (vlen > UIO_MAXIOV) {
3207         vlen = UIO_MAXIOV;
3208     }
3209 
3210     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3211     if (!mmsgp) {
3212         return -TARGET_EFAULT;
3213     }
3214 
3215     for (i = 0; i < vlen; i++) {
3216         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3217         if (is_error(ret)) {
3218             break;
3219         }
3220         mmsgp[i].msg_len = tswap32(ret);
3221         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3222         if (flags & MSG_WAITFORONE) {
3223             flags |= MSG_DONTWAIT;
3224         }
3225     }
3226 
3227     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3228 
3229     /* Return number of datagrams sent if we sent any at all;
3230      * otherwise return the error.
3231      */
3232     if (i) {
3233         return i;
3234     }
3235     return ret;
3236 }
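
/*
 * Illustrative sketch of the semantics reproduced above (assumed guest
 * usage, not code from this file):
 *
 *     // hypothetical guest code
 *     struct mmsghdr msgs[8];
 *     int n = recvmmsg(fd, msgs, 8, MSG_WAITFORONE, NULL);
 *
 * The loop blocks only until the first datagram is received; MSG_DONTWAIT
 * is then OR-ed into the flags so the remaining iterations return
 * immediately, n is the number of msg_len entries actually filled in, and
 * an error after at least one success is dropped in favour of that count.
 */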
3237 
3238 /* do_accept4() must return target values and target errnos. */
3239 static abi_long do_accept4(int fd, abi_ulong target_addr,
3240                            abi_ulong target_addrlen_addr, int flags)
3241 {
3242     socklen_t addrlen, ret_addrlen;
3243     void *addr;
3244     abi_long ret;
3245     int host_flags;
3246 
3247     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3248 
3249     if (target_addr == 0) {
3250         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3251     }
3252 
3253     /* Linux returns EINVAL if the addrlen pointer is invalid */
3254     if (get_user_u32(addrlen, target_addrlen_addr))
3255         return -TARGET_EINVAL;
3256 
3257     if ((int)addrlen < 0) {
3258         return -TARGET_EINVAL;
3259     }
3260 
3261     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3262         return -TARGET_EINVAL;
3263 
3264     addr = alloca(addrlen);
3265 
3266     ret_addrlen = addrlen;
3267     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3268     if (!is_error(ret)) {
3269         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3270         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3271             ret = -TARGET_EFAULT;
3272         }
3273     }
3274     return ret;
3275 }
3276 
3277 /* do_getpeername() must return target values and target errnos. */
3278 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3279                                abi_ulong target_addrlen_addr)
3280 {
3281     socklen_t addrlen, ret_addrlen;
3282     void *addr;
3283     abi_long ret;
3284 
3285     if (get_user_u32(addrlen, target_addrlen_addr))
3286         return -TARGET_EFAULT;
3287 
3288     if ((int)addrlen < 0) {
3289         return -TARGET_EINVAL;
3290     }
3291 
3292     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3293         return -TARGET_EFAULT;
3294 
3295     addr = alloca(addrlen);
3296 
3297     ret_addrlen = addrlen;
3298     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3299     if (!is_error(ret)) {
3300         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3301         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3302             ret = -TARGET_EFAULT;
3303         }
3304     }
3305     return ret;
3306 }
3307 
3308 /* do_getsockname() must return target values and target errnos. */
3309 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3310                                abi_ulong target_addrlen_addr)
3311 {
3312     socklen_t addrlen, ret_addrlen;
3313     void *addr;
3314     abi_long ret;
3315 
3316     if (get_user_u32(addrlen, target_addrlen_addr))
3317         return -TARGET_EFAULT;
3318 
3319     if ((int)addrlen < 0) {
3320         return -TARGET_EINVAL;
3321     }
3322 
3323     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3324         return -TARGET_EFAULT;
3325 
3326     addr = alloca(addrlen);
3327 
3328     ret_addrlen = addrlen;
3329     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3330     if (!is_error(ret)) {
3331         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3332         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3333             ret = -TARGET_EFAULT;
3334         }
3335     }
3336     return ret;
3337 }
3338 
3339 /* do_socketpair() must return target values and target errnos. */
3340 static abi_long do_socketpair(int domain, int type, int protocol,
3341                               abi_ulong target_tab_addr)
3342 {
3343     int tab[2];
3344     abi_long ret;
3345 
3346     target_to_host_sock_type(&type);
3347 
3348     ret = get_errno(socketpair(domain, type, protocol, tab));
3349     if (!is_error(ret)) {
3350         if (put_user_s32(tab[0], target_tab_addr)
3351             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3352             ret = -TARGET_EFAULT;
3353     }
3354     return ret;
3355 }
3356 
3357 /* do_sendto() must return target values and target errnos. */
3358 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3359                           abi_ulong target_addr, socklen_t addrlen)
3360 {
3361     void *addr;
3362     void *host_msg;
3363     void *copy_msg = NULL;
3364     abi_long ret;
3365 
3366     if ((int)addrlen < 0) {
3367         return -TARGET_EINVAL;
3368     }
3369 
3370     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3371     if (!host_msg)
3372         return -TARGET_EFAULT;
3373     if (fd_trans_target_to_host_data(fd)) {
3374         copy_msg = host_msg;
3375         host_msg = g_malloc(len);
3376         memcpy(host_msg, copy_msg, len);
3377         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3378         if (ret < 0) {
3379             goto fail;
3380         }
3381     }
3382     if (target_addr) {
3383         addr = alloca(addrlen+1);
3384         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3385         if (ret) {
3386             goto fail;
3387         }
3388         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3389     } else {
3390         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3391     }
3392 fail:
3393     if (copy_msg) {
3394         g_free(host_msg);
3395         host_msg = copy_msg;
3396     }
3397     unlock_user(host_msg, msg, 0);
3398     return ret;
3399 }
3400 
3401 /* do_recvfrom() must return target values and target errnos. */
3402 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3403                             abi_ulong target_addr,
3404                             abi_ulong target_addrlen)
3405 {
3406     socklen_t addrlen, ret_addrlen;
3407     void *addr;
3408     void *host_msg;
3409     abi_long ret;
3410 
3411     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3412     if (!host_msg)
3413         return -TARGET_EFAULT;
3414     if (target_addr) {
3415         if (get_user_u32(addrlen, target_addrlen)) {
3416             ret = -TARGET_EFAULT;
3417             goto fail;
3418         }
3419         if ((int)addrlen < 0) {
3420             ret = -TARGET_EINVAL;
3421             goto fail;
3422         }
3423         addr = alloca(addrlen);
3424         ret_addrlen = addrlen;
3425         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3426                                       addr, &ret_addrlen));
3427     } else {
3428         addr = NULL; /* To keep compiler quiet.  */
3429         addrlen = 0; /* To keep compiler quiet.  */
3430         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3431     }
3432     if (!is_error(ret)) {
3433         if (fd_trans_host_to_target_data(fd)) {
3434             abi_long trans;
3435             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3436             if (is_error(trans)) {
3437                 ret = trans;
3438                 goto fail;
3439             }
3440         }
3441         if (target_addr) {
3442             host_to_target_sockaddr(target_addr, addr,
3443                                     MIN(addrlen, ret_addrlen));
3444             if (put_user_u32(ret_addrlen, target_addrlen)) {
3445                 ret = -TARGET_EFAULT;
3446                 goto fail;
3447             }
3448         }
3449         unlock_user(host_msg, msg, len);
3450     } else {
3451 fail:
3452         unlock_user(host_msg, msg, 0);
3453     }
3454     return ret;
3455 }
3456 
3457 #ifdef TARGET_NR_socketcall
3458 /* do_socketcall() must return target values and target errnos. */
3459 static abi_long do_socketcall(int num, abi_ulong vptr)
3460 {
3461     static const unsigned nargs[] = { /* number of arguments per operation */
3462         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3463         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3464         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3465         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3466         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3467         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3468         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3469         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3470         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3471         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3472         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3473         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3474         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3475         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3476         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3477         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3478         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3479         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3480         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3481         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3482     };
3483     abi_long a[6]; /* max 6 args */
3484     unsigned i;
3485 
3486     /* check the range of the first argument num */
3487     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3488     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3489         return -TARGET_EINVAL;
3490     }
3491     /* ensure we have space for args */
3492     if (nargs[num] > ARRAY_SIZE(a)) {
3493         return -TARGET_EINVAL;
3494     }
3495     /* collect the arguments in a[] according to nargs[] */
3496     for (i = 0; i < nargs[num]; ++i) {
3497         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3498             return -TARGET_EFAULT;
3499         }
3500     }
3501     /* now when we have the args, invoke the appropriate underlying function */
3502     switch (num) {
3503     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3504         return do_socket(a[0], a[1], a[2]);
3505     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3506         return do_bind(a[0], a[1], a[2]);
3507     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3508         return do_connect(a[0], a[1], a[2]);
3509     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3510         return get_errno(listen(a[0], a[1]));
3511     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3512         return do_accept4(a[0], a[1], a[2], 0);
3513     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3514         return do_getsockname(a[0], a[1], a[2]);
3515     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3516         return do_getpeername(a[0], a[1], a[2]);
3517     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3518         return do_socketpair(a[0], a[1], a[2], a[3]);
3519     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3520         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3521     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3522         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3523     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3524         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3525     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3526         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3527     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3528         return get_errno(shutdown(a[0], a[1]));
3529     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3530         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3531     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3532         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3533     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3534         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3535     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3536         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3537     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3538         return do_accept4(a[0], a[1], a[2], a[3]);
3539     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3540         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3541     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3542         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3543     default:
3544         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3545         return -TARGET_EINVAL;
3546     }
3547 }
3548 #endif
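
/*
 * Illustrative sketch of the guest-side convention handled by
 * do_socketcall() (assumed guest code, shown only to make the argument
 * unmarshalling above concrete):
 *
 *     // hypothetical guest code, libc with only socketcall()
 *     long args[3] = { sockfd, (long)addr, addrlen };
 *     syscall(__NR_socketcall, SYS_CONNECT, args);
 *
 * SYS_CONNECT corresponds to TARGET_SYS_CONNECT here, so nargs[] says three
 * words are fetched from the guest array with get_user_ual() and passed on
 * as do_connect(a[0], a[1], a[2]).
 */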
3549 
3550 #define N_SHM_REGIONS	32
3551 
3552 static struct shm_region {
3553     abi_ulong start;
3554     abi_ulong size;
3555     bool in_use;
3556 } shm_regions[N_SHM_REGIONS];
3557 
3558 #ifndef TARGET_SEMID64_DS
3559 /* asm-generic version of this struct */
3560 struct target_semid64_ds
3561 {
3562   struct target_ipc_perm sem_perm;
3563   abi_ulong sem_otime;
3564 #if TARGET_ABI_BITS == 32
3565   abi_ulong __unused1;
3566 #endif
3567   abi_ulong sem_ctime;
3568 #if TARGET_ABI_BITS == 32
3569   abi_ulong __unused2;
3570 #endif
3571   abi_ulong sem_nsems;
3572   abi_ulong __unused3;
3573   abi_ulong __unused4;
3574 };
3575 #endif
3576 
3577 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3578                                                abi_ulong target_addr)
3579 {
3580     struct target_ipc_perm *target_ip;
3581     struct target_semid64_ds *target_sd;
3582 
3583     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3584         return -TARGET_EFAULT;
3585     target_ip = &(target_sd->sem_perm);
3586     host_ip->__key = tswap32(target_ip->__key);
3587     host_ip->uid = tswap32(target_ip->uid);
3588     host_ip->gid = tswap32(target_ip->gid);
3589     host_ip->cuid = tswap32(target_ip->cuid);
3590     host_ip->cgid = tswap32(target_ip->cgid);
3591 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3592     host_ip->mode = tswap32(target_ip->mode);
3593 #else
3594     host_ip->mode = tswap16(target_ip->mode);
3595 #endif
3596 #if defined(TARGET_PPC)
3597     host_ip->__seq = tswap32(target_ip->__seq);
3598 #else
3599     host_ip->__seq = tswap16(target_ip->__seq);
3600 #endif
3601     unlock_user_struct(target_sd, target_addr, 0);
3602     return 0;
3603 }
3604 
3605 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3606                                                struct ipc_perm *host_ip)
3607 {
3608     struct target_ipc_perm *target_ip;
3609     struct target_semid64_ds *target_sd;
3610 
3611     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3612         return -TARGET_EFAULT;
3613     target_ip = &(target_sd->sem_perm);
3614     target_ip->__key = tswap32(host_ip->__key);
3615     target_ip->uid = tswap32(host_ip->uid);
3616     target_ip->gid = tswap32(host_ip->gid);
3617     target_ip->cuid = tswap32(host_ip->cuid);
3618     target_ip->cgid = tswap32(host_ip->cgid);
3619 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3620     target_ip->mode = tswap32(host_ip->mode);
3621 #else
3622     target_ip->mode = tswap16(host_ip->mode);
3623 #endif
3624 #if defined(TARGET_PPC)
3625     target_ip->__seq = tswap32(host_ip->__seq);
3626 #else
3627     target_ip->__seq = tswap16(host_ip->__seq);
3628 #endif
3629     unlock_user_struct(target_sd, target_addr, 1);
3630     return 0;
3631 }
3632 
3633 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3634                                                abi_ulong target_addr)
3635 {
3636     struct target_semid64_ds *target_sd;
3637 
3638     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3639         return -TARGET_EFAULT;
3640     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3641         return -TARGET_EFAULT;
3642     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3643     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3644     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3645     unlock_user_struct(target_sd, target_addr, 0);
3646     return 0;
3647 }
3648 
3649 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3650                                                struct semid_ds *host_sd)
3651 {
3652     struct target_semid64_ds *target_sd;
3653 
3654     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3655         return -TARGET_EFAULT;
3656     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3657         return -TARGET_EFAULT;
3658     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3659     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3660     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3661     unlock_user_struct(target_sd, target_addr, 1);
3662     return 0;
3663 }
3664 
3665 struct target_seminfo {
3666     int semmap;
3667     int semmni;
3668     int semmns;
3669     int semmnu;
3670     int semmsl;
3671     int semopm;
3672     int semume;
3673     int semusz;
3674     int semvmx;
3675     int semaem;
3676 };
3677 
3678 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3679                                               struct seminfo *host_seminfo)
3680 {
3681     struct target_seminfo *target_seminfo;
3682     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3683         return -TARGET_EFAULT;
3684     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3685     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3686     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3687     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3688     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3689     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3690     __put_user(host_seminfo->semume, &target_seminfo->semume);
3691     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3692     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3693     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3694     unlock_user_struct(target_seminfo, target_addr, 1);
3695     return 0;
3696 }
3697 
3698 union semun {
3699     int val;
3700     struct semid_ds *buf;
3701     unsigned short *array;
3702     struct seminfo *__buf;
3703 };
3704 
3705 union target_semun {
3706     int val;
3707     abi_ulong buf;
3708     abi_ulong array;
3709     abi_ulong __buf;
3710 };
3711 
3712 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3713                                                abi_ulong target_addr)
3714 {
3715     int nsems;
3716     unsigned short *array;
3717     union semun semun;
3718     struct semid_ds semid_ds;
3719     int i, ret;
3720 
3721     semun.buf = &semid_ds;
3722 
3723     ret = semctl(semid, 0, IPC_STAT, semun);
3724     if (ret == -1)
3725         return get_errno(ret);
3726 
3727     nsems = semid_ds.sem_nsems;
3728 
3729     *host_array = g_try_new(unsigned short, nsems);
3730     if (!*host_array) {
3731         return -TARGET_ENOMEM;
3732     }
3733     array = lock_user(VERIFY_READ, target_addr,
3734                       nsems*sizeof(unsigned short), 1);
3735     if (!array) {
3736         g_free(*host_array);
3737         return -TARGET_EFAULT;
3738     }
3739 
3740     for (i = 0; i < nsems; i++) {
3741         __get_user((*host_array)[i], &array[i]);
3742     }
3743     unlock_user(array, target_addr, 0);
3744 
3745     return 0;
3746 }
3747 
3748 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3749                                                unsigned short **host_array)
3750 {
3751     int nsems;
3752     unsigned short *array;
3753     union semun semun;
3754     struct semid_ds semid_ds;
3755     int i, ret;
3756 
3757     semun.buf = &semid_ds;
3758 
3759     ret = semctl(semid, 0, IPC_STAT, semun);
3760     if (ret == -1)
3761         return get_errno(ret);
3762 
3763     nsems = semid_ds.sem_nsems;
3764 
3765     array = lock_user(VERIFY_WRITE, target_addr,
3766                       nsems*sizeof(unsigned short), 0);
3767     if (!array)
3768         return -TARGET_EFAULT;
3769 
3770     for (i = 0; i < nsems; i++) {
3771         __put_user((*host_array)[i], &array[i]);
3772     }
3773     g_free(*host_array);
3774     unlock_user(array, target_addr, 1);
3775 
3776     return 0;
3777 }
3778 
3779 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3780                                  abi_ulong target_arg)
3781 {
3782     union target_semun target_su = { .buf = target_arg };
3783     union semun arg;
3784     struct semid_ds dsarg;
3785     unsigned short *array = NULL;
3786     struct seminfo seminfo;
3787     abi_long ret = -TARGET_EINVAL;
3788     abi_long err;
3789     cmd &= 0xff;
3790 
3791     switch (cmd) {
3792     case GETVAL:
3793     case SETVAL:
3794         /* In 64-bit cross-endian situations, we will erroneously pick up
3795          * the wrong half of the union for the "val" element.  To rectify
3796          * this, the entire 8-byte structure is byteswapped, followed by
3797          * a swap of the 4-byte val field. In other cases, the data is
3798          * already in proper host byte order. */
3799         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3800             target_su.buf = tswapal(target_su.buf);
3801             arg.val = tswap32(target_su.val);
3802         } else {
3803             arg.val = target_su.val;
3804         }
3805         ret = get_errno(semctl(semid, semnum, cmd, arg));
3806         break;
3807     case GETALL:
3808     case SETALL:
3809         err = target_to_host_semarray(semid, &array, target_su.array);
3810         if (err)
3811             return err;
3812         arg.array = array;
3813         ret = get_errno(semctl(semid, semnum, cmd, arg));
3814         err = host_to_target_semarray(semid, target_su.array, &array);
3815         if (err)
3816             return err;
3817         break;
3818     case IPC_STAT:
3819     case IPC_SET:
3820     case SEM_STAT:
3821         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3822         if (err)
3823             return err;
3824         arg.buf = &dsarg;
3825         ret = get_errno(semctl(semid, semnum, cmd, arg));
3826         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3827         if (err)
3828             return err;
3829         break;
3830     case IPC_INFO:
3831     case SEM_INFO:
3832         arg.__buf = &seminfo;
3833         ret = get_errno(semctl(semid, semnum, cmd, arg));
3834         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3835         if (err)
3836             return err;
3837         break;
3838     case IPC_RMID:
3839     case GETPID:
3840     case GETNCNT:
3841     case GETZCNT:
3842         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3843         break;
3844     }
3845 
3846     return ret;
3847 }
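
/*
 * Illustrative sketch (assumed guest usage): for SETVAL the guest passes
 * union semun by value, so target_arg holds the raw union contents, e.g.
 *
 *     // hypothetical guest code
 *     union semun arg = { .val = 1 };
 *     semctl(semid, 0, SETVAL, arg);
 *
 * On a 64-bit ABI with differing guest/host endianness the 32-bit "val"
 * member lands in the other half of the 8-byte union, which is why the
 * GETVAL/SETVAL branch above byteswaps the whole word first and the 32-bit
 * value second before the host semctl() sees arg.val == 1.
 */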
3848 
3849 struct target_sembuf {
3850     unsigned short sem_num;
3851     short sem_op;
3852     short sem_flg;
3853 };
3854 
3855 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3856                                              abi_ulong target_addr,
3857                                              unsigned nsops)
3858 {
3859     struct target_sembuf *target_sembuf;
3860     int i;
3861 
3862     target_sembuf = lock_user(VERIFY_READ, target_addr,
3863                               nsops*sizeof(struct target_sembuf), 1);
3864     if (!target_sembuf)
3865         return -TARGET_EFAULT;
3866 
3867     for (i = 0; i < nsops; i++) {
3868         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3869         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3870         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3871     }
3872 
3873     unlock_user(target_sembuf, target_addr, 0);
3874 
3875     return 0;
3876 }
3877 
3878 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3879 {
3880     struct sembuf sops[nsops];
3881     abi_long ret;
3882 
3883     if (target_to_host_sembuf(sops, ptr, nsops))
3884         return -TARGET_EFAULT;
3885 
3886     ret = -TARGET_ENOSYS;
3887 #ifdef __NR_semtimedop
3888     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3889 #endif
3890 #ifdef __NR_ipc
3891     if (ret == -TARGET_ENOSYS) {
3892         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3893     }
3894 #endif
3895     return ret;
3896 }
3897 
3898 struct target_msqid_ds
3899 {
3900     struct target_ipc_perm msg_perm;
3901     abi_ulong msg_stime;
3902 #if TARGET_ABI_BITS == 32
3903     abi_ulong __unused1;
3904 #endif
3905     abi_ulong msg_rtime;
3906 #if TARGET_ABI_BITS == 32
3907     abi_ulong __unused2;
3908 #endif
3909     abi_ulong msg_ctime;
3910 #if TARGET_ABI_BITS == 32
3911     abi_ulong __unused3;
3912 #endif
3913     abi_ulong __msg_cbytes;
3914     abi_ulong msg_qnum;
3915     abi_ulong msg_qbytes;
3916     abi_ulong msg_lspid;
3917     abi_ulong msg_lrpid;
3918     abi_ulong __unused4;
3919     abi_ulong __unused5;
3920 };
3921 
3922 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3923                                                abi_ulong target_addr)
3924 {
3925     struct target_msqid_ds *target_md;
3926 
3927     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3928         return -TARGET_EFAULT;
3929     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3930         return -TARGET_EFAULT;
3931     host_md->msg_stime = tswapal(target_md->msg_stime);
3932     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3933     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3934     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3935     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3936     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3937     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3938     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3939     unlock_user_struct(target_md, target_addr, 0);
3940     return 0;
3941 }
3942 
3943 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3944                                                struct msqid_ds *host_md)
3945 {
3946     struct target_msqid_ds *target_md;
3947 
3948     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3949         return -TARGET_EFAULT;
3950     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3951         return -TARGET_EFAULT;
3952     target_md->msg_stime = tswapal(host_md->msg_stime);
3953     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3954     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3955     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3956     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3957     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3958     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3959     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3960     unlock_user_struct(target_md, target_addr, 1);
3961     return 0;
3962 }
3963 
3964 struct target_msginfo {
3965     int msgpool;
3966     int msgmap;
3967     int msgmax;
3968     int msgmnb;
3969     int msgmni;
3970     int msgssz;
3971     int msgtql;
3972     unsigned short int msgseg;
3973 };
3974 
3975 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3976                                               struct msginfo *host_msginfo)
3977 {
3978     struct target_msginfo *target_msginfo;
3979     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3980         return -TARGET_EFAULT;
3981     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3982     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3983     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3984     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3985     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3986     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3987     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3988     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3989     unlock_user_struct(target_msginfo, target_addr, 1);
3990     return 0;
3991 }
3992 
3993 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3994 {
3995     struct msqid_ds dsarg;
3996     struct msginfo msginfo;
3997     abi_long ret = -TARGET_EINVAL;
3998 
3999     cmd &= 0xff;
4000 
4001     switch (cmd) {
4002     case IPC_STAT:
4003     case IPC_SET:
4004     case MSG_STAT:
4005         if (target_to_host_msqid_ds(&dsarg,ptr))
4006             return -TARGET_EFAULT;
4007         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4008         if (host_to_target_msqid_ds(ptr,&dsarg))
4009             return -TARGET_EFAULT;
4010         break;
4011     case IPC_RMID:
4012         ret = get_errno(msgctl(msgid, cmd, NULL));
4013         break;
4014     case IPC_INFO:
4015     case MSG_INFO:
4016         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4017         if (host_to_target_msginfo(ptr, &msginfo))
4018             return -TARGET_EFAULT;
4019         break;
4020     }
4021 
4022     return ret;
4023 }
4024 
4025 struct target_msgbuf {
4026     abi_long mtype;
4027     char mtext[1];
4028 };
4029 
4030 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4031                                  ssize_t msgsz, int msgflg)
4032 {
4033     struct target_msgbuf *target_mb;
4034     struct msgbuf *host_mb;
4035     abi_long ret = 0;
4036 
4037     if (msgsz < 0) {
4038         return -TARGET_EINVAL;
4039     }
4040 
4041     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4042         return -TARGET_EFAULT;
4043     host_mb = g_try_malloc(msgsz + sizeof(long));
4044     if (!host_mb) {
4045         unlock_user_struct(target_mb, msgp, 0);
4046         return -TARGET_ENOMEM;
4047     }
4048     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4049     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4050     ret = -TARGET_ENOSYS;
4051 #ifdef __NR_msgsnd
4052     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4053 #endif
4054 #ifdef __NR_ipc
4055     if (ret == -TARGET_ENOSYS) {
4056         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4057                                  host_mb, 0));
4058     }
4059 #endif
4060     g_free(host_mb);
4061     unlock_user_struct(target_mb, msgp, 0);
4062 
4063     return ret;
4064 }
4065 
4066 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4067                                  ssize_t msgsz, abi_long msgtyp,
4068                                  int msgflg)
4069 {
4070     struct target_msgbuf *target_mb;
4071     char *target_mtext;
4072     struct msgbuf *host_mb;
4073     abi_long ret = 0;
4074 
4075     if (msgsz < 0) {
4076         return -TARGET_EINVAL;
4077     }
4078 
4079     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4080         return -TARGET_EFAULT;
4081 
4082     host_mb = g_try_malloc(msgsz + sizeof(long));
4083     if (!host_mb) {
4084         ret = -TARGET_ENOMEM;
4085         goto end;
4086     }
4087     ret = -TARGET_ENOSYS;
4088 #ifdef __NR_msgrcv
4089     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4090 #endif
4091 #ifdef __NR_ipc
4092     if (ret == -TARGET_ENOSYS) {
4093         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4094                         msgflg, host_mb, msgtyp));
4095     }
4096 #endif
4097 
4098     if (ret > 0) {
4099         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4100         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4101         if (!target_mtext) {
4102             ret = -TARGET_EFAULT;
4103             goto end;
4104         }
4105         memcpy(target_mb->mtext, host_mb->mtext, ret);
4106         unlock_user(target_mtext, target_mtext_addr, ret);
4107     }
4108 
4109     target_mb->mtype = tswapal(host_mb->mtype);
4110 
4111 end:
4112     if (target_mb)
4113         unlock_user_struct(target_mb, msgp, 1);
4114     g_free(host_mb);
4115     return ret;
4116 }
4117 
4118 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4119                                                abi_ulong target_addr)
4120 {
4121     struct target_shmid_ds *target_sd;
4122 
4123     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4124         return -TARGET_EFAULT;
4125     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4126         return -TARGET_EFAULT;
4127     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4128     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4129     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4130     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4131     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4132     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4133     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4134     unlock_user_struct(target_sd, target_addr, 0);
4135     return 0;
4136 }
4137 
4138 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4139                                                struct shmid_ds *host_sd)
4140 {
4141     struct target_shmid_ds *target_sd;
4142 
4143     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4144         return -TARGET_EFAULT;
4145     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4146         return -TARGET_EFAULT;
4147     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4148     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4149     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4150     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4151     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4152     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4153     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4154     unlock_user_struct(target_sd, target_addr, 1);
4155     return 0;
4156 }
4157 
4158 struct  target_shminfo {
4159     abi_ulong shmmax;
4160     abi_ulong shmmin;
4161     abi_ulong shmmni;
4162     abi_ulong shmseg;
4163     abi_ulong shmall;
4164 };
4165 
4166 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4167                                               struct shminfo *host_shminfo)
4168 {
4169     struct target_shminfo *target_shminfo;
4170     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4171         return -TARGET_EFAULT;
4172     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4173     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4174     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4175     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4176     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4177     unlock_user_struct(target_shminfo, target_addr, 1);
4178     return 0;
4179 }
4180 
4181 struct target_shm_info {
4182     int used_ids;
4183     abi_ulong shm_tot;
4184     abi_ulong shm_rss;
4185     abi_ulong shm_swp;
4186     abi_ulong swap_attempts;
4187     abi_ulong swap_successes;
4188 };
4189 
4190 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4191                                                struct shm_info *host_shm_info)
4192 {
4193     struct target_shm_info *target_shm_info;
4194     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4195         return -TARGET_EFAULT;
4196     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4197     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4198     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4199     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4200     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4201     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4202     unlock_user_struct(target_shm_info, target_addr, 1);
4203     return 0;
4204 }
4205 
4206 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4207 {
4208     struct shmid_ds dsarg;
4209     struct shminfo shminfo;
4210     struct shm_info shm_info;
4211     abi_long ret = -TARGET_EINVAL;
4212 
4213     cmd &= 0xff;
4214 
4215     switch (cmd) {
4216     case IPC_STAT:
4217     case IPC_SET:
4218     case SHM_STAT:
4219         if (target_to_host_shmid_ds(&dsarg, buf))
4220             return -TARGET_EFAULT;
4221         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4222         if (host_to_target_shmid_ds(buf, &dsarg))
4223             return -TARGET_EFAULT;
4224         break;
4225     case IPC_INFO:
4226         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4227         if (host_to_target_shminfo(buf, &shminfo))
4228             return -TARGET_EFAULT;
4229         break;
4230     case SHM_INFO:
4231         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4232         if (host_to_target_shm_info(buf, &shm_info))
4233             return -TARGET_EFAULT;
4234         break;
4235     case IPC_RMID:
4236     case SHM_LOCK:
4237     case SHM_UNLOCK:
4238         ret = get_errno(shmctl(shmid, cmd, NULL));
4239         break;
4240     }
4241 
4242     return ret;
4243 }
4244 
4245 #ifndef TARGET_FORCE_SHMLBA
4246 /* For most architectures, SHMLBA is the same as the page size;
4247  * some architectures have larger values, in which case they should
4248  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4249  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4250  * and defining its own value for SHMLBA.
4251  *
4252  * The kernel also permits SHMLBA to be set by the architecture to a
4253  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4254  * this means that addresses are rounded to the large size if
4255  * SHM_RND is set but addresses not aligned to that size are not rejected
4256  * as long as they are at least page-aligned. Since the only architecture
4257  * which uses this is ia64, this code doesn't provide for that oddity.
4258  */
4259 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4260 {
4261     return TARGET_PAGE_SIZE;
4262 }
4263 #endif
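
/*
 * Hypothetical example of the override described above (not taken from any
 * real target): an architecture whose cache aliasing rules demand larger
 * shared-memory alignment would set TARGET_FORCE_SHMLBA in its target
 * headers and provide its own helper, e.g.
 *
 *     #define TARGET_FORCE_SHMLBA 1
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;   // example value only
 *     }
 *
 * mirroring the kernel's __ARCH_FORCE_SHMLBA mechanism.
 */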
4264 
4265 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4266                                  int shmid, abi_ulong shmaddr, int shmflg)
4267 {
4268     abi_long raddr;
4269     void *host_raddr;
4270     struct shmid_ds shm_info;
4271     int i, ret;
4272     abi_ulong shmlba;
4273 
4274     /* find out the length of the shared memory segment */
4275     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4276     if (is_error(ret)) {
4277         /* can't get length, bail out */
4278         return ret;
4279     }
4280 
4281     shmlba = target_shmlba(cpu_env);
4282 
4283     if (shmaddr & (shmlba - 1)) {
4284         if (shmflg & SHM_RND) {
4285             shmaddr &= ~(shmlba - 1);
4286         } else {
4287             return -TARGET_EINVAL;
4288         }
4289     }
4290     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4291         return -TARGET_EINVAL;
4292     }
4293 
4294     mmap_lock();
4295 
4296     if (shmaddr)
4297         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4298     else {
4299         abi_ulong mmap_start;
4300 
4301         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4302         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4303 
4304         if (mmap_start == -1) {
4305             errno = ENOMEM;
4306             host_raddr = (void *)-1;
4307         } else
4308             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4309     }
4310 
4311     if (host_raddr == (void *)-1) {
4312         mmap_unlock();
4313         return get_errno((long)host_raddr);
4314     }
4315     raddr = h2g((unsigned long)host_raddr);
4316 
4317     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4318                    PAGE_VALID | PAGE_READ |
4319                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4320 
4321     for (i = 0; i < N_SHM_REGIONS; i++) {
4322         if (!shm_regions[i].in_use) {
4323             shm_regions[i].in_use = true;
4324             shm_regions[i].start = raddr;
4325             shm_regions[i].size = shm_info.shm_segsz;
4326             break;
4327         }
4328     }
4329 
4330     mmap_unlock();
4331     return raddr;
4332 
4333 }
4334 
4335 static inline abi_long do_shmdt(abi_ulong shmaddr)
4336 {
4337     int i;
4338     abi_long rv;
4339 
4340     mmap_lock();
4341 
4342     for (i = 0; i < N_SHM_REGIONS; ++i) {
4343         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4344             shm_regions[i].in_use = false;
4345             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4346             break;
4347         }
4348     }
4349     rv = get_errno(shmdt(g2h(shmaddr)));
4350 
4351     mmap_unlock();
4352 
4353     return rv;
4354 }
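
/*
 * Note on the bookkeeping above (illustrative round trip): shm_regions[]
 * exists only so that do_shmdt() can recover the segment size, which the
 * shmdt(2) interface itself does not carry.
 *
 *     raddr = do_shmat(cpu_env, shmid, 0, 0); // records {raddr, segsz, in_use}
 *     ...
 *     do_shmdt(raddr);                        // finds the slot, clears the
 *                                             // guest page flags, calls shmdt()
 *
 * With N_SHM_REGIONS fixed at 32, further attaches still succeed but their
 * guest page flags are not reset again on detach.
 */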
4355 
4356 #ifdef TARGET_NR_ipc
4357 /* ??? This only works with linear mappings.  */
4358 /* do_ipc() must return target values and target errnos. */
4359 static abi_long do_ipc(CPUArchState *cpu_env,
4360                        unsigned int call, abi_long first,
4361                        abi_long second, abi_long third,
4362                        abi_long ptr, abi_long fifth)
4363 {
4364     int version;
4365     abi_long ret = 0;
4366 
4367     version = call >> 16;
4368     call &= 0xffff;
4369 
4370     switch (call) {
4371     case IPCOP_semop:
4372         ret = do_semop(first, ptr, second);
4373         break;
4374 
4375     case IPCOP_semget:
4376         ret = get_errno(semget(first, second, third));
4377         break;
4378 
4379     case IPCOP_semctl: {
4380         /* The semun argument to semctl is passed by value, so dereference the
4381          * ptr argument. */
4382         abi_ulong atptr;
4383         get_user_ual(atptr, ptr);
4384         ret = do_semctl(first, second, third, atptr);
4385         break;
4386     }
4387 
4388     case IPCOP_msgget:
4389         ret = get_errno(msgget(first, second));
4390         break;
4391 
4392     case IPCOP_msgsnd:
4393         ret = do_msgsnd(first, ptr, second, third);
4394         break;
4395 
4396     case IPCOP_msgctl:
4397         ret = do_msgctl(first, second, ptr);
4398         break;
4399 
4400     case IPCOP_msgrcv:
4401         switch (version) {
4402         case 0:
4403             {
4404                 struct target_ipc_kludge {
4405                     abi_long msgp;
4406                     abi_long msgtyp;
4407                 } *tmp;
4408 
4409                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4410                     ret = -TARGET_EFAULT;
4411                     break;
4412                 }
4413 
4414                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4415 
4416                 unlock_user_struct(tmp, ptr, 0);
4417                 break;
4418             }
4419         default:
4420             ret = do_msgrcv(first, ptr, second, fifth, third);
4421         }
4422         break;
4423 
4424     case IPCOP_shmat:
4425         switch (version) {
4426         default:
4427         {
4428             abi_ulong raddr;
4429             raddr = do_shmat(cpu_env, first, ptr, second);
4430             if (is_error(raddr))
4431                 return get_errno(raddr);
4432             if (put_user_ual(raddr, third))
4433                 return -TARGET_EFAULT;
4434             break;
4435         }
4436         case 1:
4437             ret = -TARGET_EINVAL;
4438             break;
4439         }
4440         break;
4441     case IPCOP_shmdt:
4442         ret = do_shmdt(ptr);
4443         break;
4444 
4445     case IPCOP_shmget:
4446         /* IPC_* flag values are the same on all Linux platforms */
4447         ret = get_errno(shmget(first, second, third));
4448         break;
4449 
4450     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4451     case IPCOP_shmctl:
4452         ret = do_shmctl(first, second, ptr);
4453         break;
4454     default:
4455         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4456                       call, version);
4457         ret = -TARGET_ENOSYS;
4458         break;
4459     }
4460     return ret;
4461 }
4462 #endif
4463 
4464 /* kernel structure types definitions */
4465 
4466 #define STRUCT(name, ...) STRUCT_ ## name,
4467 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4468 enum {
4469 #include "syscall_types.h"
4470 STRUCT_MAX
4471 };
4472 #undef STRUCT
4473 #undef STRUCT_SPECIAL
4474 
4475 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4476 #define STRUCT_SPECIAL(name)
4477 #include "syscall_types.h"
4478 #undef STRUCT
4479 #undef STRUCT_SPECIAL
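
/*
 * Note on the X-macro pattern above: syscall_types.h is included twice with
 * different definitions of STRUCT().  Assuming a hypothetical entry such as
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * the first inclusion produces the enum constant STRUCT_winsize and the
 * second produces the descriptor
 *
 *     static const argtype struct_winsize_def[] =
 *         { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 *
 * which the thunk code uses to convert the structure between guest and host
 * layouts.
 */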
4480 
4481 typedef struct IOCTLEntry IOCTLEntry;
4482 
4483 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4484                              int fd, int cmd, abi_long arg);
4485 
4486 struct IOCTLEntry {
4487     int target_cmd;
4488     unsigned int host_cmd;
4489     const char *name;
4490     int access;
4491     do_ioctl_fn *do_ioctl;
4492     const argtype arg_type[5];
4493 };
4494 
4495 #define IOC_R 0x0001
4496 #define IOC_W 0x0002
4497 #define IOC_RW (IOC_R | IOC_W)
4498 
4499 #define MAX_STRUCT_SIZE 4096
4500 
4501 #ifdef CONFIG_FIEMAP
4502 /* So fiemap access checks don't overflow on 32-bit systems.
4503  * This is very slightly smaller than the limit imposed by
4504  * the underlying kernel.
4505  */
4506 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4507                             / sizeof(struct fiemap_extent))
4508 
4509 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4510                                        int fd, int cmd, abi_long arg)
4511 {
4512     /* The parameter for this ioctl is a struct fiemap followed
4513      * by an array of struct fiemap_extent whose length is given
4514      * by fiemap->fm_extent_count. The array is filled in by the
4515      * ioctl.
4516      */
4517     int target_size_in, target_size_out;
4518     struct fiemap *fm;
4519     const argtype *arg_type = ie->arg_type;
4520     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4521     void *argptr, *p;
4522     abi_long ret;
4523     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4524     uint32_t outbufsz;
4525     int free_fm = 0;
4526 
4527     assert(arg_type[0] == TYPE_PTR);
4528     assert(ie->access == IOC_RW);
4529     arg_type++;
4530     target_size_in = thunk_type_size(arg_type, 0);
4531     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4532     if (!argptr) {
4533         return -TARGET_EFAULT;
4534     }
4535     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4536     unlock_user(argptr, arg, 0);
4537     fm = (struct fiemap *)buf_temp;
4538     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4539         return -TARGET_EINVAL;
4540     }
4541 
4542     outbufsz = sizeof (*fm) +
4543         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4544 
4545     if (outbufsz > MAX_STRUCT_SIZE) {
4546         /* We can't fit all the extents into the fixed size buffer.
4547          * Allocate one that is large enough and use it instead.
4548          */
4549         fm = g_try_malloc(outbufsz);
4550         if (!fm) {
4551             return -TARGET_ENOMEM;
4552         }
4553         memcpy(fm, buf_temp, sizeof(struct fiemap));
4554         free_fm = 1;
4555     }
4556     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4557     if (!is_error(ret)) {
4558         target_size_out = target_size_in;
4559         /* An extent_count of 0 means we were only counting the extents
4560          * so there are no structs to copy
4561          */
4562         if (fm->fm_extent_count != 0) {
4563             target_size_out += fm->fm_mapped_extents * extent_size;
4564         }
4565         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4566         if (!argptr) {
4567             ret = -TARGET_EFAULT;
4568         } else {
4569             /* Convert the struct fiemap */
4570             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4571             if (fm->fm_extent_count != 0) {
4572                 p = argptr + target_size_in;
4573                 /* ...and then all the struct fiemap_extents */
4574                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4575                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4576                                   THUNK_TARGET);
4577                     p += extent_size;
4578                 }
4579             }
4580             unlock_user(argptr, arg, target_size_out);
4581         }
4582     }
4583     if (free_fm) {
4584         g_free(fm);
4585     }
4586     return ret;
4587 }
4588 #endif
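
/*
 * Sketch of the variable-length layout handled above (illustration only):
 * the FS_IOC_FIEMAP argument is effectively
 *
 *     struct fiemap hdr;                 // fm_extent_count = N, set by caller
 *     struct fiemap_extent extents[N];   // filled in by the kernel
 *
 * so the conversion thunks the fixed header first, then converts each of the
 * fm_mapped_extents records individually, switching to a heap buffer when N
 * would overflow the MAX_STRUCT_SIZE bounce buffer.
 */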
4589 
4590 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4591                                 int fd, int cmd, abi_long arg)
4592 {
4593     const argtype *arg_type = ie->arg_type;
4594     int target_size;
4595     void *argptr;
4596     int ret;
4597     struct ifconf *host_ifconf;
4598     uint32_t outbufsz;
4599     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4600     int target_ifreq_size;
4601     int nb_ifreq;
4602     int free_buf = 0;
4603     int i;
4604     int target_ifc_len;
4605     abi_long target_ifc_buf;
4606     int host_ifc_len;
4607     char *host_ifc_buf;
4608 
4609     assert(arg_type[0] == TYPE_PTR);
4610     assert(ie->access == IOC_RW);
4611 
4612     arg_type++;
4613     target_size = thunk_type_size(arg_type, 0);
4614 
4615     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4616     if (!argptr)
4617         return -TARGET_EFAULT;
4618     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4619     unlock_user(argptr, arg, 0);
4620 
4621     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4622     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4623     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4624 
4625     if (target_ifc_buf != 0) {
4626         target_ifc_len = host_ifconf->ifc_len;
4627         nb_ifreq = target_ifc_len / target_ifreq_size;
4628         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4629 
4630         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4631         if (outbufsz > MAX_STRUCT_SIZE) {
4632             /*
4633              * We can't fit all the ifreq entries into the fixed-size buffer.
4634              * Allocate one that is large enough and use it instead.
4635              */
4636             host_ifconf = g_try_malloc(outbufsz);
4637             if (!host_ifconf) {
4638                 return -TARGET_ENOMEM;
4639             }
4640             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4641             free_buf = 1;
4642         }
4643         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4644 
4645         host_ifconf->ifc_len = host_ifc_len;
4646     } else {
4647         host_ifc_buf = NULL;
4648     }
4649     host_ifconf->ifc_buf = host_ifc_buf;
4650 
4651     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4652     if (!is_error(ret)) {
4653         /* convert host ifc_len to target ifc_len */
4654 
4655         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4656         target_ifc_len = nb_ifreq * target_ifreq_size;
4657         host_ifconf->ifc_len = target_ifc_len;
4658 
4659         /* restore target ifc_buf */
4660 
4661         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4662 
4663         /* copy struct ifconf to target user */
4664 
4665         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4666         if (!argptr)
4667             return -TARGET_EFAULT;
4668         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4669         unlock_user(argptr, arg, target_size);
4670 
4671         if (target_ifc_buf != 0) {
4672             /* copy ifreq[] to target user */
4673             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            if (!argptr) {
                if (free_buf) {
                    g_free(host_ifconf);
                }
                return -TARGET_EFAULT;
            }
4674             for (i = 0; i < nb_ifreq ; i++) {
4675                 thunk_convert(argptr + i * target_ifreq_size,
4676                               host_ifc_buf + i * sizeof(struct ifreq),
4677                               ifreq_arg_type, THUNK_TARGET);
4678             }
4679             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4680         }
4681     }
4682 
4683     if (free_buf) {
4684         g_free(host_ifconf);
4685     }
4686 
4687     return ret;
4688 }
4689 
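/*
 * Illustrative sketch (compiled out via #if 0): the target_ifc_buf == 0
 * path above mirrors a common guest idiom -- call SIOCGIFCONF with
 * ifc_buf = NULL so the kernel only reports the required ifc_len, then
 * call it again with a buffer of that size.  Lengths are rescaled above
 * because the target and host "struct ifreq" need not be the same size.
 * A guest-side example of the idiom ("sock_fd" is a hypothetical socket):
 */
#if 0
#include <net/if.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

static int count_interfaces(int sock_fd)
{
    struct ifconf ifc;
    int n;

    /* Pass 1: NULL buffer, the kernel fills in the byte count only. */
    memset(&ifc, 0, sizeof(ifc));
    if (ioctl(sock_fd, SIOCGIFCONF, &ifc) < 0) {
        return -1;
    }

    /* Pass 2: fetch the actual ifreq array. */
    ifc.ifc_buf = malloc(ifc.ifc_len);
    if (!ifc.ifc_buf) {
        return -1;
    }
    if (ioctl(sock_fd, SIOCGIFCONF, &ifc) < 0) {
        free(ifc.ifc_buf);
        return -1;
    }
    n = ifc.ifc_len / sizeof(struct ifreq);
    free(ifc.ifc_buf);
    return n;
}
#endif
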
4690 #if defined(CONFIG_USBFS)
4691 #if HOST_LONG_BITS > 64
4692 #error USBDEVFS thunks do not support >64 bit hosts yet.
4693 #endif
4694 struct live_urb {
4695     uint64_t target_urb_adr;
4696     uint64_t target_buf_adr;
4697     char *target_buf_ptr;
4698     struct usbdevfs_urb host_urb;
4699 };
4700 
4701 static GHashTable *usbdevfs_urb_hashtable(void)
4702 {
4703     static GHashTable *urb_hashtable;
4704 
4705     if (!urb_hashtable) {
4706         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4707     }
4708     return urb_hashtable;
4709 }
4710 
4711 static void urb_hashtable_insert(struct live_urb *urb)
4712 {
4713     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4714     g_hash_table_insert(urb_hashtable, urb, urb);
4715 }
4716 
4717 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4718 {
4719     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4720     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4721 }
4722 
4723 static void urb_hashtable_remove(struct live_urb *urb)
4724 {
4725     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4726     g_hash_table_remove(urb_hashtable, urb);
4727 }
4728 
4729 static abi_long
4730 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4731                           int fd, int cmd, abi_long arg)
4732 {
4733     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4734     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4735     struct live_urb *lurb;
4736     void *argptr;
4737     uint64_t hurb;
4738     int target_size;
4739     uintptr_t target_urb_adr;
4740     abi_long ret;
4741 
4742     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4743 
4744     memset(buf_temp, 0, sizeof(uint64_t));
4745     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4746     if (is_error(ret)) {
4747         return ret;
4748     }
4749 
4750     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4751     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4752     if (!lurb->target_urb_adr) {
4753         return -TARGET_EFAULT;
4754     }
4755     urb_hashtable_remove(lurb);
4756     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4757         lurb->host_urb.buffer_length);
4758     lurb->target_buf_ptr = NULL;
4759 
4760     /* restore the guest buffer pointer */
4761     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4762 
4763     /* update the guest urb struct */
4764     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4765     if (!argptr) {
4766         g_free(lurb);
4767         return -TARGET_EFAULT;
4768     }
4769     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4770     unlock_user(argptr, lurb->target_urb_adr, target_size);
4771 
4772     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4773     /* write back the urb handle */
4774     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4775     if (!argptr) {
4776         g_free(lurb);
4777         return -TARGET_EFAULT;
4778     }
4779 
4780     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4781     target_urb_adr = lurb->target_urb_adr;
4782     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4783     unlock_user(argptr, arg, target_size);
4784 
4785     g_free(lurb);
4786     return ret;
4787 }
4788 
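/*
 * Illustrative sketch (compiled out via #if 0): the reap handler above
 * recovers its bookkeeping struct from the usbdevfs_urb pointer the
 * kernel returns by subtracting the member offset -- the classic
 * "container_of" pattern.  A minimal standalone version, with
 * hypothetical wrapper/member names:
 */
#if 0
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct inner { int payload; };

struct wrapper {
    uint64_t cookie;
    struct inner member;    /* only &member is handed out */
};

static struct wrapper *wrapper_from_member(struct inner *m)
{
    return (struct wrapper *)((uintptr_t)m - offsetof(struct wrapper, member));
}

static void demo(void)
{
    struct wrapper w = { .cookie = 42 };
    assert(wrapper_from_member(&w.member) == &w);
}
#endif
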
4789 static abi_long
4790 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4791                              uint8_t *buf_temp __attribute__((unused)),
4792                              int fd, int cmd, abi_long arg)
4793 {
4794     struct live_urb *lurb;
4795 
4796     /* map target address back to host URB with metadata. */
4797     lurb = urb_hashtable_lookup(arg);
4798     if (!lurb) {
4799         return -TARGET_EFAULT;
4800     }
4801     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4802 }
4803 
4804 static abi_long
4805 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4806                             int fd, int cmd, abi_long arg)
4807 {
4808     const argtype *arg_type = ie->arg_type;
4809     int target_size;
4810     abi_long ret;
4811     void *argptr;
4812     int rw_dir;
4813     struct live_urb *lurb;
4814 
4815     /*
4816      * each submitted URB needs to map to a unique ID for the
4817      * kernel, and that unique ID needs to be a pointer to
4818      * host memory.  hence, we need to malloc for each URB.
4819      * isochronous transfers have a variable length struct.
4820      */
4821     arg_type++;
4822     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4823 
4824     /* construct host copy of urb and metadata */
4825     lurb = g_try_malloc0(sizeof(struct live_urb));
4826     if (!lurb) {
4827         return -TARGET_ENOMEM;
4828     }
4829 
4830     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4831     if (!argptr) {
4832         g_free(lurb);
4833         return -TARGET_EFAULT;
4834     }
4835     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4836     unlock_user(argptr, arg, 0);
4837 
4838     lurb->target_urb_adr = arg;
4839     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4840 
4841     /* buffer space used depends on endpoint type so lock the entire buffer */
4842     /* control type urbs should check the buffer contents for true direction */
4843     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4844     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4845         lurb->host_urb.buffer_length, 1);
4846     if (lurb->target_buf_ptr == NULL) {
4847         g_free(lurb);
4848         return -TARGET_EFAULT;
4849     }
4850 
4851     /* update buffer pointer in host copy */
4852     lurb->host_urb.buffer = lurb->target_buf_ptr;
4853 
4854     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4855     if (is_error(ret)) {
4856         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4857         g_free(lurb);
4858     } else {
4859         urb_hashtable_insert(lurb);
4860     }
4861 
4862     return ret;
4863 }
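
/*
 * Illustrative sketch (compiled out via #if 0): the URB hash table above
 * is created with g_int64_hash/g_int64_equal but is keyed with the
 * "struct live_urb *" itself.  That works only because target_urb_adr
 * (a uint64_t) is the first member, so the struct pointer is also a
 * valid pointer to the 64-bit key -- which is what lets
 * urb_hashtable_lookup() probe with a plain &target_urb_adr.  A
 * stripped-down version of the same trick, with hypothetical names:
 */
#if 0
#include <assert.h>
#include <glib.h>
#include <stdint.h>

struct tracked {
    uint64_t key;           /* must stay the first member */
    int some_state;
};

static void demo(void)
{
    GHashTable *tbl = g_hash_table_new(g_int64_hash, g_int64_equal);
    struct tracked t = { .key = 0x1000, .some_state = 7 };
    uint64_t probe = 0x1000;

    g_hash_table_insert(tbl, &t, &t);            /* &t also points at t.key */
    assert(g_hash_table_lookup(tbl, &probe) == &t);
    g_hash_table_destroy(tbl);
}
#endif
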
4864 #endif /* CONFIG_USBFS */
4865 
4866 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4867                             int cmd, abi_long arg)
4868 {
4869     void *argptr;
4870     struct dm_ioctl *host_dm;
4871     abi_long guest_data;
4872     uint32_t guest_data_size;
4873     int target_size;
4874     const argtype *arg_type = ie->arg_type;
4875     abi_long ret;
4876     void *big_buf = NULL;
4877     char *host_data;
4878 
4879     arg_type++;
4880     target_size = thunk_type_size(arg_type, 0);
4881     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4882     if (!argptr) {
4883         ret = -TARGET_EFAULT;
4884         goto out;
4885     }
4886     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4887     unlock_user(argptr, arg, 0);
4888 
4889     /* buf_temp is too small, so fetch things into a bigger buffer */
4890     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4891     memcpy(big_buf, buf_temp, target_size);
4892     buf_temp = big_buf;
4893     host_dm = big_buf;
4894 
4895     guest_data = arg + host_dm->data_start;
4896     if ((guest_data - arg) < 0) {
4897         ret = -TARGET_EINVAL;
4898         goto out;
4899     }
4900     guest_data_size = host_dm->data_size - host_dm->data_start;
4901     host_data = (char*)host_dm + host_dm->data_start;
4902 
4903     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4904     if (!argptr) {
4905         ret = -TARGET_EFAULT;
4906         goto out;
4907     }
4908 
4909     switch (ie->host_cmd) {
4910     case DM_REMOVE_ALL:
4911     case DM_LIST_DEVICES:
4912     case DM_DEV_CREATE:
4913     case DM_DEV_REMOVE:
4914     case DM_DEV_SUSPEND:
4915     case DM_DEV_STATUS:
4916     case DM_DEV_WAIT:
4917     case DM_TABLE_STATUS:
4918     case DM_TABLE_CLEAR:
4919     case DM_TABLE_DEPS:
4920     case DM_LIST_VERSIONS:
4921         /* no input data */
4922         break;
4923     case DM_DEV_RENAME:
4924     case DM_DEV_SET_GEOMETRY:
4925         /* data contains only strings */
4926         memcpy(host_data, argptr, guest_data_size);
4927         break;
4928     case DM_TARGET_MSG:
4929         memcpy(host_data, argptr, guest_data_size);
4930         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4931         break;
4932     case DM_TABLE_LOAD:
4933     {
4934         void *gspec = argptr;
4935         void *cur_data = host_data;
4936         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4937         int spec_size = thunk_type_size(arg_type, 0);
4938         int i;
4939 
4940         for (i = 0; i < host_dm->target_count; i++) {
4941             struct dm_target_spec *spec = cur_data;
4942             uint32_t next;
4943             int slen;
4944 
4945             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4946             slen = strlen((char*)gspec + spec_size) + 1;
4947             next = spec->next;
4948             spec->next = sizeof(*spec) + slen;
4949             strcpy((char*)&spec[1], gspec + spec_size);
4950             gspec += next;
4951             cur_data += spec->next;
4952         }
4953         break;
4954     }
4955     default:
4956         ret = -TARGET_EINVAL;
4957         unlock_user(argptr, guest_data, 0);
4958         goto out;
4959     }
4960     unlock_user(argptr, guest_data, 0);
4961 
4962     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4963     if (!is_error(ret)) {
4964         guest_data = arg + host_dm->data_start;
4965         guest_data_size = host_dm->data_size - host_dm->data_start;
4966         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
4967         switch (ie->host_cmd) {
4968         case DM_REMOVE_ALL:
4969         case DM_DEV_CREATE:
4970         case DM_DEV_REMOVE:
4971         case DM_DEV_RENAME:
4972         case DM_DEV_SUSPEND:
4973         case DM_DEV_STATUS:
4974         case DM_TABLE_LOAD:
4975         case DM_TABLE_CLEAR:
4976         case DM_TARGET_MSG:
4977         case DM_DEV_SET_GEOMETRY:
4978             /* no return data */
4979             break;
4980         case DM_LIST_DEVICES:
4981         {
4982             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4983             uint32_t remaining_data = guest_data_size;
4984             void *cur_data = argptr;
4985             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4986             int nl_size = 12; /* can't use thunk_size due to alignment */
4987 
4988             while (1) {
4989                 uint32_t next = nl->next;
4990                 if (next) {
4991                     nl->next = nl_size + (strlen(nl->name) + 1);
4992                 }
4993                 if (remaining_data < nl->next) {
4994                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4995                     break;
4996                 }
4997                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4998                 strcpy(cur_data + nl_size, nl->name);
4999                 cur_data += nl->next;
5000                 remaining_data -= nl->next;
5001                 if (!next) {
5002                     break;
5003                 }
5004                 nl = (void*)nl + next;
5005             }
5006             break;
5007         }
5008         case DM_DEV_WAIT:
5009         case DM_TABLE_STATUS:
5010         {
5011             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5012             void *cur_data = argptr;
5013             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5014             int spec_size = thunk_type_size(arg_type, 0);
5015             int i;
5016 
5017             for (i = 0; i < host_dm->target_count; i++) {
5018                 uint32_t next = spec->next;
5019                 int slen = strlen((char*)&spec[1]) + 1;
5020                 spec->next = (cur_data - argptr) + spec_size + slen;
5021                 if (guest_data_size < spec->next) {
5022                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5023                     break;
5024                 }
5025                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5026                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5027                 cur_data = argptr + spec->next;
5028                 spec = (void*)host_dm + host_dm->data_start + next;
5029             }
5030             break;
5031         }
5032         case DM_TABLE_DEPS:
5033         {
5034             void *hdata = (void*)host_dm + host_dm->data_start;
5035             int count = *(uint32_t*)hdata;
5036             uint64_t *hdev = hdata + 8;
5037             uint64_t *gdev = argptr + 8;
5038             int i;
5039 
5040             *(uint32_t*)argptr = tswap32(count);
5041             for (i = 0; i < count; i++) {
5042                 *gdev = tswap64(*hdev);
5043                 gdev++;
5044                 hdev++;
5045             }
5046             break;
5047         }
5048         case DM_LIST_VERSIONS:
5049         {
5050             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5051             uint32_t remaining_data = guest_data_size;
5052             void *cur_data = argptr;
5053             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5054             int vers_size = thunk_type_size(arg_type, 0);
5055 
5056             while (1) {
5057                 uint32_t next = vers->next;
5058                 if (next) {
5059                     vers->next = vers_size + (strlen(vers->name) + 1);
5060                 }
5061                 if (remaining_data < vers->next) {
5062                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5063                     break;
5064                 }
5065                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5066                 strcpy(cur_data + vers_size, vers->name);
5067                 cur_data += vers->next;
5068                 remaining_data -= vers->next;
5069                 if (!next) {
5070                     break;
5071                 }
5072                 vers = (void*)vers + next;
5073             }
5074             break;
5075         }
5076         default:
5077             unlock_user(argptr, guest_data, 0);
5078             ret = -TARGET_EINVAL;
5079             goto out;
5080         }
5081         unlock_user(argptr, guest_data, guest_data_size);
5082 
5083         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5084         if (!argptr) {
5085             ret = -TARGET_EFAULT;
5086             goto out;
5087         }
5088         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5089         unlock_user(argptr, arg, target_size);
5090     }
5091 out:
5092     g_free(big_buf);
5093     return ret;
5094 }
5095 
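/*
 * Illustrative sketch (compiled out via #if 0): as I read the dm-ioctl
 * ABI, every device-mapper request is a fixed "struct dm_ioctl" header
 * followed by a variable-length area; data_start is the offset of that
 * area from the start of the header and data_size is the total size of
 * both, which is why the handler above locates payloads as base +
 * data_start and sizes buffers from data_size.  A minimal host-side
 * request layout (the name and version values are only an example):
 */
#if 0
#include <linux/dm-ioctl.h>
#include <string.h>

static void build_dev_create(struct dm_ioctl *io, size_t bufsz, const char *name)
{
    memset(io, 0, bufsz);
    io->version[0] = DM_VERSION_MAJOR;
    io->version[1] = DM_VERSION_MINOR;
    io->version[2] = DM_VERSION_PATCHLEVEL;
    io->data_start = sizeof(*io);       /* payload begins right after the header */
    io->data_size = bufsz;              /* header + payload area */
    strncpy(io->name, name, sizeof(io->name) - 1);
}
#endif
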
5096 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5097                                int cmd, abi_long arg)
5098 {
5099     void *argptr;
5100     int target_size;
5101     const argtype *arg_type = ie->arg_type;
5102     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5103     abi_long ret;
5104 
5105     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5106     struct blkpg_partition host_part;
5107 
5108     /* Read and convert blkpg */
5109     arg_type++;
5110     target_size = thunk_type_size(arg_type, 0);
5111     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5112     if (!argptr) {
5113         ret = -TARGET_EFAULT;
5114         goto out;
5115     }
5116     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5117     unlock_user(argptr, arg, 0);
5118 
5119     switch (host_blkpg->op) {
5120     case BLKPG_ADD_PARTITION:
5121     case BLKPG_DEL_PARTITION:
5122         /* payload is struct blkpg_partition */
5123         break;
5124     default:
5125         /* Unknown opcode */
5126         ret = -TARGET_EINVAL;
5127         goto out;
5128     }
5129 
5130     /* Read and convert blkpg->data */
5131     arg = (abi_long)(uintptr_t)host_blkpg->data;
5132     target_size = thunk_type_size(part_arg_type, 0);
5133     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5134     if (!argptr) {
5135         ret = -TARGET_EFAULT;
5136         goto out;
5137     }
5138     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5139     unlock_user(argptr, arg, 0);
5140 
5141     /* Swizzle the data pointer to our local copy and call! */
5142     host_blkpg->data = &host_part;
5143     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5144 
5145 out:
5146     return ret;
5147 }
5148 
5149 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5150                                 int fd, int cmd, abi_long arg)
5151 {
5152     const argtype *arg_type = ie->arg_type;
5153     const StructEntry *se;
5154     const argtype *field_types;
5155     const int *dst_offsets, *src_offsets;
5156     int target_size;
5157     void *argptr;
5158     abi_ulong *target_rt_dev_ptr = NULL;
5159     unsigned long *host_rt_dev_ptr = NULL;
5160     abi_long ret;
5161     int i;
5162 
5163     assert(ie->access == IOC_W);
5164     assert(*arg_type == TYPE_PTR);
5165     arg_type++;
5166     assert(*arg_type == TYPE_STRUCT);
5167     target_size = thunk_type_size(arg_type, 0);
5168     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5169     if (!argptr) {
5170         return -TARGET_EFAULT;
5171     }
5172     arg_type++;
5173     assert(*arg_type == (int)STRUCT_rtentry);
5174     se = struct_entries + *arg_type++;
5175     assert(se->convert[0] == NULL);
5176     /* convert struct here to be able to catch rt_dev string */
5177     field_types = se->field_types;
5178     dst_offsets = se->field_offsets[THUNK_HOST];
5179     src_offsets = se->field_offsets[THUNK_TARGET];
5180     for (i = 0; i < se->nb_fields; i++) {
5181         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5182             assert(*field_types == TYPE_PTRVOID);
5183             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5184             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5185             if (*target_rt_dev_ptr != 0) {
5186                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5187                                                   tswapal(*target_rt_dev_ptr));
5188                 if (!*host_rt_dev_ptr) {
5189                     unlock_user(argptr, arg, 0);
5190                     return -TARGET_EFAULT;
5191                 }
5192             } else {
5193                 *host_rt_dev_ptr = 0;
5194             }
5195             field_types++;
5196             continue;
5197         }
5198         field_types = thunk_convert(buf_temp + dst_offsets[i],
5199                                     argptr + src_offsets[i],
5200                                     field_types, THUNK_HOST);
5201     }
5202     unlock_user(argptr, arg, 0);
5203 
5204     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5205 
5206     assert(host_rt_dev_ptr != NULL);
5207     assert(target_rt_dev_ptr != NULL);
5208     if (*host_rt_dev_ptr != 0) {
5209         unlock_user((void *)*host_rt_dev_ptr,
5210                     *target_rt_dev_ptr, 0);
5211     }
5212     return ret;
5213 }
5214 
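/*
 * Illustrative sketch (compiled out via #if 0): struct rtentry is
 * converted field by field above because rt_dev is not plain data but a
 * pointer to a NUL-terminated device name in guest memory; the generic
 * thunk cannot follow it, so the handler locks the guest string and
 * substitutes a host pointer before issuing the ioctl.  The guest-side
 * shape the handler has to cope with (values are hypothetical):
 */
#if 0
#include <arpa/inet.h>
#include <net/route.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/ioctl.h>

static int add_default_route(int sock_fd, const char *dev)
{
    struct rtentry rt;
    struct sockaddr_in *gw = (struct sockaddr_in *)&rt.rt_gateway;

    memset(&rt, 0, sizeof(rt));
    gw->sin_family = AF_INET;
    gw->sin_addr.s_addr = htonl(0xc0a80001);    /* 192.168.0.1, example only */
    rt.rt_dst.sa_family = AF_INET;              /* 0.0.0.0/0 */
    rt.rt_genmask.sa_family = AF_INET;
    rt.rt_flags = RTF_UP | RTF_GATEWAY;
    rt.rt_dev = (char *)dev;                    /* pointer into caller memory */
    return ioctl(sock_fd, SIOCADDRT, &rt);
}
#endif
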
5215 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5216                                      int fd, int cmd, abi_long arg)
5217 {
5218     int sig = target_to_host_signal(arg);
5219     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5220 }
5221 
5222 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5223                                     int fd, int cmd, abi_long arg)
5224 {
5225     struct timeval tv;
5226     abi_long ret;
5227 
5228     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5229     if (is_error(ret)) {
5230         return ret;
5231     }
5232 
5233     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5234         if (copy_to_user_timeval(arg, &tv)) {
5235             return -TARGET_EFAULT;
5236         }
5237     } else {
5238         if (copy_to_user_timeval64(arg, &tv)) {
5239             return -TARGET_EFAULT;
5240         }
5241     }
5242 
5243     return ret;
5244 }
5245 
5246 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5247                                       int fd, int cmd, abi_long arg)
5248 {
5249     struct timespec ts;
5250     abi_long ret;
5251 
5252     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5253     if (is_error(ret)) {
5254         return ret;
5255     }
5256 
5257     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5258         if (host_to_target_timespec(arg, &ts)) {
5259             return -TARGET_EFAULT;
5260         }
5261     } else {
5262         if (host_to_target_timespec64(arg, &ts)) {
5263             return -TARGET_EFAULT;
5264         }
5265     }
5266 
5267     return ret;
5268 }
5269 
5270 #ifdef TIOCGPTPEER
5271 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5272                                      int fd, int cmd, abi_long arg)
5273 {
5274     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5275     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5276 }
5277 #endif
5278 
5279 static IOCTLEntry ioctl_entries[] = {
5280 #define IOCTL(cmd, access, ...) \
5281     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5282 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5283     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5284 #define IOCTL_IGNORE(cmd) \
5285     { TARGET_ ## cmd, 0, #cmd },
5286 #include "ioctls.h"
5287     { 0, 0, },
5288 };
5289 
5290 /* ??? Implement proper locking for ioctls.  */
5291 /* do_ioctl() must return target values and target errnos. */
5292 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5293 {
5294     const IOCTLEntry *ie;
5295     const argtype *arg_type;
5296     abi_long ret;
5297     uint8_t buf_temp[MAX_STRUCT_SIZE];
5298     int target_size;
5299     void *argptr;
5300 
5301     ie = ioctl_entries;
5302     for(;;) {
5303         if (ie->target_cmd == 0) {
5304             qemu_log_mask(
5305                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5306             return -TARGET_ENOSYS;
5307         }
5308         if (ie->target_cmd == cmd)
5309             break;
5310         ie++;
5311     }
5312     arg_type = ie->arg_type;
5313     if (ie->do_ioctl) {
5314         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5315     } else if (!ie->host_cmd) {
5316         /* Some architectures define BSD ioctls in their headers
5317            that are not implemented in Linux.  */
5318         return -TARGET_ENOSYS;
5319     }
5320 
5321     switch(arg_type[0]) {
5322     case TYPE_NULL:
5323         /* no argument */
5324         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5325         break;
5326     case TYPE_PTRVOID:
5327     case TYPE_INT:
5328     case TYPE_LONG:
5329     case TYPE_ULONG:
5330         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5331         break;
5332     case TYPE_PTR:
5333         arg_type++;
5334         target_size = thunk_type_size(arg_type, 0);
5335         switch(ie->access) {
5336         case IOC_R:
5337             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5338             if (!is_error(ret)) {
5339                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5340                 if (!argptr)
5341                     return -TARGET_EFAULT;
5342                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5343                 unlock_user(argptr, arg, target_size);
5344             }
5345             break;
5346         case IOC_W:
5347             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5348             if (!argptr)
5349                 return -TARGET_EFAULT;
5350             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5351             unlock_user(argptr, arg, 0);
5352             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5353             break;
5354         default:
5355         case IOC_RW:
5356             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5357             if (!argptr)
5358                 return -TARGET_EFAULT;
5359             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5360             unlock_user(argptr, arg, 0);
5361             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5362             if (!is_error(ret)) {
5363                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5364                 if (!argptr)
5365                     return -TARGET_EFAULT;
5366                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5367                 unlock_user(argptr, arg, target_size);
5368             }
5369             break;
5370         }
5371         break;
5372     default:
5373         qemu_log_mask(LOG_UNIMP,
5374                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5375                       (long)cmd, arg_type[0]);
5376         ret = -TARGET_ENOSYS;
5377         break;
5378     }
5379     return ret;
5380 }
5381 
5382 static const bitmask_transtbl iflag_tbl[] = {
5383         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5384         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5385         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5386         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5387         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5388         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5389         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5390         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5391         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5392         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5393         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5394         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5395         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5396         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5397         { 0, 0, 0, 0 }
5398 };
5399 
5400 static const bitmask_transtbl oflag_tbl[] = {
5401 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5402 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5403 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5404 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5405 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5406 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5407 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5408 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5409 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5410 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5411 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5412 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5413 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5414 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5415 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5416 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5417 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5418 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5419 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5420 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5421 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5422 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5423 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5424 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5425 	{ 0, 0, 0, 0 }
5426 };
5427 
5428 static const bitmask_transtbl cflag_tbl[] = {
5429 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5430 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5431 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5432 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5433 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5434 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5435 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5436 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5437 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5438 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5439 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5440 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5441 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5442 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5443 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5444 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5445 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5446 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5447 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5448 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5449 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5450 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5451 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5452 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5453 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5454 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5455 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5456 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5457 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5458 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5459 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5460 	{ 0, 0, 0, 0 }
5461 };
5462 
5463 static const bitmask_transtbl lflag_tbl[] = {
5464 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5465 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5466 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5467 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5468 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5469 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5470 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5471 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5472 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5473 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5474 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5475 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5476 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5477 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5478 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5479 	{ 0, 0, 0, 0 }
5480 };
5481 
5482 static void target_to_host_termios (void *dst, const void *src)
5483 {
5484     struct host_termios *host = dst;
5485     const struct target_termios *target = src;
5486 
5487     host->c_iflag =
5488         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5489     host->c_oflag =
5490         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5491     host->c_cflag =
5492         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5493     host->c_lflag =
5494         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5495     host->c_line = target->c_line;
5496 
5497     memset(host->c_cc, 0, sizeof(host->c_cc));
5498     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5499     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5500     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5501     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5502     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5503     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5504     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5505     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5506     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5507     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5508     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5509     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5510     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5511     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5512     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5513     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5514     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5515 }
5516 
5517 static void host_to_target_termios (void *dst, const void *src)
5518 {
5519     struct target_termios *target = dst;
5520     const struct host_termios *host = src;
5521 
5522     target->c_iflag =
5523         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5524     target->c_oflag =
5525         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5526     target->c_cflag =
5527         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5528     target->c_lflag =
5529         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5530     target->c_line = host->c_line;
5531 
5532     memset(target->c_cc, 0, sizeof(target->c_cc));
5533     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5534     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5535     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5536     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5537     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5538     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5539     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5540     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5541     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5542     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5543     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5544     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5545     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5546     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5547     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5548     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5549     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5550 }
5551 
5552 static const StructEntry struct_termios_def = {
5553     .convert = { host_to_target_termios, target_to_host_termios },
5554     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5555     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5556 };
5557 
5558 static bitmask_transtbl mmap_flags_tbl[] = {
5559     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5560     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5561     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5562     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5563       MAP_ANONYMOUS, MAP_ANONYMOUS },
5564     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5565       MAP_GROWSDOWN, MAP_GROWSDOWN },
5566     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5567       MAP_DENYWRITE, MAP_DENYWRITE },
5568     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5569       MAP_EXECUTABLE, MAP_EXECUTABLE },
5570     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5571     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5572       MAP_NORESERVE, MAP_NORESERVE },
5573     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5574     /* MAP_STACK has been ignored by the kernel for quite some time.
5575        Recognize it for the target insofar as we do not want to pass
5576        it through to the host.  */
5577     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5578     { 0, 0, 0, 0 }
5579 };
5580 
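/*
 * Illustrative sketch (compiled out via #if 0): my reading of how these
 * translation tables are consumed is that each row pairs a (mask, bits)
 * test on one side with the (mask, bits) to set on the other, so
 * multi-bit fields such as TARGET_CSIZE/CS5..CS8 translate as well as
 * single flags, and a row like TARGET_MAP_STACK above can map a
 * recognized target flag to nothing on the host.  A stripped-down loop
 * with the same shape (my own struct and values, not the real helper):
 */
#if 0
static unsigned int translate_bits(unsigned int in)
{
    static const struct { unsigned in_mask, in_bits, out_mask, out_bits; } tbl[] = {
        { 0x3, 0x1, 0x30, 0x10 },   /* hypothetical multi-bit field translation */
        { 0x4, 0x4, 0x00, 0x00 },   /* recognized but dropped, like MAP_STACK */
        { 0, 0, 0, 0 }
    };
    unsigned int out = 0;
    int i;

    for (i = 0; tbl[i].in_mask; i++) {
        if ((in & tbl[i].in_mask) == tbl[i].in_bits) {
            out |= tbl[i].out_bits;
        }
    }
    return out;
}
#endif
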
5581 /*
5582  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5583  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5584  */
5585 #if defined(TARGET_I386)
5586 
5587 /* NOTE: there is really one LDT for all the threads */
5588 static uint8_t *ldt_table;
5589 
5590 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5591 {
5592     int size;
5593     void *p;
5594 
5595     if (!ldt_table)
5596         return 0;
5597     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5598     if (size > bytecount)
5599         size = bytecount;
5600     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5601     if (!p)
5602         return -TARGET_EFAULT;
5603     /* ??? Should this by byteswapped?  */
5604     memcpy(p, ldt_table, size);
5605     unlock_user(p, ptr, size);
5606     return size;
5607 }
5608 
5609 /* XXX: add locking support */
5610 static abi_long write_ldt(CPUX86State *env,
5611                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5612 {
5613     struct target_modify_ldt_ldt_s ldt_info;
5614     struct target_modify_ldt_ldt_s *target_ldt_info;
5615     int seg_32bit, contents, read_exec_only, limit_in_pages;
5616     int seg_not_present, useable, lm;
5617     uint32_t *lp, entry_1, entry_2;
5618 
5619     if (bytecount != sizeof(ldt_info))
5620         return -TARGET_EINVAL;
5621     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5622         return -TARGET_EFAULT;
5623     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5624     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5625     ldt_info.limit = tswap32(target_ldt_info->limit);
5626     ldt_info.flags = tswap32(target_ldt_info->flags);
5627     unlock_user_struct(target_ldt_info, ptr, 0);
5628 
5629     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5630         return -TARGET_EINVAL;
5631     seg_32bit = ldt_info.flags & 1;
5632     contents = (ldt_info.flags >> 1) & 3;
5633     read_exec_only = (ldt_info.flags >> 3) & 1;
5634     limit_in_pages = (ldt_info.flags >> 4) & 1;
5635     seg_not_present = (ldt_info.flags >> 5) & 1;
5636     useable = (ldt_info.flags >> 6) & 1;
5637 #ifdef TARGET_ABI32
5638     lm = 0;
5639 #else
5640     lm = (ldt_info.flags >> 7) & 1;
5641 #endif
5642     if (contents == 3) {
5643         if (oldmode)
5644             return -TARGET_EINVAL;
5645         if (seg_not_present == 0)
5646             return -TARGET_EINVAL;
5647     }
5648     /* allocate the LDT */
5649     if (!ldt_table) {
5650         env->ldt.base = target_mmap(0,
5651                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5652                                     PROT_READ|PROT_WRITE,
5653                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5654         if (env->ldt.base == -1)
5655             return -TARGET_ENOMEM;
5656         memset(g2h(env->ldt.base), 0,
5657                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5658         env->ldt.limit = 0xffff;
5659         ldt_table = g2h(env->ldt.base);
5660     }
5661 
5662     /* NOTE: same code as Linux kernel */
5663     /* Allow LDTs to be cleared by the user. */
5664     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5665         if (oldmode ||
5666             (contents == 0		&&
5667              read_exec_only == 1	&&
5668              seg_32bit == 0		&&
5669              limit_in_pages == 0	&&
5670              seg_not_present == 1	&&
5671              useable == 0 )) {
5672             entry_1 = 0;
5673             entry_2 = 0;
5674             goto install;
5675         }
5676     }
5677 
5678     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5679         (ldt_info.limit & 0x0ffff);
5680     entry_2 = (ldt_info.base_addr & 0xff000000) |
5681         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5682         (ldt_info.limit & 0xf0000) |
5683         ((read_exec_only ^ 1) << 9) |
5684         (contents << 10) |
5685         ((seg_not_present ^ 1) << 15) |
5686         (seg_32bit << 22) |
5687         (limit_in_pages << 23) |
5688         (lm << 21) |
5689         0x7000;
5690     if (!oldmode)
5691         entry_2 |= (useable << 20);
5692 
5693     /* Install the new entry ...  */
5694 install:
5695     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5696     lp[0] = tswap32(entry_1);
5697     lp[1] = tswap32(entry_2);
5698     return 0;
5699 }
5700 
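/*
 * Worked example (illustrative): the entry_1/entry_2 packing above is the
 * usual x86 segment-descriptor layout split into two 32-bit words.  For a
 * flat 4 GiB, 32-bit, read/write data segment installed in new mode
 * (oldmode == 0) -- base_addr = 0, limit = 0xfffff, flags = 0x51, i.e.
 * seg_32bit = 1, limit_in_pages = 1, useable = 1, everything else 0 --
 * the code above should produce, if I am reading the packing correctly:
 *
 *   entry_1 = 0x0000ffff   (base 15..0 = 0, limit 15..0 = 0xffff)
 *   entry_2 = 0x00dff200   (G = 1, D/B = 1, AVL = 1, limit 19..16 = 0xf,
 *                           access byte 0xf2: present, DPL 3, writable data)
 */
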
5701 /* specific and weird i386 syscalls */
5702 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5703                               unsigned long bytecount)
5704 {
5705     abi_long ret;
5706 
5707     switch (func) {
5708     case 0:
5709         ret = read_ldt(ptr, bytecount);
5710         break;
5711     case 1:
5712         ret = write_ldt(env, ptr, bytecount, 1);
5713         break;
5714     case 0x11:
5715         ret = write_ldt(env, ptr, bytecount, 0);
5716         break;
5717     default:
5718         ret = -TARGET_ENOSYS;
5719         break;
5720     }
5721     return ret;
5722 }
5723 
5724 #if defined(TARGET_ABI32)
5725 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5726 {
5727     uint64_t *gdt_table = g2h(env->gdt.base);
5728     struct target_modify_ldt_ldt_s ldt_info;
5729     struct target_modify_ldt_ldt_s *target_ldt_info;
5730     int seg_32bit, contents, read_exec_only, limit_in_pages;
5731     int seg_not_present, useable, lm;
5732     uint32_t *lp, entry_1, entry_2;
5733     int i;
5734 
5735     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5736     if (!target_ldt_info)
5737         return -TARGET_EFAULT;
5738     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5739     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5740     ldt_info.limit = tswap32(target_ldt_info->limit);
5741     ldt_info.flags = tswap32(target_ldt_info->flags);
5742     if (ldt_info.entry_number == -1) {
5743         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5744             if (gdt_table[i] == 0) {
5745                 ldt_info.entry_number = i;
5746                 target_ldt_info->entry_number = tswap32(i);
5747                 break;
5748             }
5749         }
5750     }
5751     unlock_user_struct(target_ldt_info, ptr, 1);
5752 
5753     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5754         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5755            return -TARGET_EINVAL;
5756     seg_32bit = ldt_info.flags & 1;
5757     contents = (ldt_info.flags >> 1) & 3;
5758     read_exec_only = (ldt_info.flags >> 3) & 1;
5759     limit_in_pages = (ldt_info.flags >> 4) & 1;
5760     seg_not_present = (ldt_info.flags >> 5) & 1;
5761     useable = (ldt_info.flags >> 6) & 1;
5762 #ifdef TARGET_ABI32
5763     lm = 0;
5764 #else
5765     lm = (ldt_info.flags >> 7) & 1;
5766 #endif
5767 
5768     if (contents == 3) {
5769         if (seg_not_present == 0)
5770             return -TARGET_EINVAL;
5771     }
5772 
5773     /* NOTE: same code as Linux kernel */
5774     /* Allow LDTs to be cleared by the user. */
5775     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5776         if ((contents == 0             &&
5777              read_exec_only == 1       &&
5778              seg_32bit == 0            &&
5779              limit_in_pages == 0       &&
5780              seg_not_present == 1      &&
5781              useable == 0 )) {
5782             entry_1 = 0;
5783             entry_2 = 0;
5784             goto install;
5785         }
5786     }
5787 
5788     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5789         (ldt_info.limit & 0x0ffff);
5790     entry_2 = (ldt_info.base_addr & 0xff000000) |
5791         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5792         (ldt_info.limit & 0xf0000) |
5793         ((read_exec_only ^ 1) << 9) |
5794         (contents << 10) |
5795         ((seg_not_present ^ 1) << 15) |
5796         (seg_32bit << 22) |
5797         (limit_in_pages << 23) |
5798         (useable << 20) |
5799         (lm << 21) |
5800         0x7000;
5801 
5802     /* Install the new entry ...  */
5803 install:
5804     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5805     lp[0] = tswap32(entry_1);
5806     lp[1] = tswap32(entry_2);
5807     return 0;
5808 }
5809 
5810 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5811 {
5812     struct target_modify_ldt_ldt_s *target_ldt_info;
5813     uint64_t *gdt_table = g2h(env->gdt.base);
5814     uint32_t base_addr, limit, flags;
5815     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5816     int seg_not_present, useable, lm;
5817     uint32_t *lp, entry_1, entry_2;
5818 
5819     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5820     if (!target_ldt_info)
5821         return -TARGET_EFAULT;
5822     idx = tswap32(target_ldt_info->entry_number);
5823     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5824         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5825         unlock_user_struct(target_ldt_info, ptr, 1);
5826         return -TARGET_EINVAL;
5827     }
5828     lp = (uint32_t *)(gdt_table + idx);
5829     entry_1 = tswap32(lp[0]);
5830     entry_2 = tswap32(lp[1]);
5831 
5832     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5833     contents = (entry_2 >> 10) & 3;
5834     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5835     seg_32bit = (entry_2 >> 22) & 1;
5836     limit_in_pages = (entry_2 >> 23) & 1;
5837     useable = (entry_2 >> 20) & 1;
5838 #ifdef TARGET_ABI32
5839     lm = 0;
5840 #else
5841     lm = (entry_2 >> 21) & 1;
5842 #endif
5843     flags = (seg_32bit << 0) | (contents << 1) |
5844         (read_exec_only << 3) | (limit_in_pages << 4) |
5845         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5846     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5847     base_addr = (entry_1 >> 16) |
5848         (entry_2 & 0xff000000) |
5849         ((entry_2 & 0xff) << 16);
5850     target_ldt_info->base_addr = tswapal(base_addr);
5851     target_ldt_info->limit = tswap32(limit);
5852     target_ldt_info->flags = tswap32(flags);
5853     unlock_user_struct(target_ldt_info, ptr, 1);
5854     return 0;
5855 }
5856 
5857 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5858 {
5859     return -TARGET_ENOSYS;
5860 }
5861 #else
5862 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5863 {
5864     abi_long ret = 0;
5865     abi_ulong val;
5866     int idx;
5867 
5868     switch(code) {
5869     case TARGET_ARCH_SET_GS:
5870     case TARGET_ARCH_SET_FS:
5871         if (code == TARGET_ARCH_SET_GS)
5872             idx = R_GS;
5873         else
5874             idx = R_FS;
5875         cpu_x86_load_seg(env, idx, 0);
5876         env->segs[idx].base = addr;
5877         break;
5878     case TARGET_ARCH_GET_GS:
5879     case TARGET_ARCH_GET_FS:
5880         if (code == TARGET_ARCH_GET_GS)
5881             idx = R_GS;
5882         else
5883             idx = R_FS;
5884         val = env->segs[idx].base;
5885         if (put_user(val, addr, abi_ulong))
5886             ret = -TARGET_EFAULT;
5887         break;
5888     default:
5889         ret = -TARGET_EINVAL;
5890         break;
5891     }
5892     return ret;
5893 }
5894 #endif /* defined(TARGET_ABI32) */
5895 
5896 #endif /* defined(TARGET_I386) */
5897 
5898 #define NEW_STACK_SIZE 0x40000
5899 
5900 
5901 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5902 typedef struct {
5903     CPUArchState *env;
5904     pthread_mutex_t mutex;
5905     pthread_cond_t cond;
5906     pthread_t thread;
5907     uint32_t tid;
5908     abi_ulong child_tidptr;
5909     abi_ulong parent_tidptr;
5910     sigset_t sigmask;
5911 } new_thread_info;
5912 
5913 static void *clone_func(void *arg)
5914 {
5915     new_thread_info *info = arg;
5916     CPUArchState *env;
5917     CPUState *cpu;
5918     TaskState *ts;
5919 
5920     rcu_register_thread();
5921     tcg_register_thread();
5922     env = info->env;
5923     cpu = env_cpu(env);
5924     thread_cpu = cpu;
5925     ts = (TaskState *)cpu->opaque;
5926     info->tid = sys_gettid();
5927     task_settid(ts);
5928     if (info->child_tidptr)
5929         put_user_u32(info->tid, info->child_tidptr);
5930     if (info->parent_tidptr)
5931         put_user_u32(info->tid, info->parent_tidptr);
5932     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5933     /* Enable signals.  */
5934     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5935     /* Signal to the parent that we're ready.  */
5936     pthread_mutex_lock(&info->mutex);
5937     pthread_cond_broadcast(&info->cond);
5938     pthread_mutex_unlock(&info->mutex);
5939     /* Wait until the parent has finished initializing the tls state.  */
5940     pthread_mutex_lock(&clone_lock);
5941     pthread_mutex_unlock(&clone_lock);
5942     cpu_loop(env);
5943     /* never exits */
5944     return NULL;
5945 }
5946 
5947 /* do_fork() must return host values and target errnos (unlike most
5948    do_*() functions). */
5949 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5950                    abi_ulong parent_tidptr, target_ulong newtls,
5951                    abi_ulong child_tidptr)
5952 {
5953     CPUState *cpu = env_cpu(env);
5954     int ret;
5955     TaskState *ts;
5956     CPUState *new_cpu;
5957     CPUArchState *new_env;
5958     sigset_t sigmask;
5959 
5960     flags &= ~CLONE_IGNORED_FLAGS;
5961 
5962     /* Emulate vfork() with fork() */
5963     if (flags & CLONE_VFORK)
5964         flags &= ~(CLONE_VFORK | CLONE_VM);
5965 
5966     if (flags & CLONE_VM) {
5967         TaskState *parent_ts = (TaskState *)cpu->opaque;
5968         new_thread_info info;
5969         pthread_attr_t attr;
5970 
5971         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5972             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5973             return -TARGET_EINVAL;
5974         }
5975 
5976         ts = g_new0(TaskState, 1);
5977         init_task_state(ts);
5978 
5979         /* Grab a mutex so that thread setup appears atomic.  */
5980         pthread_mutex_lock(&clone_lock);
5981 
5982         /* we create a new CPU instance. */
5983         new_env = cpu_copy(env);
5984         /* Init regs that differ from the parent.  */
5985         cpu_clone_regs_child(new_env, newsp, flags);
5986         cpu_clone_regs_parent(env, flags);
5987         new_cpu = env_cpu(new_env);
5988         new_cpu->opaque = ts;
5989         ts->bprm = parent_ts->bprm;
5990         ts->info = parent_ts->info;
5991         ts->signal_mask = parent_ts->signal_mask;
5992 
5993         if (flags & CLONE_CHILD_CLEARTID) {
5994             ts->child_tidptr = child_tidptr;
5995         }
5996 
5997         if (flags & CLONE_SETTLS) {
5998             cpu_set_tls (new_env, newtls);
5999         }
6000 
6001         memset(&info, 0, sizeof(info));
6002         pthread_mutex_init(&info.mutex, NULL);
6003         pthread_mutex_lock(&info.mutex);
6004         pthread_cond_init(&info.cond, NULL);
6005         info.env = new_env;
6006         if (flags & CLONE_CHILD_SETTID) {
6007             info.child_tidptr = child_tidptr;
6008         }
6009         if (flags & CLONE_PARENT_SETTID) {
6010             info.parent_tidptr = parent_tidptr;
6011         }
6012 
6013         ret = pthread_attr_init(&attr);
6014         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6015         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6016         /* It is not safe to deliver signals until the child has finished
6017            initializing, so temporarily block all signals.  */
6018         sigfillset(&sigmask);
6019         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6020         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6021 
6022         /* If this is our first additional thread, we need to ensure we
6023          * generate code for parallel execution and flush old translations.
6024          */
6025         if (!parallel_cpus) {
6026             parallel_cpus = true;
6027             tb_flush(cpu);
6028         }
6029 
6030         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6031         /* TODO: Free new CPU state if thread creation failed.  */
6032 
6033         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6034         pthread_attr_destroy(&attr);
6035         if (ret == 0) {
6036             /* Wait for the child to initialize.  */
6037             pthread_cond_wait(&info.cond, &info.mutex);
6038             ret = info.tid;
6039         } else {
6040             ret = -1;
6041         }
6042         pthread_mutex_unlock(&info.mutex);
6043         pthread_cond_destroy(&info.cond);
6044         pthread_mutex_destroy(&info.mutex);
6045         pthread_mutex_unlock(&clone_lock);
6046     } else {
6047         /* if no CLONE_VM, we consider it a fork */
6048         if (flags & CLONE_INVALID_FORK_FLAGS) {
6049             return -TARGET_EINVAL;
6050         }
6051 
6052         /* We can't support custom termination signals */
6053         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6054             return -TARGET_EINVAL;
6055         }
6056 
6057         if (block_signals()) {
6058             return -TARGET_ERESTARTSYS;
6059         }
6060 
6061         fork_start();
6062         ret = fork();
6063         if (ret == 0) {
6064             /* Child Process.  */
6065             cpu_clone_regs_child(env, newsp, flags);
6066             fork_end(1);
6067             /* There is a race condition here.  The parent process could
6068                theoretically read the TID in the child process before the child
6069                tid is set.  This would require using either ptrace
6070                (not implemented) or having *_tidptr point at a shared memory
6071                mapping.  We can't repeat the spinlock hack used above because
6072                the child process gets its own copy of the lock.  */
6073             if (flags & CLONE_CHILD_SETTID)
6074                 put_user_u32(sys_gettid(), child_tidptr);
6075             if (flags & CLONE_PARENT_SETTID)
6076                 put_user_u32(sys_gettid(), parent_tidptr);
6077             ts = (TaskState *)cpu->opaque;
6078             if (flags & CLONE_SETTLS)
6079                 cpu_set_tls (env, newtls);
6080             if (flags & CLONE_CHILD_CLEARTID)
6081                 ts->child_tidptr = child_tidptr;
6082         } else {
6083             cpu_clone_regs_parent(env, flags);
6084             fork_end(0);
6085         }
6086     }
6087     return ret;
6088 }
6089 
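/*
 * Illustrative sketch (compiled out via #if 0): the CLONE_VM path above
 * uses two locks -- info.mutex/info.cond so the parent can wait until the
 * new thread has published its TID, and clone_lock, held by the parent
 * across the whole setup, so the new thread blocks until that setup is
 * finished before entering its main loop.  The bare handshake pattern,
 * with hypothetical names:
 */
#if 0
#include <pthread.h>

static pthread_mutex_t setup_lock = PTHREAD_MUTEX_INITIALIZER;

struct handshake {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    int tid;                            /* published by the child */
};

static void *child_fn(void *opaque)
{
    struct handshake *hs = opaque;

    pthread_mutex_lock(&hs->mutex);
    hs->tid = 1234;                     /* stand-in for a gettid() call */
    pthread_cond_broadcast(&hs->cond);
    pthread_mutex_unlock(&hs->mutex);

    /* Block until the parent has finished the remaining setup. */
    pthread_mutex_lock(&setup_lock);
    pthread_mutex_unlock(&setup_lock);
    /* ... real code would enter its main loop here ... */
    return NULL;
}

static int spawn_child(void)
{
    struct handshake hs = { .tid = 0 };
    pthread_t th;
    int tid;

    pthread_mutex_init(&hs.mutex, NULL);
    pthread_cond_init(&hs.cond, NULL);

    pthread_mutex_lock(&setup_lock);         /* child must not run ahead of us */
    pthread_mutex_lock(&hs.mutex);
    pthread_create(&th, NULL, child_fn, &hs);
    pthread_cond_wait(&hs.cond, &hs.mutex);  /* wakes once tid is published */
    tid = hs.tid;
    pthread_mutex_unlock(&hs.mutex);
    /* ... per-thread setup that must finish before the child runs ... */
    pthread_mutex_unlock(&setup_lock);       /* now the child may proceed */

    pthread_cond_destroy(&hs.cond);
    pthread_mutex_destroy(&hs.mutex);
    return tid;
}
#endif
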
6090 /* warning: doesn't handle Linux-specific flags... */
6091 static int target_to_host_fcntl_cmd(int cmd)
6092 {
6093     int ret;
6094 
6095     switch(cmd) {
6096     case TARGET_F_DUPFD:
6097     case TARGET_F_GETFD:
6098     case TARGET_F_SETFD:
6099     case TARGET_F_GETFL:
6100     case TARGET_F_SETFL:
6101     case TARGET_F_OFD_GETLK:
6102     case TARGET_F_OFD_SETLK:
6103     case TARGET_F_OFD_SETLKW:
6104         ret = cmd;
6105         break;
6106     case TARGET_F_GETLK:
6107         ret = F_GETLK64;
6108         break;
6109     case TARGET_F_SETLK:
6110         ret = F_SETLK64;
6111         break;
6112     case TARGET_F_SETLKW:
6113         ret = F_SETLKW64;
6114         break;
6115     case TARGET_F_GETOWN:
6116         ret = F_GETOWN;
6117         break;
6118     case TARGET_F_SETOWN:
6119         ret = F_SETOWN;
6120         break;
6121     case TARGET_F_GETSIG:
6122         ret = F_GETSIG;
6123         break;
6124     case TARGET_F_SETSIG:
6125         ret = F_SETSIG;
6126         break;
6127 #if TARGET_ABI_BITS == 32
6128     case TARGET_F_GETLK64:
6129         ret = F_GETLK64;
6130         break;
6131     case TARGET_F_SETLK64:
6132         ret = F_SETLK64;
6133         break;
6134     case TARGET_F_SETLKW64:
6135         ret = F_SETLKW64;
6136         break;
6137 #endif
6138     case TARGET_F_SETLEASE:
6139         ret = F_SETLEASE;
6140         break;
6141     case TARGET_F_GETLEASE:
6142         ret = F_GETLEASE;
6143         break;
6144 #ifdef F_DUPFD_CLOEXEC
6145     case TARGET_F_DUPFD_CLOEXEC:
6146         ret = F_DUPFD_CLOEXEC;
6147         break;
6148 #endif
6149     case TARGET_F_NOTIFY:
6150         ret = F_NOTIFY;
6151         break;
6152 #ifdef F_GETOWN_EX
6153     case TARGET_F_GETOWN_EX:
6154         ret = F_GETOWN_EX;
6155         break;
6156 #endif
6157 #ifdef F_SETOWN_EX
6158     case TARGET_F_SETOWN_EX:
6159         ret = F_SETOWN_EX;
6160         break;
6161 #endif
6162 #ifdef F_SETPIPE_SZ
6163     case TARGET_F_SETPIPE_SZ:
6164         ret = F_SETPIPE_SZ;
6165         break;
6166     case TARGET_F_GETPIPE_SZ:
6167         ret = F_GETPIPE_SZ;
6168         break;
6169 #endif
6170     default:
6171         ret = -TARGET_EINVAL;
6172         break;
6173     }
6174 
6175 #if defined(__powerpc64__)
6176     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6177      * 14, which the kernel does not support. The glibc fcntl call actually
6178      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6179      * the syscall directly, adjust to what the kernel supports.
6180      */
6181     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6182         ret -= F_GETLK64 - 5;
6183     }
6184 #endif
6185 
6186     return ret;
6187 }
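/*
 * Illustrative note: a guest that issues fcntl(fd, F_GETLK, ...) reaches us
 * as TARGET_F_GETLK and is forwarded to the host as F_GETLK64, so 64-bit
 * file offsets survive the translation, e.g.:
 *
 *     int host_cmd = target_to_host_fcntl_cmd(TARGET_F_GETLK);
 *     // host_cmd == F_GETLK64 (possibly renumbered on PPC64, see above)
 */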
6188 
6189 #define FLOCK_TRANSTBL \
6190     switch (type) { \
6191     TRANSTBL_CONVERT(F_RDLCK); \
6192     TRANSTBL_CONVERT(F_WRLCK); \
6193     TRANSTBL_CONVERT(F_UNLCK); \
6194     TRANSTBL_CONVERT(F_EXLCK); \
6195     TRANSTBL_CONVERT(F_SHLCK); \
6196     }
6197 
6198 static int target_to_host_flock(int type)
6199 {
6200 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6201     FLOCK_TRANSTBL
6202 #undef  TRANSTBL_CONVERT
6203     return -TARGET_EINVAL;
6204 }
6205 
6206 static int host_to_target_flock(int type)
6207 {
6208 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6209     FLOCK_TRANSTBL
6210 #undef  TRANSTBL_CONVERT
6211     /* if we don't know how to convert the value coming
6212      * from the host, we copy it to the target field as-is
6213      */
6214     return type;
6215 }
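/*
 * Illustrative note: FLOCK_TRANSTBL is an X-macro; each converter above
 * redefines TRANSTBL_CONVERT before expanding it, so target_to_host_flock()
 * effectively expands to
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     ...
 * while host_to_target_flock() expands to the mirror-image cases.
 */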
6216 
6217 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6218                                             abi_ulong target_flock_addr)
6219 {
6220     struct target_flock *target_fl;
6221     int l_type;
6222 
6223     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6224         return -TARGET_EFAULT;
6225     }
6226 
6227     __get_user(l_type, &target_fl->l_type);
6228     l_type = target_to_host_flock(l_type);
6229     if (l_type < 0) {
6230         return l_type;
6231     }
6232     fl->l_type = l_type;
6233     __get_user(fl->l_whence, &target_fl->l_whence);
6234     __get_user(fl->l_start, &target_fl->l_start);
6235     __get_user(fl->l_len, &target_fl->l_len);
6236     __get_user(fl->l_pid, &target_fl->l_pid);
6237     unlock_user_struct(target_fl, target_flock_addr, 0);
6238     return 0;
6239 }
6240 
6241 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6242                                           const struct flock64 *fl)
6243 {
6244     struct target_flock *target_fl;
6245     short l_type;
6246 
6247     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6248         return -TARGET_EFAULT;
6249     }
6250 
6251     l_type = host_to_target_flock(fl->l_type);
6252     __put_user(l_type, &target_fl->l_type);
6253     __put_user(fl->l_whence, &target_fl->l_whence);
6254     __put_user(fl->l_start, &target_fl->l_start);
6255     __put_user(fl->l_len, &target_fl->l_len);
6256     __put_user(fl->l_pid, &target_fl->l_pid);
6257     unlock_user_struct(target_fl, target_flock_addr, 1);
6258     return 0;
6259 }
6260 
6261 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6262 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6263 
6264 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6265 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6266                                                    abi_ulong target_flock_addr)
6267 {
6268     struct target_oabi_flock64 *target_fl;
6269     int l_type;
6270 
6271     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6272         return -TARGET_EFAULT;
6273     }
6274 
6275     __get_user(l_type, &target_fl->l_type);
6276     l_type = target_to_host_flock(l_type);
6277     if (l_type < 0) {
6278         return l_type;
6279     }
6280     fl->l_type = l_type;
6281     __get_user(fl->l_whence, &target_fl->l_whence);
6282     __get_user(fl->l_start, &target_fl->l_start);
6283     __get_user(fl->l_len, &target_fl->l_len);
6284     __get_user(fl->l_pid, &target_fl->l_pid);
6285     unlock_user_struct(target_fl, target_flock_addr, 0);
6286     return 0;
6287 }
6288 
6289 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6290                                                  const struct flock64 *fl)
6291 {
6292     struct target_oabi_flock64 *target_fl;
6293     short l_type;
6294 
6295     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6296         return -TARGET_EFAULT;
6297     }
6298 
6299     l_type = host_to_target_flock(fl->l_type);
6300     __put_user(l_type, &target_fl->l_type);
6301     __put_user(fl->l_whence, &target_fl->l_whence);
6302     __put_user(fl->l_start, &target_fl->l_start);
6303     __put_user(fl->l_len, &target_fl->l_len);
6304     __put_user(fl->l_pid, &target_fl->l_pid);
6305     unlock_user_struct(target_fl, target_flock_addr, 1);
6306     return 0;
6307 }
6308 #endif
6309 
6310 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6311                                               abi_ulong target_flock_addr)
6312 {
6313     struct target_flock64 *target_fl;
6314     int l_type;
6315 
6316     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6317         return -TARGET_EFAULT;
6318     }
6319 
6320     __get_user(l_type, &target_fl->l_type);
6321     l_type = target_to_host_flock(l_type);
6322     if (l_type < 0) {
6323         return l_type;
6324     }
6325     fl->l_type = l_type;
6326     __get_user(fl->l_whence, &target_fl->l_whence);
6327     __get_user(fl->l_start, &target_fl->l_start);
6328     __get_user(fl->l_len, &target_fl->l_len);
6329     __get_user(fl->l_pid, &target_fl->l_pid);
6330     unlock_user_struct(target_fl, target_flock_addr, 0);
6331     return 0;
6332 }
6333 
6334 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6335                                             const struct flock64 *fl)
6336 {
6337     struct target_flock64 *target_fl;
6338     short l_type;
6339 
6340     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6341         return -TARGET_EFAULT;
6342     }
6343 
6344     l_type = host_to_target_flock(fl->l_type);
6345     __put_user(l_type, &target_fl->l_type);
6346     __put_user(fl->l_whence, &target_fl->l_whence);
6347     __put_user(fl->l_start, &target_fl->l_start);
6348     __put_user(fl->l_len, &target_fl->l_len);
6349     __put_user(fl->l_pid, &target_fl->l_pid);
6350     unlock_user_struct(target_fl, target_flock_addr, 1);
6351     return 0;
6352 }
6353 
6354 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6355 {
6356     struct flock64 fl64;
6357 #ifdef F_GETOWN_EX
6358     struct f_owner_ex fox;
6359     struct target_f_owner_ex *target_fox;
6360 #endif
6361     abi_long ret;
6362     int host_cmd = target_to_host_fcntl_cmd(cmd);
6363 
6364     if (host_cmd == -TARGET_EINVAL)
6365         return host_cmd;
6366 
6367     switch(cmd) {
6368     case TARGET_F_GETLK:
6369         ret = copy_from_user_flock(&fl64, arg);
6370         if (ret) {
6371             return ret;
6372         }
6373         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6374         if (ret == 0) {
6375             ret = copy_to_user_flock(arg, &fl64);
6376         }
6377         break;
6378 
6379     case TARGET_F_SETLK:
6380     case TARGET_F_SETLKW:
6381         ret = copy_from_user_flock(&fl64, arg);
6382         if (ret) {
6383             return ret;
6384         }
6385         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6386         break;
6387 
6388     case TARGET_F_GETLK64:
6389     case TARGET_F_OFD_GETLK:
6390         ret = copy_from_user_flock64(&fl64, arg);
6391         if (ret) {
6392             return ret;
6393         }
6394         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6395         if (ret == 0) {
6396             ret = copy_to_user_flock64(arg, &fl64);
6397         }
6398         break;
6399     case TARGET_F_SETLK64:
6400     case TARGET_F_SETLKW64:
6401     case TARGET_F_OFD_SETLK:
6402     case TARGET_F_OFD_SETLKW:
6403         ret = copy_from_user_flock64(&fl64, arg);
6404         if (ret) {
6405             return ret;
6406         }
6407         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6408         break;
6409 
6410     case TARGET_F_GETFL:
6411         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6412         if (ret >= 0) {
6413             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6414         }
6415         break;
6416 
6417     case TARGET_F_SETFL:
6418         ret = get_errno(safe_fcntl(fd, host_cmd,
6419                                    target_to_host_bitmask(arg,
6420                                                           fcntl_flags_tbl)));
6421         break;
6422 
6423 #ifdef F_GETOWN_EX
6424     case TARGET_F_GETOWN_EX:
6425         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6426         if (ret >= 0) {
6427             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6428                 return -TARGET_EFAULT;
6429             target_fox->type = tswap32(fox.type);
6430             target_fox->pid = tswap32(fox.pid);
6431             unlock_user_struct(target_fox, arg, 1);
6432         }
6433         break;
6434 #endif
6435 
6436 #ifdef F_SETOWN_EX
6437     case TARGET_F_SETOWN_EX:
6438         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6439             return -TARGET_EFAULT;
6440         fox.type = tswap32(target_fox->type);
6441         fox.pid = tswap32(target_fox->pid);
6442         unlock_user_struct(target_fox, arg, 0);
6443         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6444         break;
6445 #endif
6446 
6447     case TARGET_F_SETOWN:
6448     case TARGET_F_GETOWN:
6449     case TARGET_F_SETSIG:
6450     case TARGET_F_GETSIG:
6451     case TARGET_F_SETLEASE:
6452     case TARGET_F_GETLEASE:
6453     case TARGET_F_SETPIPE_SZ:
6454     case TARGET_F_GETPIPE_SZ:
6455         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6456         break;
6457 
6458     default:
6459         ret = get_errno(safe_fcntl(fd, cmd, arg));
6460         break;
6461     }
6462     return ret;
6463 }
6464 
6465 #ifdef USE_UID16
6466 
6467 static inline int high2lowuid(int uid)
6468 {
6469     if (uid > 65535)
6470         return 65534;
6471     else
6472         return uid;
6473 }
6474 
6475 static inline int high2lowgid(int gid)
6476 {
6477     if (gid > 65535)
6478         return 65534;
6479     else
6480         return gid;
6481 }
6482 
6483 static inline int low2highuid(int uid)
6484 {
6485     if ((int16_t)uid == -1)
6486         return -1;
6487     else
6488         return uid;
6489 }
6490 
6491 static inline int low2highgid(int gid)
6492 {
6493     if ((int16_t)gid == -1)
6494         return -1;
6495     else
6496         return gid;
6497 }
6498 static inline int tswapid(int id)
6499 {
6500     return tswap16(id);
6501 }
6502 
6503 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6504 
6505 #else /* !USE_UID16 */
6506 static inline int high2lowuid(int uid)
6507 {
6508     return uid;
6509 }
6510 static inline int high2lowgid(int gid)
6511 {
6512     return gid;
6513 }
6514 static inline int low2highuid(int uid)
6515 {
6516     return uid;
6517 }
6518 static inline int low2highgid(int gid)
6519 {
6520     return gid;
6521 }
6522 static inline int tswapid(int id)
6523 {
6524     return tswap32(id);
6525 }
6526 
6527 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6528 
6529 #endif /* USE_UID16 */
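/*
 * Illustrative note: with USE_UID16, a host uid/gid that does not fit in
 * 16 bits (e.g. 100000) is reported to the guest as 65534 by
 * high2lowuid()/high2lowgid(), while low2highuid()/low2highgid() keep the
 * 16-bit -1 sentinel as -1 so "leave unchanged" arguments to setres[ug]id()
 * still work.
 */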
6530 
6531 /* We must do direct syscalls for setting UID/GID, because we want to
6532  * implement the Linux system call semantics of "change only for this thread",
6533  * not the libc/POSIX semantics of "change for all threads in process".
6534  * (See http://ewontfix.com/17/ for more details.)
6535  * We use the 32-bit version of the syscalls if present; if it is not
6536  * then either the host architecture supports 32-bit UIDs natively with
6537  * the standard syscall, or the 16-bit UID is the best we can do.
6538  */
6539 #ifdef __NR_setuid32
6540 #define __NR_sys_setuid __NR_setuid32
6541 #else
6542 #define __NR_sys_setuid __NR_setuid
6543 #endif
6544 #ifdef __NR_setgid32
6545 #define __NR_sys_setgid __NR_setgid32
6546 #else
6547 #define __NR_sys_setgid __NR_setgid
6548 #endif
6549 #ifdef __NR_setresuid32
6550 #define __NR_sys_setresuid __NR_setresuid32
6551 #else
6552 #define __NR_sys_setresuid __NR_setresuid
6553 #endif
6554 #ifdef __NR_setresgid32
6555 #define __NR_sys_setresgid __NR_setresgid32
6556 #else
6557 #define __NR_sys_setresgid __NR_setresgid
6558 #endif
6559 
6560 _syscall1(int, sys_setuid, uid_t, uid)
6561 _syscall1(int, sys_setgid, gid_t, gid)
6562 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6563 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6564 
6565 void syscall_init(void)
6566 {
6567     IOCTLEntry *ie;
6568     const argtype *arg_type;
6569     int size;
6570     int i;
6571 
6572     thunk_init(STRUCT_MAX);
6573 
6574 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6575 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6576 #include "syscall_types.h"
6577 #undef STRUCT
6578 #undef STRUCT_SPECIAL
6579 
6580     /* Build the target_to_host_errno_table[] from
6581      * host_to_target_errno_table[]. */
6582     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6583         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6584     }
6585 
6586     /* We patch the ioctl size if necessary. We rely on the fact that
6587        no ioctl has all bits set to '1' in the size field */
6588     ie = ioctl_entries;
6589     while (ie->target_cmd != 0) {
6590         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6591             TARGET_IOC_SIZEMASK) {
6592             arg_type = ie->arg_type;
6593             if (arg_type[0] != TYPE_PTR) {
6594                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6595                         ie->target_cmd);
6596                 exit(1);
6597             }
6598             arg_type++;
6599             size = thunk_type_size(arg_type, 0);
6600             ie->target_cmd = (ie->target_cmd &
6601                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6602                 (size << TARGET_IOC_SIZESHIFT);
6603         }
6604 
6605         /* automatic consistency check if same arch */
6606 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6607     (defined(__x86_64__) && defined(TARGET_X86_64))
6608         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6609             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6610                     ie->name, ie->target_cmd, ie->host_cmd);
6611         }
6612 #endif
6613         ie++;
6614     }
6615 }
6616 
6617 #if TARGET_ABI_BITS == 32
6618 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6619 {
6620 #ifdef TARGET_WORDS_BIGENDIAN
6621     return ((uint64_t)word0 << 32) | word1;
6622 #else
6623     return ((uint64_t)word1 << 32) | word0;
6624 #endif
6625 }
6626 #else /* TARGET_ABI_BITS == 32 */
6627 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6628 {
6629     return word0;
6630 }
6631 #endif /* TARGET_ABI_BITS != 32 */
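/*
 * Illustrative note: on a 32-bit ABI the 64-bit offset arrives as two
 * abi_ulong halves whose order depends on target endianness; for a
 * big-endian target, the offset 0x0000000100000000 is passed as
 * word0 = 0x00000001, word1 = 0x00000000 and reassembled by
 * target_offset64(), whereas 64-bit ABIs pass the offset in one register.
 */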
6632 
6633 #ifdef TARGET_NR_truncate64
6634 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6635                                          abi_long arg2,
6636                                          abi_long arg3,
6637                                          abi_long arg4)
6638 {
6639     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6640         arg2 = arg3;
6641         arg3 = arg4;
6642     }
6643     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6644 }
6645 #endif
6646 
6647 #ifdef TARGET_NR_ftruncate64
6648 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6649                                           abi_long arg2,
6650                                           abi_long arg3,
6651                                           abi_long arg4)
6652 {
6653     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6654         arg2 = arg3;
6655         arg3 = arg4;
6656     }
6657     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6658 }
6659 #endif
6660 
6661 #if defined(TARGET_NR_timer_settime) || \
6662     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6663 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6664                                                  abi_ulong target_addr)
6665 {
6666     struct target_itimerspec *target_itspec;
6667 
6668     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6669         return -TARGET_EFAULT;
6670     }
6671 
6672     host_itspec->it_interval.tv_sec =
6673                             tswapal(target_itspec->it_interval.tv_sec);
6674     host_itspec->it_interval.tv_nsec =
6675                             tswapal(target_itspec->it_interval.tv_nsec);
6676     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6677     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6678 
6679     unlock_user_struct(target_itspec, target_addr, 1);
6680     return 0;
6681 }
6682 #endif
6683 
6684 #if ((defined(TARGET_NR_timerfd_gettime) || \
6685       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6686     defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6687 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6688                                                struct itimerspec *host_its)
6689 {
6690     struct target_itimerspec *target_itspec;
6691 
6692     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6693         return -TARGET_EFAULT;
6694     }
6695 
6696     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6697     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6698 
6699     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6700     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6701 
6702     unlock_user_struct(target_itspec, target_addr, 0);
6703     return 0;
6704 }
6705 #endif
6706 
6707 #if defined(TARGET_NR_adjtimex) || \
6708     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6709 static inline abi_long target_to_host_timex(struct timex *host_tx,
6710                                             abi_long target_addr)
6711 {
6712     struct target_timex *target_tx;
6713 
6714     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6715         return -TARGET_EFAULT;
6716     }
6717 
6718     __get_user(host_tx->modes, &target_tx->modes);
6719     __get_user(host_tx->offset, &target_tx->offset);
6720     __get_user(host_tx->freq, &target_tx->freq);
6721     __get_user(host_tx->maxerror, &target_tx->maxerror);
6722     __get_user(host_tx->esterror, &target_tx->esterror);
6723     __get_user(host_tx->status, &target_tx->status);
6724     __get_user(host_tx->constant, &target_tx->constant);
6725     __get_user(host_tx->precision, &target_tx->precision);
6726     __get_user(host_tx->tolerance, &target_tx->tolerance);
6727     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6728     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6729     __get_user(host_tx->tick, &target_tx->tick);
6730     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6731     __get_user(host_tx->jitter, &target_tx->jitter);
6732     __get_user(host_tx->shift, &target_tx->shift);
6733     __get_user(host_tx->stabil, &target_tx->stabil);
6734     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6735     __get_user(host_tx->calcnt, &target_tx->calcnt);
6736     __get_user(host_tx->errcnt, &target_tx->errcnt);
6737     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6738     __get_user(host_tx->tai, &target_tx->tai);
6739 
6740     unlock_user_struct(target_tx, target_addr, 0);
6741     return 0;
6742 }
6743 
6744 static inline abi_long host_to_target_timex(abi_long target_addr,
6745                                             struct timex *host_tx)
6746 {
6747     struct target_timex *target_tx;
6748 
6749     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6750         return -TARGET_EFAULT;
6751     }
6752 
6753     __put_user(host_tx->modes, &target_tx->modes);
6754     __put_user(host_tx->offset, &target_tx->offset);
6755     __put_user(host_tx->freq, &target_tx->freq);
6756     __put_user(host_tx->maxerror, &target_tx->maxerror);
6757     __put_user(host_tx->esterror, &target_tx->esterror);
6758     __put_user(host_tx->status, &target_tx->status);
6759     __put_user(host_tx->constant, &target_tx->constant);
6760     __put_user(host_tx->precision, &target_tx->precision);
6761     __put_user(host_tx->tolerance, &target_tx->tolerance);
6762     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6763     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6764     __put_user(host_tx->tick, &target_tx->tick);
6765     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6766     __put_user(host_tx->jitter, &target_tx->jitter);
6767     __put_user(host_tx->shift, &target_tx->shift);
6768     __put_user(host_tx->stabil, &target_tx->stabil);
6769     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6770     __put_user(host_tx->calcnt, &target_tx->calcnt);
6771     __put_user(host_tx->errcnt, &target_tx->errcnt);
6772     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6773     __put_user(host_tx->tai, &target_tx->tai);
6774 
6775     unlock_user_struct(target_tx, target_addr, 1);
6776     return 0;
6777 }
6778 #endif
6779 
6780 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6781                                                abi_ulong target_addr)
6782 {
6783     struct target_sigevent *target_sevp;
6784 
6785     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6786         return -TARGET_EFAULT;
6787     }
6788 
6789     /* This union is awkward on 64 bit systems because it has a 32 bit
6790      * integer and a pointer in it; we follow the conversion approach
6791      * used for handling sigval types in signal.c so the guest should get
6792      * the correct value back even if we did a 64 bit byteswap and it's
6793      * using the 32 bit integer.
6794      */
6795     host_sevp->sigev_value.sival_ptr =
6796         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6797     host_sevp->sigev_signo =
6798         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6799     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6800     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6801 
6802     unlock_user_struct(target_sevp, target_addr, 1);
6803     return 0;
6804 }
6805 
6806 #if defined(TARGET_NR_mlockall)
6807 static inline int target_to_host_mlockall_arg(int arg)
6808 {
6809     int result = 0;
6810 
6811     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6812         result |= MCL_CURRENT;
6813     }
6814     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6815         result |= MCL_FUTURE;
6816     }
6817     return result;
6818 }
6819 #endif
6820 
6821 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6822      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6823      defined(TARGET_NR_newfstatat))
6824 static inline abi_long host_to_target_stat64(void *cpu_env,
6825                                              abi_ulong target_addr,
6826                                              struct stat *host_st)
6827 {
6828 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6829     if (((CPUARMState *)cpu_env)->eabi) {
6830         struct target_eabi_stat64 *target_st;
6831 
6832         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6833             return -TARGET_EFAULT;
6834         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6835         __put_user(host_st->st_dev, &target_st->st_dev);
6836         __put_user(host_st->st_ino, &target_st->st_ino);
6837 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6838         __put_user(host_st->st_ino, &target_st->__st_ino);
6839 #endif
6840         __put_user(host_st->st_mode, &target_st->st_mode);
6841         __put_user(host_st->st_nlink, &target_st->st_nlink);
6842         __put_user(host_st->st_uid, &target_st->st_uid);
6843         __put_user(host_st->st_gid, &target_st->st_gid);
6844         __put_user(host_st->st_rdev, &target_st->st_rdev);
6845         __put_user(host_st->st_size, &target_st->st_size);
6846         __put_user(host_st->st_blksize, &target_st->st_blksize);
6847         __put_user(host_st->st_blocks, &target_st->st_blocks);
6848         __put_user(host_st->st_atime, &target_st->target_st_atime);
6849         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6850         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6851 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6852         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6853         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6854         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6855 #endif
6856         unlock_user_struct(target_st, target_addr, 1);
6857     } else
6858 #endif
6859     {
6860 #if defined(TARGET_HAS_STRUCT_STAT64)
6861         struct target_stat64 *target_st;
6862 #else
6863         struct target_stat *target_st;
6864 #endif
6865 
6866         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6867             return -TARGET_EFAULT;
6868         memset(target_st, 0, sizeof(*target_st));
6869         __put_user(host_st->st_dev, &target_st->st_dev);
6870         __put_user(host_st->st_ino, &target_st->st_ino);
6871 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6872         __put_user(host_st->st_ino, &target_st->__st_ino);
6873 #endif
6874         __put_user(host_st->st_mode, &target_st->st_mode);
6875         __put_user(host_st->st_nlink, &target_st->st_nlink);
6876         __put_user(host_st->st_uid, &target_st->st_uid);
6877         __put_user(host_st->st_gid, &target_st->st_gid);
6878         __put_user(host_st->st_rdev, &target_st->st_rdev);
6879         /* XXX: better use of kernel struct */
6880         __put_user(host_st->st_size, &target_st->st_size);
6881         __put_user(host_st->st_blksize, &target_st->st_blksize);
6882         __put_user(host_st->st_blocks, &target_st->st_blocks);
6883         __put_user(host_st->st_atime, &target_st->target_st_atime);
6884         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6885         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6886 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6887         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6888         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6889         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6890 #endif
6891         unlock_user_struct(target_st, target_addr, 1);
6892     }
6893 
6894     return 0;
6895 }
6896 #endif
6897 
6898 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6899 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6900                                             abi_ulong target_addr)
6901 {
6902     struct target_statx *target_stx;
6903 
6904     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6905         return -TARGET_EFAULT;
6906     }
6907     memset(target_stx, 0, sizeof(*target_stx));
6908 
6909     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6910     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6911     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6912     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6913     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6914     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6915     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6916     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6917     __put_user(host_stx->stx_size, &target_stx->stx_size);
6918     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6919     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6920     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6921     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6922     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6923     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6924     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6925     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6926     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6927     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6928     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6929     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6930     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6931     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6932 
6933     unlock_user_struct(target_stx, target_addr, 1);
6934 
6935     return 0;
6936 }
6937 #endif
6938 
6939 static int do_sys_futex(int *uaddr, int op, int val,
6940                          const struct timespec *timeout, int *uaddr2,
6941                          int val3)
6942 {
6943 #if HOST_LONG_BITS == 64
6944 #if defined(__NR_futex)
6945     /* time_t is always 64-bit here; there is no _time64 variant */
6946     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
6947 
6948 #endif
6949 #else /* HOST_LONG_BITS == 64 */
6950 #if defined(__NR_futex_time64)
6951     if (sizeof(timeout->tv_sec) == 8) {
6952         /* _time64 function on 32bit arch */
6953         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
6954     }
6955 #endif
6956 #if defined(__NR_futex)
6957     /* old function on 32bit arch */
6958     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
6959 #endif
6960 #endif /* HOST_LONG_BITS == 64 */
6961     g_assert_not_reached();
6962 }
6963 
6964 static int do_safe_futex(int *uaddr, int op, int val,
6965                          const struct timespec *timeout, int *uaddr2,
6966                          int val3)
6967 {
6968 #if HOST_LONG_BITS == 64
6969 #if defined(__NR_futex)
6970     /* always a 64-bit time_t, it doesn't define _time64 version  */
6971     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
6972 #endif
6973 #else /* HOST_LONG_BITS == 64 */
6974 #if defined(__NR_futex_time64)
6975     if (sizeof(timeout->tv_sec) == 8) {
6976         /* _time64 function on 32bit arch */
6977         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
6978                                            val3));
6979     }
6980 #endif
6981 #if defined(__NR_futex)
6982     /* old function on 32bit arch */
6983     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
6984 #endif
6985 #endif /* HOST_LONG_BITS == 64 */
6986     return -TARGET_ENOSYS;
6987 }
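/*
 * Illustrative note: on a 32-bit host built with a 64-bit time_t
 * (sizeof(timeout->tv_sec) == 8), the wrappers above route the call through
 * __NR_futex_time64 when it exists and otherwise fall back to the classic
 * __NR_futex; a host providing neither ends in g_assert_not_reached() or
 * -TARGET_ENOSYS respectively.
 */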
6988 
6989 /* ??? Using host futex calls even when target atomic operations
6990    are not really atomic probably breaks things.  However, implementing
6991    futexes locally would make futexes shared between multiple processes
6992    tricky.  They are probably useless anyway, because guest atomic
6993    operations won't work either.  */
6994 #if defined(TARGET_NR_futex)
6995 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6996                     target_ulong uaddr2, int val3)
6997 {
6998     struct timespec ts, *pts;
6999     int base_op;
7000 
7001     /* ??? We assume FUTEX_* constants are the same on both host
7002        and target.  */
7003 #ifdef FUTEX_CMD_MASK
7004     base_op = op & FUTEX_CMD_MASK;
7005 #else
7006     base_op = op;
7007 #endif
7008     switch (base_op) {
7009     case FUTEX_WAIT:
7010     case FUTEX_WAIT_BITSET:
7011         if (timeout) {
7012             pts = &ts;
7013             target_to_host_timespec(pts, timeout);
7014         } else {
7015             pts = NULL;
7016         }
7017         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7018     case FUTEX_WAKE:
7019         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7020     case FUTEX_FD:
7021         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7022     case FUTEX_REQUEUE:
7023     case FUTEX_CMP_REQUEUE:
7024     case FUTEX_WAKE_OP:
7025         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7026            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7027            But the prototype takes a `struct timespec *'; insert casts
7028            to satisfy the compiler.  We do not need to tswap TIMEOUT
7029            since it's not compared to guest memory.  */
7030         pts = (struct timespec *)(uintptr_t) timeout;
7031         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7032                              (base_op == FUTEX_CMP_REQUEUE
7033                                       ? tswap32(val3)
7034                                       : val3));
7035     default:
7036         return -TARGET_ENOSYS;
7037     }
7038 }
7039 #endif
7040 
7041 #if defined(TARGET_NR_futex_time64)
7042 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7043                            target_ulong uaddr2, int val3)
7044 {
7045     struct timespec ts, *pts;
7046     int base_op;
7047 
7048     /* ??? We assume FUTEX_* constants are the same on both host
7049        and target.  */
7050 #ifdef FUTEX_CMD_MASK
7051     base_op = op & FUTEX_CMD_MASK;
7052 #else
7053     base_op = op;
7054 #endif
7055     switch (base_op) {
7056     case FUTEX_WAIT:
7057     case FUTEX_WAIT_BITSET:
7058         if (timeout) {
7059             pts = &ts;
7060             target_to_host_timespec64(pts, timeout);
7061         } else {
7062             pts = NULL;
7063         }
7064         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7065     case FUTEX_WAKE:
7066         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7067     case FUTEX_FD:
7068         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7069     case FUTEX_REQUEUE:
7070     case FUTEX_CMP_REQUEUE:
7071     case FUTEX_WAKE_OP:
7072         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7073            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7074            But the prototype takes a `struct timespec *'; insert casts
7075            to satisfy the compiler.  We do not need to tswap TIMEOUT
7076            since it's not compared to guest memory.  */
7077         pts = (struct timespec *)(uintptr_t) timeout;
7078         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7079                              (base_op == FUTEX_CMP_REQUEUE
7080                                       ? tswap32(val3)
7081                                       : val3));
7082     default:
7083         return -TARGET_ENOSYS;
7084     }
7085 }
7086 #endif
7087 
7088 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7089 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7090                                      abi_long handle, abi_long mount_id,
7091                                      abi_long flags)
7092 {
7093     struct file_handle *target_fh;
7094     struct file_handle *fh;
7095     int mid = 0;
7096     abi_long ret;
7097     char *name;
7098     unsigned int size, total_size;
7099 
7100     if (get_user_s32(size, handle)) {
7101         return -TARGET_EFAULT;
7102     }
7103 
7104     name = lock_user_string(pathname);
7105     if (!name) {
7106         return -TARGET_EFAULT;
7107     }
7108 
7109     total_size = sizeof(struct file_handle) + size;
7110     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7111     if (!target_fh) {
7112         unlock_user(name, pathname, 0);
7113         return -TARGET_EFAULT;
7114     }
7115 
7116     fh = g_malloc0(total_size);
7117     fh->handle_bytes = size;
7118 
7119     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7120     unlock_user(name, pathname, 0);
7121 
7122     /* man name_to_handle_at(2):
7123      * Other than the use of the handle_bytes field, the caller should treat
7124      * the file_handle structure as an opaque data type
7125      */
7126 
7127     memcpy(target_fh, fh, total_size);
7128     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7129     target_fh->handle_type = tswap32(fh->handle_type);
7130     g_free(fh);
7131     unlock_user(target_fh, handle, total_size);
7132 
7133     if (put_user_s32(mid, mount_id)) {
7134         return -TARGET_EFAULT;
7135     }
7136 
7137     return ret;
7138 
7139 }
7140 #endif
7141 
7142 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7143 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7144                                      abi_long flags)
7145 {
7146     struct file_handle *target_fh;
7147     struct file_handle *fh;
7148     unsigned int size, total_size;
7149     abi_long ret;
7150 
7151     if (get_user_s32(size, handle)) {
7152         return -TARGET_EFAULT;
7153     }
7154 
7155     total_size = sizeof(struct file_handle) + size;
7156     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7157     if (!target_fh) {
7158         return -TARGET_EFAULT;
7159     }
7160 
7161     fh = g_memdup(target_fh, total_size);
7162     fh->handle_bytes = size;
7163     fh->handle_type = tswap32(target_fh->handle_type);
7164 
7165     ret = get_errno(open_by_handle_at(mount_fd, fh,
7166                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7167 
7168     g_free(fh);
7169 
7170     unlock_user(target_fh, handle, total_size);
7171 
7172     return ret;
7173 }
7174 #endif
7175 
7176 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7177 
7178 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7179 {
7180     int host_flags;
7181     target_sigset_t *target_mask;
7182     sigset_t host_mask;
7183     abi_long ret;
7184 
7185     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7186         return -TARGET_EINVAL;
7187     }
7188     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7189         return -TARGET_EFAULT;
7190     }
7191 
7192     target_to_host_sigset(&host_mask, target_mask);
7193 
7194     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7195 
7196     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7197     if (ret >= 0) {
7198         fd_trans_register(ret, &target_signalfd_trans);
7199     }
7200 
7201     unlock_user_struct(target_mask, mask, 0);
7202 
7203     return ret;
7204 }
7205 #endif
7206 
7207 /* Map host to target signal numbers for the wait family of syscalls.
7208    Assume all other status bits are the same.  */
7209 int host_to_target_waitstatus(int status)
7210 {
7211     if (WIFSIGNALED(status)) {
7212         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7213     }
7214     if (WIFSTOPPED(status)) {
7215         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7216                | (status & 0xff);
7217     }
7218     return status;
7219 }
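/*
 * Illustrative note: only the signal number embedded in the wait status is
 * remapped here (the low 7 bits for a child killed by a signal, or bits
 * 8-15 for a stopped child); plain exit statuses pass through unchanged
 * because they carry no signal number.
 */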
7220 
7221 static int open_self_cmdline(void *cpu_env, int fd)
7222 {
7223     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7224     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7225     int i;
7226 
7227     for (i = 0; i < bprm->argc; i++) {
7228         size_t len = strlen(bprm->argv[i]) + 1;
7229 
7230         if (write(fd, bprm->argv[i], len) != len) {
7231             return -1;
7232         }
7233     }
7234 
7235     return 0;
7236 }
7237 
7238 static int open_self_maps(void *cpu_env, int fd)
7239 {
7240     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7241     TaskState *ts = cpu->opaque;
7242     GSList *map_info = read_self_maps();
7243     GSList *s;
7244     int count;
7245 
7246     for (s = map_info; s; s = g_slist_next(s)) {
7247         MapInfo *e = (MapInfo *) s->data;
7248 
7249         if (h2g_valid(e->start)) {
7250             unsigned long min = e->start;
7251             unsigned long max = e->end;
7252             int flags = page_get_flags(h2g(min));
7253             const char *path;
7254 
7255             max = h2g_valid(max - 1) ?
7256                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7257 
7258             if (page_check_range(h2g(min), max - min, flags) == -1) {
7259                 continue;
7260             }
7261 
7262             if (h2g(min) == ts->info->stack_limit) {
7263                 path = "[stack]";
7264             } else {
7265                 path = e->path;
7266             }
7267 
7268             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7269                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7270                             h2g(min), h2g(max - 1) + 1,
7271                             e->is_read ? 'r' : '-',
7272                             e->is_write ? 'w' : '-',
7273                             e->is_exec ? 'x' : '-',
7274                             e->is_priv ? 'p' : '-',
7275                             (uint64_t) e->offset, e->dev, e->inode);
7276             if (path) {
7277                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7278             } else {
7279                 dprintf(fd, "\n");
7280             }
7281         }
7282     }
7283 
7284     free_self_maps(map_info);
7285 
7286 #ifdef TARGET_VSYSCALL_PAGE
7287     /*
7288      * We only support execution from the vsyscall page.
7289      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7290      */
7291     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7292                     " --xp 00000000 00:00 0",
7293                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7294     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7295 #endif
7296 
7297     return 0;
7298 }
7299 
7300 static int open_self_stat(void *cpu_env, int fd)
7301 {
7302     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7303     TaskState *ts = cpu->opaque;
7304     g_autoptr(GString) buf = g_string_new(NULL);
7305     int i;
7306 
7307     for (i = 0; i < 44; i++) {
7308         if (i == 0) {
7309             /* pid */
7310             g_string_printf(buf, FMT_pid " ", getpid());
7311         } else if (i == 1) {
7312             /* app name */
7313             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7314             bin = bin ? bin + 1 : ts->bprm->argv[0];
7315             g_string_printf(buf, "(%.15s) ", bin);
7316         } else if (i == 27) {
7317             /* stack bottom */
7318             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7319         } else {
7320             /* for the remaining fields, just report 0 */
7321             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7322         }
7323 
7324         if (write(fd, buf->str, buf->len) != buf->len) {
7325             return -1;
7326         }
7327     }
7328 
7329     return 0;
7330 }
7331 
7332 static int open_self_auxv(void *cpu_env, int fd)
7333 {
7334     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7335     TaskState *ts = cpu->opaque;
7336     abi_ulong auxv = ts->info->saved_auxv;
7337     abi_ulong len = ts->info->auxv_len;
7338     char *ptr;
7339 
7340     /*
7341      * The auxiliary vector is stored on the target process stack.
7342      * Read the whole auxv vector and copy it to the file.
7343      */
7344     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7345     if (ptr != NULL) {
7346         while (len > 0) {
7347             ssize_t r;
7348             r = write(fd, ptr, len);
7349             if (r <= 0) {
7350                 break;
7351             }
7352             len -= r;
7353             ptr += r;
7354         }
7355         lseek(fd, 0, SEEK_SET);
7356         unlock_user(ptr, auxv, len);
7357     }
7358 
7359     return 0;
7360 }
7361 
7362 static int is_proc_myself(const char *filename, const char *entry)
7363 {
7364     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7365         filename += strlen("/proc/");
7366         if (!strncmp(filename, "self/", strlen("self/"))) {
7367             filename += strlen("self/");
7368         } else if (*filename >= '1' && *filename <= '9') {
7369             char myself[80];
7370             snprintf(myself, sizeof(myself), "%d/", getpid());
7371             if (!strncmp(filename, myself, strlen(myself))) {
7372                 filename += strlen(myself);
7373             } else {
7374                 return 0;
7375             }
7376         } else {
7377             return 0;
7378         }
7379         if (!strcmp(filename, entry)) {
7380             return 1;
7381         }
7382     }
7383     return 0;
7384 }
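/*
 * Illustrative note: is_proc_myself("/proc/self/maps", "maps") and
 * is_proc_myself("/proc/<our pid>/maps", "maps") both return 1, while a
 * path naming some other pid, or a different /proc entry, returns 0 and is
 * passed through to the host unchanged by the callers below.
 */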
7385 
7386 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7387     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7388 static int is_proc(const char *filename, const char *entry)
7389 {
7390     return strcmp(filename, entry) == 0;
7391 }
7392 #endif
7393 
7394 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7395 static int open_net_route(void *cpu_env, int fd)
7396 {
7397     FILE *fp;
7398     char *line = NULL;
7399     size_t len = 0;
7400     ssize_t read;
7401 
7402     fp = fopen("/proc/net/route", "r");
7403     if (fp == NULL) {
7404         return -1;
7405     }
7406 
7407     /* read header */
7408 
7409     read = getline(&line, &len, fp);
7410     dprintf(fd, "%s", line);
7411 
7412     /* read routes */
7413 
7414     while ((read = getline(&line, &len, fp)) != -1) {
7415         char iface[16];
7416         uint32_t dest, gw, mask;
7417         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7418         int fields;
7419 
7420         fields = sscanf(line,
7421                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7422                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7423                         &mask, &mtu, &window, &irtt);
7424         if (fields != 11) {
7425             continue;
7426         }
7427         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7428                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7429                 metric, tswap32(mask), mtu, window, irtt);
7430     }
7431 
7432     free(line);
7433     fclose(fp);
7434 
7435     return 0;
7436 }
7437 #endif
7438 
7439 #if defined(TARGET_SPARC)
7440 static int open_cpuinfo(void *cpu_env, int fd)
7441 {
7442     dprintf(fd, "type\t\t: sun4u\n");
7443     return 0;
7444 }
7445 #endif
7446 
7447 #if defined(TARGET_HPPA)
7448 static int open_cpuinfo(void *cpu_env, int fd)
7449 {
7450     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7451     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7452     dprintf(fd, "capabilities\t: os32\n");
7453     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7454     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7455     return 0;
7456 }
7457 #endif
7458 
7459 #if defined(TARGET_M68K)
7460 static int open_hardware(void *cpu_env, int fd)
7461 {
7462     dprintf(fd, "Model:\t\tqemu-m68k\n");
7463     return 0;
7464 }
7465 #endif
7466 
7467 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7468 {
7469     struct fake_open {
7470         const char *filename;
7471         int (*fill)(void *cpu_env, int fd);
7472         int (*cmp)(const char *s1, const char *s2);
7473     };
7474     const struct fake_open *fake_open;
7475     static const struct fake_open fakes[] = {
7476         { "maps", open_self_maps, is_proc_myself },
7477         { "stat", open_self_stat, is_proc_myself },
7478         { "auxv", open_self_auxv, is_proc_myself },
7479         { "cmdline", open_self_cmdline, is_proc_myself },
7480 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7481         { "/proc/net/route", open_net_route, is_proc },
7482 #endif
7483 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7484         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7485 #endif
7486 #if defined(TARGET_M68K)
7487         { "/proc/hardware", open_hardware, is_proc },
7488 #endif
7489         { NULL, NULL, NULL }
7490     };
7491 
7492     if (is_proc_myself(pathname, "exe")) {
7493         int execfd = qemu_getauxval(AT_EXECFD);
7494         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7495     }
7496 
7497     for (fake_open = fakes; fake_open->filename; fake_open++) {
7498         if (fake_open->cmp(pathname, fake_open->filename)) {
7499             break;
7500         }
7501     }
7502 
7503     if (fake_open->filename) {
7504         const char *tmpdir;
7505         char filename[PATH_MAX];
7506         int fd, r;
7507 
7508         /* create temporary file to map stat to */
7509         tmpdir = getenv("TMPDIR");
7510         if (!tmpdir)
7511             tmpdir = "/tmp";
7512         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7513         fd = mkstemp(filename);
7514         if (fd < 0) {
7515             return fd;
7516         }
7517         unlink(filename);
7518 
7519         if ((r = fake_open->fill(cpu_env, fd))) {
7520             int e = errno;
7521             close(fd);
7522             errno = e;
7523             return r;
7524         }
7525         lseek(fd, 0, SEEK_SET);
7526 
7527         return fd;
7528     }
7529 
7530     return safe_openat(dirfd, path(pathname), flags, mode);
7531 }
7532 
7533 #define TIMER_MAGIC 0x0caf0000
7534 #define TIMER_MAGIC_MASK 0xffff0000
7535 
7536 /* Convert QEMU provided timer ID back to internal 16bit index format */
7537 static target_timer_t get_timer_id(abi_long arg)
7538 {
7539     target_timer_t timerid = arg;
7540 
7541     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7542         return -TARGET_EINVAL;
7543     }
7544 
7545     timerid &= 0xffff;
7546 
7547     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7548         return -TARGET_EINVAL;
7549     }
7550 
7551     return timerid;
7552 }
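/*
 * Illustrative note: a timer ID handed out as (TIMER_MAGIC | n) comes back
 * here and is reduced to the index n into g_posix_timers[]; anything
 * lacking the magic upper 16 bits, or indexing past the table, is rejected
 * with -TARGET_EINVAL.
 */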
7553 
7554 static int target_to_host_cpu_mask(unsigned long *host_mask,
7555                                    size_t host_size,
7556                                    abi_ulong target_addr,
7557                                    size_t target_size)
7558 {
7559     unsigned target_bits = sizeof(abi_ulong) * 8;
7560     unsigned host_bits = sizeof(*host_mask) * 8;
7561     abi_ulong *target_mask;
7562     unsigned i, j;
7563 
7564     assert(host_size >= target_size);
7565 
7566     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7567     if (!target_mask) {
7568         return -TARGET_EFAULT;
7569     }
7570     memset(host_mask, 0, host_size);
7571 
7572     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7573         unsigned bit = i * target_bits;
7574         abi_ulong val;
7575 
7576         __get_user(val, &target_mask[i]);
7577         for (j = 0; j < target_bits; j++, bit++) {
7578             if (val & (1UL << j)) {
7579                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7580             }
7581         }
7582     }
7583 
7584     unlock_user(target_mask, target_addr, 0);
7585     return 0;
7586 }
7587 
7588 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7589                                    size_t host_size,
7590                                    abi_ulong target_addr,
7591                                    size_t target_size)
7592 {
7593     unsigned target_bits = sizeof(abi_ulong) * 8;
7594     unsigned host_bits = sizeof(*host_mask) * 8;
7595     abi_ulong *target_mask;
7596     unsigned i, j;
7597 
7598     assert(host_size >= target_size);
7599 
7600     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7601     if (!target_mask) {
7602         return -TARGET_EFAULT;
7603     }
7604 
7605     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7606         unsigned bit = i * target_bits;
7607         abi_ulong val = 0;
7608 
7609         for (j = 0; j < target_bits; j++, bit++) {
7610             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7611                 val |= 1UL << j;
7612             }
7613         }
7614         __put_user(val, &target_mask[i]);
7615     }
7616 
7617     unlock_user(target_mask, target_addr, target_size);
7618     return 0;
7619 }
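/*
 * Illustrative note: the two helpers above copy a CPU affinity mask between
 * guest-sized abi_ulong words and host unsigned longs bit by bit, so e.g.
 * on a 32-bit guest running on a 64-bit host, CPU 35 (bit 3 of the guest's
 * second 32-bit word) lands in bit 35 of the host's first 64-bit word, and
 * vice versa.
 */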
7620 
7621 /* This is an internal helper for do_syscall so that it is easier
7622  * to have a single return point, so that actions, such as logging
7623  * of syscall results, can be performed.
7624  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7625  */
7626 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7627                             abi_long arg2, abi_long arg3, abi_long arg4,
7628                             abi_long arg5, abi_long arg6, abi_long arg7,
7629                             abi_long arg8)
7630 {
7631     CPUState *cpu = env_cpu(cpu_env);
7632     abi_long ret;
7633 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7634     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7635     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7636     || defined(TARGET_NR_statx)
7637     struct stat st;
7638 #endif
7639 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7640     || defined(TARGET_NR_fstatfs)
7641     struct statfs stfs;
7642 #endif
7643     void *p;
7644 
7645     switch(num) {
7646     case TARGET_NR_exit:
7647         /* In old applications this may be used to implement _exit(2).
7648            However, in threaded applications it is used for thread termination,
7649            and _exit_group is used for application termination.
7650            Do thread termination if we have more than one thread.  */
7651 
7652         if (block_signals()) {
7653             return -TARGET_ERESTARTSYS;
7654         }
7655 
7656         pthread_mutex_lock(&clone_lock);
7657 
7658         if (CPU_NEXT(first_cpu)) {
7659             TaskState *ts = cpu->opaque;
7660 
7661             object_property_set_bool(OBJECT(cpu), false, "realized", NULL);
7662             object_unref(OBJECT(cpu));
7663             /*
7664              * At this point the CPU should be unrealized and removed
7665              * from cpu lists. We can clean-up the rest of the thread
7666              * data without the lock held.
7667              */
7668 
7669             pthread_mutex_unlock(&clone_lock);
7670 
7671             if (ts->child_tidptr) {
7672                 put_user_u32(0, ts->child_tidptr);
7673                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7674                           NULL, NULL, 0);
7675             }
7676             thread_cpu = NULL;
7677             g_free(ts);
7678             rcu_unregister_thread();
7679             pthread_exit(NULL);
7680         }
7681 
7682         pthread_mutex_unlock(&clone_lock);
7683         preexit_cleanup(cpu_env, arg1);
7684         _exit(arg1);
7685         return 0; /* avoid warning */
7686     case TARGET_NR_read:
7687         if (arg2 == 0 && arg3 == 0) {
7688             return get_errno(safe_read(arg1, 0, 0));
7689         } else {
7690             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7691                 return -TARGET_EFAULT;
7692             ret = get_errno(safe_read(arg1, p, arg3));
7693             if (ret >= 0 &&
7694                 fd_trans_host_to_target_data(arg1)) {
7695                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7696             }
7697             unlock_user(p, arg2, ret);
7698         }
7699         return ret;
7700     case TARGET_NR_write:
7701         if (arg2 == 0 && arg3 == 0) {
7702             return get_errno(safe_write(arg1, 0, 0));
7703         }
7704         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7705             return -TARGET_EFAULT;
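             /*
              * If this fd has a data translator it may rewrite the buffer
              * contents, so give it a private copy: the guest buffer was
              * only locked for reading.
              */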
7706         if (fd_trans_target_to_host_data(arg1)) {
7707             void *copy = g_malloc(arg3);
7708             memcpy(copy, p, arg3);
7709             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7710             if (ret >= 0) {
7711                 ret = get_errno(safe_write(arg1, copy, ret));
7712             }
7713             g_free(copy);
7714         } else {
7715             ret = get_errno(safe_write(arg1, p, arg3));
7716         }
7717         unlock_user(p, arg2, 0);
7718         return ret;
7719 
7720 #ifdef TARGET_NR_open
7721     case TARGET_NR_open:
7722         if (!(p = lock_user_string(arg1)))
7723             return -TARGET_EFAULT;
7724         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7725                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7726                                   arg3));
7727         fd_trans_unregister(ret);
7728         unlock_user(p, arg1, 0);
7729         return ret;
7730 #endif
7731     case TARGET_NR_openat:
7732         if (!(p = lock_user_string(arg2)))
7733             return -TARGET_EFAULT;
7734         ret = get_errno(do_openat(cpu_env, arg1, p,
7735                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7736                                   arg4));
7737         fd_trans_unregister(ret);
7738         unlock_user(p, arg2, 0);
7739         return ret;
7740 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7741     case TARGET_NR_name_to_handle_at:
7742         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7743         return ret;
7744 #endif
7745 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7746     case TARGET_NR_open_by_handle_at:
7747         ret = do_open_by_handle_at(arg1, arg2, arg3);
7748         fd_trans_unregister(ret);
7749         return ret;
7750 #endif
7751     case TARGET_NR_close:
7752         fd_trans_unregister(arg1);
7753         return get_errno(close(arg1));
7754 
7755     case TARGET_NR_brk:
7756         return do_brk(arg1);
7757 #ifdef TARGET_NR_fork
7758     case TARGET_NR_fork:
7759         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7760 #endif
7761 #ifdef TARGET_NR_waitpid
7762     case TARGET_NR_waitpid:
7763         {
7764             int status;
7765             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7766             if (!is_error(ret) && arg2 && ret
7767                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7768                 return -TARGET_EFAULT;
7769         }
7770         return ret;
7771 #endif
7772 #ifdef TARGET_NR_waitid
7773     case TARGET_NR_waitid:
7774         {
7775             siginfo_t info;
7776             info.si_pid = 0;
7777             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7778             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7779                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7780                     return -TARGET_EFAULT;
7781                 host_to_target_siginfo(p, &info);
7782                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7783             }
7784         }
7785         return ret;
7786 #endif
7787 #ifdef TARGET_NR_creat /* not on alpha */
7788     case TARGET_NR_creat:
7789         if (!(p = lock_user_string(arg1)))
7790             return -TARGET_EFAULT;
7791         ret = get_errno(creat(p, arg2));
7792         fd_trans_unregister(ret);
7793         unlock_user(p, arg1, 0);
7794         return ret;
7795 #endif
7796 #ifdef TARGET_NR_link
7797     case TARGET_NR_link:
7798         {
7799             void * p2;
7800             p = lock_user_string(arg1);
7801             p2 = lock_user_string(arg2);
7802             if (!p || !p2)
7803                 ret = -TARGET_EFAULT;
7804             else
7805                 ret = get_errno(link(p, p2));
7806             unlock_user(p2, arg2, 0);
7807             unlock_user(p, arg1, 0);
7808         }
7809         return ret;
7810 #endif
7811 #if defined(TARGET_NR_linkat)
7812     case TARGET_NR_linkat:
7813         {
7814             void * p2 = NULL;
7815             if (!arg2 || !arg4)
7816                 return -TARGET_EFAULT;
7817             p  = lock_user_string(arg2);
7818             p2 = lock_user_string(arg4);
7819             if (!p || !p2)
7820                 ret = -TARGET_EFAULT;
7821             else
7822                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7823             unlock_user(p, arg2, 0);
7824             unlock_user(p2, arg4, 0);
7825         }
7826         return ret;
7827 #endif
7828 #ifdef TARGET_NR_unlink
7829     case TARGET_NR_unlink:
7830         if (!(p = lock_user_string(arg1)))
7831             return -TARGET_EFAULT;
7832         ret = get_errno(unlink(p));
7833         unlock_user(p, arg1, 0);
7834         return ret;
7835 #endif
7836 #if defined(TARGET_NR_unlinkat)
7837     case TARGET_NR_unlinkat:
7838         if (!(p = lock_user_string(arg2)))
7839             return -TARGET_EFAULT;
7840         ret = get_errno(unlinkat(arg1, p, arg3));
7841         unlock_user(p, arg2, 0);
7842         return ret;
7843 #endif
7844     case TARGET_NR_execve:
7845         {
7846             char **argp, **envp;
7847             int argc, envc;
7848             abi_ulong gp;
7849             abi_ulong guest_argp;
7850             abi_ulong guest_envp;
7851             abi_ulong addr;
7852             char **q;
7853             int total_size = 0;
7854 
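                 /*
                  * First pass: count the entries of the NULL-terminated
                  * argv and envp arrays of guest pointers so that host
                  * arrays of the right size can be allocated below.
                  */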
7855             argc = 0;
7856             guest_argp = arg2;
7857             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7858                 if (get_user_ual(addr, gp))
7859                     return -TARGET_EFAULT;
7860                 if (!addr)
7861                     break;
7862                 argc++;
7863             }
7864             envc = 0;
7865             guest_envp = arg3;
7866             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7867                 if (get_user_ual(addr, gp))
7868                     return -TARGET_EFAULT;
7869                 if (!addr)
7870                     break;
7871                 envc++;
7872             }
7873 
7874             argp = g_new0(char *, argc + 1);
7875             envp = g_new0(char *, envc + 1);
7876 
7877             for (gp = guest_argp, q = argp; gp;
7878                   gp += sizeof(abi_ulong), q++) {
7879                 if (get_user_ual(addr, gp))
7880                     goto execve_efault;
7881                 if (!addr)
7882                     break;
7883                 if (!(*q = lock_user_string(addr)))
7884                     goto execve_efault;
7885                 total_size += strlen(*q) + 1;
7886             }
7887             *q = NULL;
7888 
7889             for (gp = guest_envp, q = envp; gp;
7890                   gp += sizeof(abi_ulong), q++) {
7891                 if (get_user_ual(addr, gp))
7892                     goto execve_efault;
7893                 if (!addr)
7894                     break;
7895                 if (!(*q = lock_user_string(addr)))
7896                     goto execve_efault;
7897                 total_size += strlen(*q) + 1;
7898             }
7899             *q = NULL;
7900 
7901             if (!(p = lock_user_string(arg1)))
7902                 goto execve_efault;
7903             /* Although execve() is not an interruptible syscall it is
7904              * a special case where we must use the safe_syscall wrapper:
7905              * if we allow a signal to happen before we make the host
7906              * syscall then we will 'lose' it, because at the point of
7907              * execve the process leaves QEMU's control. So we use the
7908              * safe syscall wrapper to ensure that we either take the
7909              * signal as a guest signal, or else it does not happen
7910              * before the execve completes and makes it the other
7911              * program's problem.
7912              */
7913             ret = get_errno(safe_execve(p, argp, envp));
7914             unlock_user(p, arg1, 0);
7915 
7916             goto execve_end;
7917 
7918         execve_efault:
7919             ret = -TARGET_EFAULT;
7920 
7921         execve_end:
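                 /*
                  * Unlock every guest string locked above.  On success
                  * execve() does not return, so this cleanup only runs on
                  * failure.
                  */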
7922             for (gp = guest_argp, q = argp; *q;
7923                   gp += sizeof(abi_ulong), q++) {
7924                 if (get_user_ual(addr, gp)
7925                     || !addr)
7926                     break;
7927                 unlock_user(*q, addr, 0);
7928             }
7929             for (gp = guest_envp, q = envp; *q;
7930                   gp += sizeof(abi_ulong), q++) {
7931                 if (get_user_ual(addr, gp)
7932                     || !addr)
7933                     break;
7934                 unlock_user(*q, addr, 0);
7935             }
7936 
7937             g_free(argp);
7938             g_free(envp);
7939         }
7940         return ret;
7941     case TARGET_NR_chdir:
7942         if (!(p = lock_user_string(arg1)))
7943             return -TARGET_EFAULT;
7944         ret = get_errno(chdir(p));
7945         unlock_user(p, arg1, 0);
7946         return ret;
7947 #ifdef TARGET_NR_time
7948     case TARGET_NR_time:
7949         {
7950             time_t host_time;
7951             ret = get_errno(time(&host_time));
7952             if (!is_error(ret)
7953                 && arg1
7954                 && put_user_sal(host_time, arg1))
7955                 return -TARGET_EFAULT;
7956         }
7957         return ret;
7958 #endif
7959 #ifdef TARGET_NR_mknod
7960     case TARGET_NR_mknod:
7961         if (!(p = lock_user_string(arg1)))
7962             return -TARGET_EFAULT;
7963         ret = get_errno(mknod(p, arg2, arg3));
7964         unlock_user(p, arg1, 0);
7965         return ret;
7966 #endif
7967 #if defined(TARGET_NR_mknodat)
7968     case TARGET_NR_mknodat:
7969         if (!(p = lock_user_string(arg2)))
7970             return -TARGET_EFAULT;
7971         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7972         unlock_user(p, arg2, 0);
7973         return ret;
7974 #endif
7975 #ifdef TARGET_NR_chmod
7976     case TARGET_NR_chmod:
7977         if (!(p = lock_user_string(arg1)))
7978             return -TARGET_EFAULT;
7979         ret = get_errno(chmod(p, arg2));
7980         unlock_user(p, arg1, 0);
7981         return ret;
7982 #endif
7983 #ifdef TARGET_NR_lseek
7984     case TARGET_NR_lseek:
7985         return get_errno(lseek(arg1, arg2, arg3));
7986 #endif
7987 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7988     /* Alpha specific: getxpid returns the pid in v0 and the ppid in a4.  */
7989     case TARGET_NR_getxpid:
7990         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7991         return get_errno(getpid());
7992 #endif
7993 #ifdef TARGET_NR_getpid
7994     case TARGET_NR_getpid:
7995         return get_errno(getpid());
7996 #endif
7997     case TARGET_NR_mount:
7998         {
7999             /* need to look at the data field */
8000             void *p2, *p3;
8001 
8002             if (arg1) {
8003                 p = lock_user_string(arg1);
8004                 if (!p) {
8005                     return -TARGET_EFAULT;
8006                 }
8007             } else {
8008                 p = NULL;
8009             }
8010 
8011             p2 = lock_user_string(arg2);
8012             if (!p2) {
8013                 if (arg1) {
8014                     unlock_user(p, arg1, 0);
8015                 }
8016                 return -TARGET_EFAULT;
8017             }
8018 
8019             if (arg3) {
8020                 p3 = lock_user_string(arg3);
8021                 if (!p3) {
8022                     if (arg1) {
8023                         unlock_user(p, arg1, 0);
8024                     }
8025                     unlock_user(p2, arg2, 0);
8026                     return -TARGET_EFAULT;
8027                 }
8028             } else {
8029                 p3 = NULL;
8030             }
8031 
8032             /* FIXME - arg5 should be locked, but it isn't clear how to
8033              * do that since it's not guaranteed to be a NULL-terminated
8034              * string.
8035              */
8036             if (!arg5) {
8037                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8038             } else {
8039                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8040             }
8041             ret = get_errno(ret);
8042 
8043             if (arg1) {
8044                 unlock_user(p, arg1, 0);
8045             }
8046             unlock_user(p2, arg2, 0);
8047             if (arg3) {
8048                 unlock_user(p3, arg3, 0);
8049             }
8050         }
8051         return ret;
8052 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8053 #if defined(TARGET_NR_umount)
8054     case TARGET_NR_umount:
8055 #endif
8056 #if defined(TARGET_NR_oldumount)
8057     case TARGET_NR_oldumount:
8058 #endif
8059         if (!(p = lock_user_string(arg1)))
8060             return -TARGET_EFAULT;
8061         ret = get_errno(umount(p));
8062         unlock_user(p, arg1, 0);
8063         return ret;
8064 #endif
8065 #ifdef TARGET_NR_stime /* not on alpha */
8066     case TARGET_NR_stime:
8067         {
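                 /*
                  * The guest passes a time_t in seconds; implementing this
                  * with clock_settime(CLOCK_REALTIME) avoids depending on
                  * the host's obsolete stime() call.
                  */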
8068             struct timespec ts;
8069             ts.tv_nsec = 0;
8070             if (get_user_sal(ts.tv_sec, arg1)) {
8071                 return -TARGET_EFAULT;
8072             }
8073             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8074         }
8075 #endif
8076 #ifdef TARGET_NR_alarm /* not on alpha */
8077     case TARGET_NR_alarm:
8078         return alarm(arg1);
8079 #endif
8080 #ifdef TARGET_NR_pause /* not on alpha */
8081     case TARGET_NR_pause:
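             /*
              * Emulate pause() by suspending on the task's current signal
              * mask; as with the real syscall, the only return value is
              * -TARGET_EINTR, once a signal has been taken.
              */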
8082         if (!block_signals()) {
8083             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8084         }
8085         return -TARGET_EINTR;
8086 #endif
8087 #ifdef TARGET_NR_utime
8088     case TARGET_NR_utime:
8089         {
8090             struct utimbuf tbuf, *host_tbuf;
8091             struct target_utimbuf *target_tbuf;
8092             if (arg2) {
8093                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8094                     return -TARGET_EFAULT;
8095                 tbuf.actime = tswapal(target_tbuf->actime);
8096                 tbuf.modtime = tswapal(target_tbuf->modtime);
8097                 unlock_user_struct(target_tbuf, arg2, 0);
8098                 host_tbuf = &tbuf;
8099             } else {
8100                 host_tbuf = NULL;
8101             }
8102             if (!(p = lock_user_string(arg1)))
8103                 return -TARGET_EFAULT;
8104             ret = get_errno(utime(p, host_tbuf));
8105             unlock_user(p, arg1, 0);
8106         }
8107         return ret;
8108 #endif
8109 #ifdef TARGET_NR_utimes
8110     case TARGET_NR_utimes:
8111         {
8112             struct timeval *tvp, tv[2];
8113             if (arg2) {
8114                 if (copy_from_user_timeval(&tv[0], arg2)
8115                     || copy_from_user_timeval(&tv[1],
8116                                               arg2 + sizeof(struct target_timeval)))
8117                     return -TARGET_EFAULT;
8118                 tvp = tv;
8119             } else {
8120                 tvp = NULL;
8121             }
8122             if (!(p = lock_user_string(arg1)))
8123                 return -TARGET_EFAULT;
8124             ret = get_errno(utimes(p, tvp));
8125             unlock_user(p, arg1, 0);
8126         }
8127         return ret;
8128 #endif
8129 #if defined(TARGET_NR_futimesat)
8130     case TARGET_NR_futimesat:
8131         {
8132             struct timeval *tvp, tv[2];
8133             if (arg3) {
8134                 if (copy_from_user_timeval(&tv[0], arg3)
8135                     || copy_from_user_timeval(&tv[1],
8136                                               arg3 + sizeof(struct target_timeval)))
8137                     return -TARGET_EFAULT;
8138                 tvp = tv;
8139             } else {
8140                 tvp = NULL;
8141             }
8142             if (!(p = lock_user_string(arg2))) {
8143                 return -TARGET_EFAULT;
8144             }
8145             ret = get_errno(futimesat(arg1, path(p), tvp));
8146             unlock_user(p, arg2, 0);
8147         }
8148         return ret;
8149 #endif
8150 #ifdef TARGET_NR_access
8151     case TARGET_NR_access:
8152         if (!(p = lock_user_string(arg1))) {
8153             return -TARGET_EFAULT;
8154         }
8155         ret = get_errno(access(path(p), arg2));
8156         unlock_user(p, arg1, 0);
8157         return ret;
8158 #endif
8159 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8160     case TARGET_NR_faccessat:
8161         if (!(p = lock_user_string(arg2))) {
8162             return -TARGET_EFAULT;
8163         }
8164         ret = get_errno(faccessat(arg1, p, arg3, 0));
8165         unlock_user(p, arg2, 0);
8166         return ret;
8167 #endif
8168 #ifdef TARGET_NR_nice /* not on alpha */
8169     case TARGET_NR_nice:
8170         return get_errno(nice(arg1));
8171 #endif
8172     case TARGET_NR_sync:
8173         sync();
8174         return 0;
8175 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8176     case TARGET_NR_syncfs:
8177         return get_errno(syncfs(arg1));
8178 #endif
8179     case TARGET_NR_kill:
8180         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8181 #ifdef TARGET_NR_rename
8182     case TARGET_NR_rename:
8183         {
8184             void *p2;
8185             p = lock_user_string(arg1);
8186             p2 = lock_user_string(arg2);
8187             if (!p || !p2)
8188                 ret = -TARGET_EFAULT;
8189             else
8190                 ret = get_errno(rename(p, p2));
8191             unlock_user(p2, arg2, 0);
8192             unlock_user(p, arg1, 0);
8193         }
8194         return ret;
8195 #endif
8196 #if defined(TARGET_NR_renameat)
8197     case TARGET_NR_renameat:
8198         {
8199             void *p2;
8200             p  = lock_user_string(arg2);
8201             p2 = lock_user_string(arg4);
8202             if (!p || !p2)
8203                 ret = -TARGET_EFAULT;
8204             else
8205                 ret = get_errno(renameat(arg1, p, arg3, p2));
8206             unlock_user(p2, arg4, 0);
8207             unlock_user(p, arg2, 0);
8208         }
8209         return ret;
8210 #endif
8211 #if defined(TARGET_NR_renameat2)
8212     case TARGET_NR_renameat2:
8213         {
8214             void *p2;
8215             p  = lock_user_string(arg2);
8216             p2 = lock_user_string(arg4);
8217             if (!p || !p2) {
8218                 ret = -TARGET_EFAULT;
8219             } else {
8220                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8221             }
8222             unlock_user(p2, arg4, 0);
8223             unlock_user(p, arg2, 0);
8224         }
8225         return ret;
8226 #endif
8227 #ifdef TARGET_NR_mkdir
8228     case TARGET_NR_mkdir:
8229         if (!(p = lock_user_string(arg1)))
8230             return -TARGET_EFAULT;
8231         ret = get_errno(mkdir(p, arg2));
8232         unlock_user(p, arg1, 0);
8233         return ret;
8234 #endif
8235 #if defined(TARGET_NR_mkdirat)
8236     case TARGET_NR_mkdirat:
8237         if (!(p = lock_user_string(arg2)))
8238             return -TARGET_EFAULT;
8239         ret = get_errno(mkdirat(arg1, p, arg3));
8240         unlock_user(p, arg2, 0);
8241         return ret;
8242 #endif
8243 #ifdef TARGET_NR_rmdir
8244     case TARGET_NR_rmdir:
8245         if (!(p = lock_user_string(arg1)))
8246             return -TARGET_EFAULT;
8247         ret = get_errno(rmdir(p));
8248         unlock_user(p, arg1, 0);
8249         return ret;
8250 #endif
8251     case TARGET_NR_dup:
8252         ret = get_errno(dup(arg1));
8253         if (ret >= 0) {
8254             fd_trans_dup(arg1, ret);
8255         }
8256         return ret;
8257 #ifdef TARGET_NR_pipe
8258     case TARGET_NR_pipe:
8259         return do_pipe(cpu_env, arg1, 0, 0);
8260 #endif
8261 #ifdef TARGET_NR_pipe2
8262     case TARGET_NR_pipe2:
8263         return do_pipe(cpu_env, arg1,
8264                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8265 #endif
8266     case TARGET_NR_times:
8267         {
8268             struct target_tms *tmsp;
8269             struct tms tms;
8270             ret = get_errno(times(&tms));
8271             if (arg1) {
8272                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8273                 if (!tmsp)
8274                     return -TARGET_EFAULT;
8275                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8276                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8277                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8278                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8279             }
8280             if (!is_error(ret))
8281                 ret = host_to_target_clock_t(ret);
8282         }
8283         return ret;
8284     case TARGET_NR_acct:
8285         if (arg1 == 0) {
8286             ret = get_errno(acct(NULL));
8287         } else {
8288             if (!(p = lock_user_string(arg1))) {
8289                 return -TARGET_EFAULT;
8290             }
8291             ret = get_errno(acct(path(p)));
8292             unlock_user(p, arg1, 0);
8293         }
8294         return ret;
8295 #ifdef TARGET_NR_umount2
8296     case TARGET_NR_umount2:
8297         if (!(p = lock_user_string(arg1)))
8298             return -TARGET_EFAULT;
8299         ret = get_errno(umount2(p, arg2));
8300         unlock_user(p, arg1, 0);
8301         return ret;
8302 #endif
8303     case TARGET_NR_ioctl:
8304         return do_ioctl(arg1, arg2, arg3);
8305 #ifdef TARGET_NR_fcntl
8306     case TARGET_NR_fcntl:
8307         return do_fcntl(arg1, arg2, arg3);
8308 #endif
8309     case TARGET_NR_setpgid:
8310         return get_errno(setpgid(arg1, arg2));
8311     case TARGET_NR_umask:
8312         return get_errno(umask(arg1));
8313     case TARGET_NR_chroot:
8314         if (!(p = lock_user_string(arg1)))
8315             return -TARGET_EFAULT;
8316         ret = get_errno(chroot(p));
8317         unlock_user(p, arg1, 0);
8318         return ret;
8319 #ifdef TARGET_NR_dup2
8320     case TARGET_NR_dup2:
8321         ret = get_errno(dup2(arg1, arg2));
8322         if (ret >= 0) {
8323             fd_trans_dup(arg1, arg2);
8324         }
8325         return ret;
8326 #endif
8327 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8328     case TARGET_NR_dup3:
8329     {
8330         int host_flags;
8331 
8332         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8333             return -TARGET_EINVAL;
8334         }
8335         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8336         ret = get_errno(dup3(arg1, arg2, host_flags));
8337         if (ret >= 0) {
8338             fd_trans_dup(arg1, arg2);
8339         }
8340         return ret;
8341     }
8342 #endif
8343 #ifdef TARGET_NR_getppid /* not on alpha */
8344     case TARGET_NR_getppid:
8345         return get_errno(getppid());
8346 #endif
8347 #ifdef TARGET_NR_getpgrp
8348     case TARGET_NR_getpgrp:
8349         return get_errno(getpgrp());
8350 #endif
8351     case TARGET_NR_setsid:
8352         return get_errno(setsid());
8353 #ifdef TARGET_NR_sigaction
8354     case TARGET_NR_sigaction:
8355         {
8356 #if defined(TARGET_ALPHA)
8357             struct target_sigaction act, oact, *pact = 0;
8358             struct target_old_sigaction *old_act;
8359             if (arg2) {
8360                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8361                     return -TARGET_EFAULT;
8362                 act._sa_handler = old_act->_sa_handler;
8363                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8364                 act.sa_flags = old_act->sa_flags;
8365                 act.sa_restorer = 0;
8366                 unlock_user_struct(old_act, arg2, 0);
8367                 pact = &act;
8368             }
8369             ret = get_errno(do_sigaction(arg1, pact, &oact));
8370             if (!is_error(ret) && arg3) {
8371                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8372                     return -TARGET_EFAULT;
8373                 old_act->_sa_handler = oact._sa_handler;
8374                 old_act->sa_mask = oact.sa_mask.sig[0];
8375                 old_act->sa_flags = oact.sa_flags;
8376                 unlock_user_struct(old_act, arg3, 1);
8377             }
8378 #elif defined(TARGET_MIPS)
8379             struct target_sigaction act, oact, *pact, *old_act;
8380 
8381             if (arg2) {
8382                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8383                     return -TARGET_EFAULT;
8384                 act._sa_handler = old_act->_sa_handler;
8385                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8386                 act.sa_flags = old_act->sa_flags;
8387                 unlock_user_struct(old_act, arg2, 0);
8388                 pact = &act;
8389             } else {
8390                 pact = NULL;
8391             }
8392 
8393             ret = get_errno(do_sigaction(arg1, pact, &oact));
8394 
8395             if (!is_error(ret) && arg3) {
8396                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8397                     return -TARGET_EFAULT;
8398                 old_act->_sa_handler = oact._sa_handler;
8399                 old_act->sa_flags = oact.sa_flags;
8400                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8401                 old_act->sa_mask.sig[1] = 0;
8402                 old_act->sa_mask.sig[2] = 0;
8403                 old_act->sa_mask.sig[3] = 0;
8404                 unlock_user_struct(old_act, arg3, 1);
8405             }
8406 #else
8407             struct target_old_sigaction *old_act;
8408             struct target_sigaction act, oact, *pact;
8409             if (arg2) {
8410                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8411                     return -TARGET_EFAULT;
8412                 act._sa_handler = old_act->_sa_handler;
8413                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8414                 act.sa_flags = old_act->sa_flags;
8415                 act.sa_restorer = old_act->sa_restorer;
8416 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8417                 act.ka_restorer = 0;
8418 #endif
8419                 unlock_user_struct(old_act, arg2, 0);
8420                 pact = &act;
8421             } else {
8422                 pact = NULL;
8423             }
8424             ret = get_errno(do_sigaction(arg1, pact, &oact));
8425             if (!is_error(ret) && arg3) {
8426                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8427                     return -TARGET_EFAULT;
8428                 old_act->_sa_handler = oact._sa_handler;
8429                 old_act->sa_mask = oact.sa_mask.sig[0];
8430                 old_act->sa_flags = oact.sa_flags;
8431                 old_act->sa_restorer = oact.sa_restorer;
8432                 unlock_user_struct(old_act, arg3, 1);
8433             }
8434 #endif
8435         }
8436         return ret;
8437 #endif
8438     case TARGET_NR_rt_sigaction:
8439         {
8440 #if defined(TARGET_ALPHA)
8441             /* For Alpha and SPARC this is a 5 argument syscall, with
8442              * a 'restorer' parameter which must be copied into the
8443              * sa_restorer field of the sigaction struct.
8444              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8445              * and arg5 is the sigsetsize.
8446              * Alpha also has a separate rt_sigaction struct that it uses
8447              * here; SPARC uses the usual sigaction struct.
8448              */
8449             struct target_rt_sigaction *rt_act;
8450             struct target_sigaction act, oact, *pact = 0;
8451 
8452             if (arg4 != sizeof(target_sigset_t)) {
8453                 return -TARGET_EINVAL;
8454             }
8455             if (arg2) {
8456                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8457                     return -TARGET_EFAULT;
8458                 act._sa_handler = rt_act->_sa_handler;
8459                 act.sa_mask = rt_act->sa_mask;
8460                 act.sa_flags = rt_act->sa_flags;
8461                 act.sa_restorer = arg5;
8462                 unlock_user_struct(rt_act, arg2, 0);
8463                 pact = &act;
8464             }
8465             ret = get_errno(do_sigaction(arg1, pact, &oact));
8466             if (!is_error(ret) && arg3) {
8467                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8468                     return -TARGET_EFAULT;
8469                 rt_act->_sa_handler = oact._sa_handler;
8470                 rt_act->sa_mask = oact.sa_mask;
8471                 rt_act->sa_flags = oact.sa_flags;
8472                 unlock_user_struct(rt_act, arg3, 1);
8473             }
8474 #else
8475 #ifdef TARGET_SPARC
8476             target_ulong restorer = arg4;
8477             target_ulong sigsetsize = arg5;
8478 #else
8479             target_ulong sigsetsize = arg4;
8480 #endif
8481             struct target_sigaction *act;
8482             struct target_sigaction *oact;
8483 
8484             if (sigsetsize != sizeof(target_sigset_t)) {
8485                 return -TARGET_EINVAL;
8486             }
8487             if (arg2) {
8488                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8489                     return -TARGET_EFAULT;
8490                 }
8491 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8492                 act->ka_restorer = restorer;
8493 #endif
8494             } else {
8495                 act = NULL;
8496             }
8497             if (arg3) {
8498                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8499                     ret = -TARGET_EFAULT;
8500                     goto rt_sigaction_fail;
8501                 }
8502             } else
8503                 oact = NULL;
8504             ret = get_errno(do_sigaction(arg1, act, oact));
8505         rt_sigaction_fail:
8506             if (act)
8507                 unlock_user_struct(act, arg2, 0);
8508             if (oact)
8509                 unlock_user_struct(oact, arg3, 1);
8510 #endif
8511         }
8512         return ret;
8513 #ifdef TARGET_NR_sgetmask /* not on alpha */
8514     case TARGET_NR_sgetmask:
8515         {
8516             sigset_t cur_set;
8517             abi_ulong target_set;
8518             ret = do_sigprocmask(0, NULL, &cur_set);
8519             if (!ret) {
8520                 host_to_target_old_sigset(&target_set, &cur_set);
8521                 ret = target_set;
8522             }
8523         }
8524         return ret;
8525 #endif
8526 #ifdef TARGET_NR_ssetmask /* not on alpha */
8527     case TARGET_NR_ssetmask:
8528         {
8529             sigset_t set, oset;
8530             abi_ulong target_set = arg1;
8531             target_to_host_old_sigset(&set, &target_set);
8532             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8533             if (!ret) {
8534                 host_to_target_old_sigset(&target_set, &oset);
8535                 ret = target_set;
8536             }
8537         }
8538         return ret;
8539 #endif
8540 #ifdef TARGET_NR_sigprocmask
8541     case TARGET_NR_sigprocmask:
8542         {
8543 #if defined(TARGET_ALPHA)
8544             sigset_t set, oldset;
8545             abi_ulong mask;
8546             int how;
8547 
8548             switch (arg1) {
8549             case TARGET_SIG_BLOCK:
8550                 how = SIG_BLOCK;
8551                 break;
8552             case TARGET_SIG_UNBLOCK:
8553                 how = SIG_UNBLOCK;
8554                 break;
8555             case TARGET_SIG_SETMASK:
8556                 how = SIG_SETMASK;
8557                 break;
8558             default:
8559                 return -TARGET_EINVAL;
8560             }
8561             mask = arg2;
8562             target_to_host_old_sigset(&set, &mask);
8563 
8564             ret = do_sigprocmask(how, &set, &oldset);
8565             if (!is_error(ret)) {
8566                 host_to_target_old_sigset(&mask, &oldset);
8567                 ret = mask;
8568                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8569             }
8570 #else
8571             sigset_t set, oldset, *set_ptr;
8572             int how;
8573 
8574             if (arg2) {
8575                 switch (arg1) {
8576                 case TARGET_SIG_BLOCK:
8577                     how = SIG_BLOCK;
8578                     break;
8579                 case TARGET_SIG_UNBLOCK:
8580                     how = SIG_UNBLOCK;
8581                     break;
8582                 case TARGET_SIG_SETMASK:
8583                     how = SIG_SETMASK;
8584                     break;
8585                 default:
8586                     return -TARGET_EINVAL;
8587                 }
8588                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8589                     return -TARGET_EFAULT;
8590                 target_to_host_old_sigset(&set, p);
8591                 unlock_user(p, arg2, 0);
8592                 set_ptr = &set;
8593             } else {
8594                 how = 0;
8595                 set_ptr = NULL;
8596             }
8597             ret = do_sigprocmask(how, set_ptr, &oldset);
8598             if (!is_error(ret) && arg3) {
8599                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8600                     return -TARGET_EFAULT;
8601                 host_to_target_old_sigset(p, &oldset);
8602                 unlock_user(p, arg3, sizeof(target_sigset_t));
8603             }
8604 #endif
8605         }
8606         return ret;
8607 #endif
8608     case TARGET_NR_rt_sigprocmask:
8609         {
8610             int how = arg1;
8611             sigset_t set, oldset, *set_ptr;
8612 
8613             if (arg4 != sizeof(target_sigset_t)) {
8614                 return -TARGET_EINVAL;
8615             }
8616 
8617             if (arg2) {
8618                 switch(how) {
8619                 case TARGET_SIG_BLOCK:
8620                     how = SIG_BLOCK;
8621                     break;
8622                 case TARGET_SIG_UNBLOCK:
8623                     how = SIG_UNBLOCK;
8624                     break;
8625                 case TARGET_SIG_SETMASK:
8626                     how = SIG_SETMASK;
8627                     break;
8628                 default:
8629                     return -TARGET_EINVAL;
8630                 }
8631                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8632                     return -TARGET_EFAULT;
8633                 target_to_host_sigset(&set, p);
8634                 unlock_user(p, arg2, 0);
8635                 set_ptr = &set;
8636             } else {
8637                 how = 0;
8638                 set_ptr = NULL;
8639             }
8640             ret = do_sigprocmask(how, set_ptr, &oldset);
8641             if (!is_error(ret) && arg3) {
8642                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8643                     return -TARGET_EFAULT;
8644                 host_to_target_sigset(p, &oldset);
8645                 unlock_user(p, arg3, sizeof(target_sigset_t));
8646             }
8647         }
8648         return ret;
8649 #ifdef TARGET_NR_sigpending
8650     case TARGET_NR_sigpending:
8651         {
8652             sigset_t set;
8653             ret = get_errno(sigpending(&set));
8654             if (!is_error(ret)) {
8655                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8656                     return -TARGET_EFAULT;
8657                 host_to_target_old_sigset(p, &set);
8658                 unlock_user(p, arg1, sizeof(target_sigset_t));
8659             }
8660         }
8661         return ret;
8662 #endif
8663     case TARGET_NR_rt_sigpending:
8664         {
8665             sigset_t set;
8666 
8667             /* Yes, this check is >, not != like most. We follow the kernel's
8668              * logic, which does it like this because it implements
8669              * NR_sigpending through the same code path, and in that case
8670              * the old_sigset_t is smaller in size.
8671              */
8672             if (arg2 > sizeof(target_sigset_t)) {
8673                 return -TARGET_EINVAL;
8674             }
8675 
8676             ret = get_errno(sigpending(&set));
8677             if (!is_error(ret)) {
8678                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8679                     return -TARGET_EFAULT;
8680                 host_to_target_sigset(p, &set);
8681                 unlock_user(p, arg1, sizeof(target_sigset_t));
8682             }
8683         }
8684         return ret;
8685 #ifdef TARGET_NR_sigsuspend
8686     case TARGET_NR_sigsuspend:
8687         {
8688             TaskState *ts = cpu->opaque;
8689 #if defined(TARGET_ALPHA)
8690             abi_ulong mask = arg1;
8691             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8692 #else
8693             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8694                 return -TARGET_EFAULT;
8695             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8696             unlock_user(p, arg1, 0);
8697 #endif
8698             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8699                                                SIGSET_T_SIZE));
8700             if (ret != -TARGET_ERESTARTSYS) {
8701                 ts->in_sigsuspend = 1;
8702             }
8703         }
8704         return ret;
8705 #endif
8706     case TARGET_NR_rt_sigsuspend:
8707         {
8708             TaskState *ts = cpu->opaque;
8709 
8710             if (arg2 != sizeof(target_sigset_t)) {
8711                 return -TARGET_EINVAL;
8712             }
8713             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8714                 return -TARGET_EFAULT;
8715             target_to_host_sigset(&ts->sigsuspend_mask, p);
8716             unlock_user(p, arg1, 0);
8717             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8718                                                SIGSET_T_SIZE));
8719             if (ret != -TARGET_ERESTARTSYS) {
8720                 ts->in_sigsuspend = 1;
8721             }
8722         }
8723         return ret;
8724 #ifdef TARGET_NR_rt_sigtimedwait
8725     case TARGET_NR_rt_sigtimedwait:
8726         {
8727             sigset_t set;
8728             struct timespec uts, *puts;
8729             siginfo_t uinfo;
8730 
8731             if (arg4 != sizeof(target_sigset_t)) {
8732                 return -TARGET_EINVAL;
8733             }
8734 
8735             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8736                 return -TARGET_EFAULT;
8737             target_to_host_sigset(&set, p);
8738             unlock_user(p, arg1, 0);
8739             if (arg3) {
8740                 puts = &uts;
8741                 if (target_to_host_timespec(puts, arg3)) {
                         return -TARGET_EFAULT;
                     }
8742             } else {
8743                 puts = NULL;
8744             }
8745             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8746                                                  SIGSET_T_SIZE));
8747             if (!is_error(ret)) {
8748                 if (arg2) {
8749                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8750                                   0);
8751                     if (!p) {
8752                         return -TARGET_EFAULT;
8753                     }
8754                     host_to_target_siginfo(p, &uinfo);
8755                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8756                 }
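                     /*
                      * The host returns the host signal number; map it back
                      * to the target's numbering.
                      */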
8757                 ret = host_to_target_signal(ret);
8758             }
8759         }
8760         return ret;
8761 #endif
8762     case TARGET_NR_rt_sigqueueinfo:
8763         {
8764             siginfo_t uinfo;
8765 
8766             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8767             if (!p) {
8768                 return -TARGET_EFAULT;
8769             }
8770             target_to_host_siginfo(&uinfo, p);
8771             unlock_user(p, arg3, 0);
8772             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8773         }
8774         return ret;
8775     case TARGET_NR_rt_tgsigqueueinfo:
8776         {
8777             siginfo_t uinfo;
8778 
8779             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8780             if (!p) {
8781                 return -TARGET_EFAULT;
8782             }
8783             target_to_host_siginfo(&uinfo, p);
8784             unlock_user(p, arg4, 0);
8785             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8786         }
8787         return ret;
8788 #ifdef TARGET_NR_sigreturn
8789     case TARGET_NR_sigreturn:
8790         if (block_signals()) {
8791             return -TARGET_ERESTARTSYS;
8792         }
8793         return do_sigreturn(cpu_env);
8794 #endif
8795     case TARGET_NR_rt_sigreturn:
8796         if (block_signals()) {
8797             return -TARGET_ERESTARTSYS;
8798         }
8799         return do_rt_sigreturn(cpu_env);
8800     case TARGET_NR_sethostname:
8801         if (!(p = lock_user_string(arg1)))
8802             return -TARGET_EFAULT;
8803         ret = get_errno(sethostname(p, arg2));
8804         unlock_user(p, arg1, 0);
8805         return ret;
8806 #ifdef TARGET_NR_setrlimit
8807     case TARGET_NR_setrlimit:
8808         {
8809             int resource = target_to_host_resource(arg1);
8810             struct target_rlimit *target_rlim;
8811             struct rlimit rlim;
8812             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8813                 return -TARGET_EFAULT;
8814             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8815             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8816             unlock_user_struct(target_rlim, arg2, 0);
8817             /*
8818              * If we just passed through resource limit settings for memory then
8819              * they would also apply to QEMU's own allocations, and QEMU will
8820              * crash or hang or die if its allocations fail. Ideally we would
8821              * track the guest allocations in QEMU and apply the limits ourselves.
8822              * For now, just tell the guest the call succeeded but don't actually
8823              * limit anything.
8824              */
8825             if (resource != RLIMIT_AS &&
8826                 resource != RLIMIT_DATA &&
8827                 resource != RLIMIT_STACK) {
8828                 return get_errno(setrlimit(resource, &rlim));
8829             } else {
8830                 return 0;
8831             }
8832         }
8833 #endif
8834 #ifdef TARGET_NR_getrlimit
8835     case TARGET_NR_getrlimit:
8836         {
8837             int resource = target_to_host_resource(arg1);
8838             struct target_rlimit *target_rlim;
8839             struct rlimit rlim;
8840 
8841             ret = get_errno(getrlimit(resource, &rlim));
8842             if (!is_error(ret)) {
8843                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8844                     return -TARGET_EFAULT;
8845                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8846                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8847                 unlock_user_struct(target_rlim, arg2, 1);
8848             }
8849         }
8850         return ret;
8851 #endif
8852     case TARGET_NR_getrusage:
8853         {
8854             struct rusage rusage;
8855             ret = get_errno(getrusage(arg1, &rusage));
8856             if (!is_error(ret)) {
8857                 ret = host_to_target_rusage(arg2, &rusage);
8858             }
8859         }
8860         return ret;
8861 #if defined(TARGET_NR_gettimeofday)
8862     case TARGET_NR_gettimeofday:
8863         {
8864             struct timeval tv;
8865             struct timezone tz;
8866 
8867             ret = get_errno(gettimeofday(&tv, &tz));
8868             if (!is_error(ret)) {
8869                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
8870                     return -TARGET_EFAULT;
8871                 }
8872                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
8873                     return -TARGET_EFAULT;
8874                 }
8875             }
8876         }
8877         return ret;
8878 #endif
8879 #if defined(TARGET_NR_settimeofday)
8880     case TARGET_NR_settimeofday:
8881         {
8882             struct timeval tv, *ptv = NULL;
8883             struct timezone tz, *ptz = NULL;
8884 
8885             if (arg1) {
8886                 if (copy_from_user_timeval(&tv, arg1)) {
8887                     return -TARGET_EFAULT;
8888                 }
8889                 ptv = &tv;
8890             }
8891 
8892             if (arg2) {
8893                 if (copy_from_user_timezone(&tz, arg2)) {
8894                     return -TARGET_EFAULT;
8895                 }
8896                 ptz = &tz;
8897             }
8898 
8899             return get_errno(settimeofday(ptv, ptz));
8900         }
8901 #endif
8902 #if defined(TARGET_NR_select)
8903     case TARGET_NR_select:
8904 #if defined(TARGET_WANT_NI_OLD_SELECT)
8905         /* some architectures used to have old_select here
8906          * but now return ENOSYS for it.
8907          */
8908         ret = -TARGET_ENOSYS;
8909 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8910         ret = do_old_select(arg1);
8911 #else
8912         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8913 #endif
8914         return ret;
8915 #endif
8916 #ifdef TARGET_NR_pselect6
8917     case TARGET_NR_pselect6:
8918         {
8919             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8920             fd_set rfds, wfds, efds;
8921             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8922             struct timespec ts, *ts_ptr;
8923 
8924             /*
8925              * The 6th arg is actually two args smashed together,
8926              * so we cannot use the C library.
8927              */
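                 /*
                  * arg6 points at two abi_ulongs in guest memory: the guest
                  * address of the sigset and its size, mirroring the
                  * kernel's own argument packing for pselect6.
                  */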
8928             sigset_t set;
8929             struct {
8930                 sigset_t *set;
8931                 size_t size;
8932             } sig, *sig_ptr;
8933 
8934             abi_ulong arg_sigset, arg_sigsize, *arg7;
8935             target_sigset_t *target_sigset;
8936 
8937             n = arg1;
8938             rfd_addr = arg2;
8939             wfd_addr = arg3;
8940             efd_addr = arg4;
8941             ts_addr = arg5;
8942 
8943             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8944             if (ret) {
8945                 return ret;
8946             }
8947             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8948             if (ret) {
8949                 return ret;
8950             }
8951             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8952             if (ret) {
8953                 return ret;
8954             }
8955 
8956             /*
8957              * This takes a timespec, and not a timeval, so we cannot
8958              * use the do_select() helper ...
8959              */
8960             if (ts_addr) {
8961                 if (target_to_host_timespec(&ts, ts_addr)) {
8962                     return -TARGET_EFAULT;
8963                 }
8964                 ts_ptr = &ts;
8965             } else {
8966                 ts_ptr = NULL;
8967             }
8968 
8969             /* Extract the two packed args for the sigset */
8970             if (arg6) {
8971                 sig_ptr = &sig;
8972                 sig.size = SIGSET_T_SIZE;
8973 
8974                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8975                 if (!arg7) {
8976                     return -TARGET_EFAULT;
8977                 }
8978                 arg_sigset = tswapal(arg7[0]);
8979                 arg_sigsize = tswapal(arg7[1]);
8980                 unlock_user(arg7, arg6, 0);
8981 
8982                 if (arg_sigset) {
8983                     sig.set = &set;
8984                     if (arg_sigsize != sizeof(*target_sigset)) {
8985                         /* Like the kernel, we enforce correct size sigsets */
8986                         return -TARGET_EINVAL;
8987                     }
8988                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8989                                               sizeof(*target_sigset), 1);
8990                     if (!target_sigset) {
8991                         return -TARGET_EFAULT;
8992                     }
8993                     target_to_host_sigset(&set, target_sigset);
8994                     unlock_user(target_sigset, arg_sigset, 0);
8995                 } else {
8996                     sig.set = NULL;
8997                 }
8998             } else {
8999                 sig_ptr = NULL;
9000             }
9001 
9002             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9003                                           ts_ptr, sig_ptr));
9004 
9005             if (!is_error(ret)) {
9006                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9007                     return -TARGET_EFAULT;
9008                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9009                     return -TARGET_EFAULT;
9010                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9011                     return -TARGET_EFAULT;
9012 
9013                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9014                     return -TARGET_EFAULT;
9015             }
9016         }
9017         return ret;
9018 #endif
9019 #ifdef TARGET_NR_symlink
9020     case TARGET_NR_symlink:
9021         {
9022             void *p2;
9023             p = lock_user_string(arg1);
9024             p2 = lock_user_string(arg2);
9025             if (!p || !p2)
9026                 ret = -TARGET_EFAULT;
9027             else
9028                 ret = get_errno(symlink(p, p2));
9029             unlock_user(p2, arg2, 0);
9030             unlock_user(p, arg1, 0);
9031         }
9032         return ret;
9033 #endif
9034 #if defined(TARGET_NR_symlinkat)
9035     case TARGET_NR_symlinkat:
9036         {
9037             void *p2;
9038             p  = lock_user_string(arg1);
9039             p2 = lock_user_string(arg3);
9040             if (!p || !p2)
9041                 ret = -TARGET_EFAULT;
9042             else
9043                 ret = get_errno(symlinkat(p, arg2, p2));
9044             unlock_user(p2, arg3, 0);
9045             unlock_user(p, arg1, 0);
9046         }
9047         return ret;
9048 #endif
9049 #ifdef TARGET_NR_readlink
9050     case TARGET_NR_readlink:
9051         {
9052             void *p2;
9053             p = lock_user_string(arg1);
9054             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9055             if (!p || !p2) {
9056                 ret = -TARGET_EFAULT;
9057             } else if (!arg3) {
9058                 /* Short circuit this for the magic exe check. */
9059                 ret = -TARGET_EINVAL;
9060             } else if (is_proc_myself((const char *)p, "exe")) {
9061                 char real[PATH_MAX], *temp;
9062                 temp = realpath(exec_path, real);
9063                 /* Return value is # of bytes that we wrote to the buffer. */
9064                 if (temp == NULL) {
9065                     ret = get_errno(-1);
9066                 } else {
9067                     /* Don't worry about sign mismatch as earlier mapping
9068                      * logic would have thrown a bad address error. */
9069                     ret = MIN(strlen(real), arg3);
9070                     /* We cannot NUL terminate the string. */
9071                     memcpy(p2, real, ret);
9072                 }
9073             } else {
9074                 ret = get_errno(readlink(path(p), p2, arg3));
9075             }
9076             unlock_user(p2, arg2, ret);
9077             unlock_user(p, arg1, 0);
9078         }
9079         return ret;
9080 #endif
9081 #if defined(TARGET_NR_readlinkat)
9082     case TARGET_NR_readlinkat:
9083         {
9084             void *p2;
9085             p  = lock_user_string(arg2);
9086             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9087             if (!p || !p2) {
9088                 ret = -TARGET_EFAULT;
9089             } else if (is_proc_myself((const char *)p, "exe")) {
9090                 char real[PATH_MAX], *temp;
9091                 temp = realpath(exec_path, real);
9092             ret = temp == NULL ? get_errno(-1) : strlen(real);
9093                 snprintf((char *)p2, arg4, "%s", real);
9094             } else {
9095                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9096             }
9097             unlock_user(p2, arg3, ret);
9098             unlock_user(p, arg2, 0);
9099         }
9100         return ret;
9101 #endif
9102 #ifdef TARGET_NR_swapon
9103     case TARGET_NR_swapon:
9104         if (!(p = lock_user_string(arg1)))
9105             return -TARGET_EFAULT;
9106         ret = get_errno(swapon(p, arg2));
9107         unlock_user(p, arg1, 0);
9108         return ret;
9109 #endif
9110     case TARGET_NR_reboot:
9111         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9112            /* arg4 is only used with RESTART2; it must be ignored in all other cases */
9113            p = lock_user_string(arg4);
9114            if (!p) {
9115                return -TARGET_EFAULT;
9116            }
9117            ret = get_errno(reboot(arg1, arg2, arg3, p));
9118            unlock_user(p, arg4, 0);
9119         } else {
9120            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9121         }
9122         return ret;
9123 #ifdef TARGET_NR_mmap
9124     case TARGET_NR_mmap:
9125 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9126     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9127     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9128     || defined(TARGET_S390X)
9129         {
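                 /*
                  * On these targets the old mmap() takes a single guest
                  * pointer to a block holding all six arguments rather than
                  * passing them in registers.
                  */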
9130             abi_ulong *v;
9131             abi_ulong v1, v2, v3, v4, v5, v6;
9132             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9133                 return -TARGET_EFAULT;
9134             v1 = tswapal(v[0]);
9135             v2 = tswapal(v[1]);
9136             v3 = tswapal(v[2]);
9137             v4 = tswapal(v[3]);
9138             v5 = tswapal(v[4]);
9139             v6 = tswapal(v[5]);
9140             unlock_user(v, arg1, 0);
9141             ret = get_errno(target_mmap(v1, v2, v3,
9142                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9143                                         v5, v6));
9144         }
9145 #else
9146         ret = get_errno(target_mmap(arg1, arg2, arg3,
9147                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9148                                     arg5,
9149                                     arg6));
9150 #endif
9151         return ret;
9152 #endif
9153 #ifdef TARGET_NR_mmap2
9154     case TARGET_NR_mmap2:
9155 #ifndef MMAP_SHIFT
9156 #define MMAP_SHIFT 12
9157 #endif
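             /*
              * mmap2 passes the file offset in units of 1 << MMAP_SHIFT
              * bytes (4096 by default), so scale it back to bytes here.
              */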
9158         ret = target_mmap(arg1, arg2, arg3,
9159                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9160                           arg5, arg6 << MMAP_SHIFT);
9161         return get_errno(ret);
9162 #endif
9163     case TARGET_NR_munmap:
9164         return get_errno(target_munmap(arg1, arg2));
9165     case TARGET_NR_mprotect:
9166         {
9167             TaskState *ts = cpu->opaque;
9168             /* Special hack to detect libc making the stack executable.  */
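                 /*
                  * PROT_GROWSDOWN asks the kernel to extend the protection
                  * change down to the start of the stack mapping; emulate
                  * that by widening the range to the recorded stack limit
                  * and dropping the flag.
                  */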
9169             if ((arg3 & PROT_GROWSDOWN)
9170                 && arg1 >= ts->info->stack_limit
9171                 && arg1 <= ts->info->start_stack) {
9172                 arg3 &= ~PROT_GROWSDOWN;
9173                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9174                 arg1 = ts->info->stack_limit;
9175             }
9176         }
9177         return get_errno(target_mprotect(arg1, arg2, arg3));
9178 #ifdef TARGET_NR_mremap
9179     case TARGET_NR_mremap:
9180         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9181 #endif
9182         /* ??? msync/mlock/munlock are broken for softmmu.  */
9183 #ifdef TARGET_NR_msync
9184     case TARGET_NR_msync:
9185         return get_errno(msync(g2h(arg1), arg2, arg3));
9186 #endif
9187 #ifdef TARGET_NR_mlock
9188     case TARGET_NR_mlock:
9189         return get_errno(mlock(g2h(arg1), arg2));
9190 #endif
9191 #ifdef TARGET_NR_munlock
9192     case TARGET_NR_munlock:
9193         return get_errno(munlock(g2h(arg1), arg2));
9194 #endif
9195 #ifdef TARGET_NR_mlockall
9196     case TARGET_NR_mlockall:
9197         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9198 #endif
9199 #ifdef TARGET_NR_munlockall
9200     case TARGET_NR_munlockall:
9201         return get_errno(munlockall());
9202 #endif
9203 #ifdef TARGET_NR_truncate
9204     case TARGET_NR_truncate:
9205         if (!(p = lock_user_string(arg1)))
9206             return -TARGET_EFAULT;
9207         ret = get_errno(truncate(p, arg2));
9208         unlock_user(p, arg1, 0);
9209         return ret;
9210 #endif
9211 #ifdef TARGET_NR_ftruncate
9212     case TARGET_NR_ftruncate:
9213         return get_errno(ftruncate(arg1, arg2));
9214 #endif
9215     case TARGET_NR_fchmod:
9216         return get_errno(fchmod(arg1, arg2));
9217 #if defined(TARGET_NR_fchmodat)
9218     case TARGET_NR_fchmodat:
9219         if (!(p = lock_user_string(arg2)))
9220             return -TARGET_EFAULT;
9221         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9222         unlock_user(p, arg2, 0);
9223         return ret;
9224 #endif
9225     case TARGET_NR_getpriority:
9226         /* Note that negative values are valid for getpriority, so we must
9227            differentiate based on errno settings.  */
9228         errno = 0;
9229         ret = getpriority(arg1, arg2);
9230         if (ret == -1 && errno != 0) {
9231             return -host_to_target_errno(errno);
9232         }
9233 #ifdef TARGET_ALPHA
9234         /* Return value is the unbiased priority.  Signal no error.  */
9235         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9236 #else
9237         /* Return value is a biased priority to avoid negative numbers.  */
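        /* The kernel ABI reports nice values as 20 - nice (1..40); the
           guest's libc wrapper converts this back to -20..19.  */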
9238         ret = 20 - ret;
9239 #endif
9240         return ret;
9241     case TARGET_NR_setpriority:
9242         return get_errno(setpriority(arg1, arg2, arg3));
9243 #ifdef TARGET_NR_statfs
9244     case TARGET_NR_statfs:
9245         if (!(p = lock_user_string(arg1))) {
9246             return -TARGET_EFAULT;
9247         }
9248         ret = get_errno(statfs(path(p), &stfs));
9249         unlock_user(p, arg1, 0);
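        /* fstatfs below jumps here to share the host-to-target statfs
           conversion.  */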
9250     convert_statfs:
9251         if (!is_error(ret)) {
9252             struct target_statfs *target_stfs;
9253 
9254             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9255                 return -TARGET_EFAULT;
9256             __put_user(stfs.f_type, &target_stfs->f_type);
9257             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9258             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9259             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9260             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9261             __put_user(stfs.f_files, &target_stfs->f_files);
9262             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9263             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9264             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9265             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9266             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9267 #ifdef _STATFS_F_FLAGS
9268             __put_user(stfs.f_flags, &target_stfs->f_flags);
9269 #else
9270             __put_user(0, &target_stfs->f_flags);
9271 #endif
9272             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9273             unlock_user_struct(target_stfs, arg2, 1);
9274         }
9275         return ret;
9276 #endif
9277 #ifdef TARGET_NR_fstatfs
9278     case TARGET_NR_fstatfs:
9279         ret = get_errno(fstatfs(arg1, &stfs));
9280         goto convert_statfs;
9281 #endif
9282 #ifdef TARGET_NR_statfs64
9283     case TARGET_NR_statfs64:
9284         if (!(p = lock_user_string(arg1))) {
9285             return -TARGET_EFAULT;
9286         }
9287         ret = get_errno(statfs(path(p), &stfs));
9288         unlock_user(p, arg1, 0);
9289     convert_statfs64:
9290         if (!is_error(ret)) {
9291             struct target_statfs64 *target_stfs;
9292 
9293             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9294                 return -TARGET_EFAULT;
9295             __put_user(stfs.f_type, &target_stfs->f_type);
9296             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9297             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9298             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9299             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9300             __put_user(stfs.f_files, &target_stfs->f_files);
9301             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9302             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9303             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9304             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9305             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9306             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9307             unlock_user_struct(target_stfs, arg3, 1);
9308         }
9309         return ret;
9310     case TARGET_NR_fstatfs64:
9311         ret = get_errno(fstatfs(arg1, &stfs));
9312         goto convert_statfs64;
9313 #endif
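    /*
     * Socket calls: the layout of sockaddr, msghdr and ancillary data
     * differs between target and host, so these are all routed through
     * the do_*() helpers elsewhere in this file.
     */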
9314 #ifdef TARGET_NR_socketcall
9315     case TARGET_NR_socketcall:
9316         return do_socketcall(arg1, arg2);
9317 #endif
9318 #ifdef TARGET_NR_accept
9319     case TARGET_NR_accept:
9320         return do_accept4(arg1, arg2, arg3, 0);
9321 #endif
9322 #ifdef TARGET_NR_accept4
9323     case TARGET_NR_accept4:
9324         return do_accept4(arg1, arg2, arg3, arg4);
9325 #endif
9326 #ifdef TARGET_NR_bind
9327     case TARGET_NR_bind:
9328         return do_bind(arg1, arg2, arg3);
9329 #endif
9330 #ifdef TARGET_NR_connect
9331     case TARGET_NR_connect:
9332         return do_connect(arg1, arg2, arg3);
9333 #endif
9334 #ifdef TARGET_NR_getpeername
9335     case TARGET_NR_getpeername:
9336         return do_getpeername(arg1, arg2, arg3);
9337 #endif
9338 #ifdef TARGET_NR_getsockname
9339     case TARGET_NR_getsockname:
9340         return do_getsockname(arg1, arg2, arg3);
9341 #endif
9342 #ifdef TARGET_NR_getsockopt
9343     case TARGET_NR_getsockopt:
9344         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9345 #endif
9346 #ifdef TARGET_NR_listen
9347     case TARGET_NR_listen:
9348         return get_errno(listen(arg1, arg2));
9349 #endif
9350 #ifdef TARGET_NR_recv
9351     case TARGET_NR_recv:
9352         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9353 #endif
9354 #ifdef TARGET_NR_recvfrom
9355     case TARGET_NR_recvfrom:
9356         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9357 #endif
9358 #ifdef TARGET_NR_recvmsg
9359     case TARGET_NR_recvmsg:
9360         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9361 #endif
9362 #ifdef TARGET_NR_send
9363     case TARGET_NR_send:
9364         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9365 #endif
9366 #ifdef TARGET_NR_sendmsg
9367     case TARGET_NR_sendmsg:
9368         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9369 #endif
9370 #ifdef TARGET_NR_sendmmsg
9371     case TARGET_NR_sendmmsg:
9372         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9373 #endif
9374 #ifdef TARGET_NR_recvmmsg
9375     case TARGET_NR_recvmmsg:
9376         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9377 #endif
9378 #ifdef TARGET_NR_sendto
9379     case TARGET_NR_sendto:
9380         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9381 #endif
9382 #ifdef TARGET_NR_shutdown
9383     case TARGET_NR_shutdown:
9384         return get_errno(shutdown(arg1, arg2));
9385 #endif
9386 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9387     case TARGET_NR_getrandom:
9388         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9389         if (!p) {
9390             return -TARGET_EFAULT;
9391         }
9392         ret = get_errno(getrandom(p, arg2, arg3));
9393         unlock_user(p, arg1, ret);
9394         return ret;
9395 #endif
9396 #ifdef TARGET_NR_socket
9397     case TARGET_NR_socket:
9398         return do_socket(arg1, arg2, arg3);
9399 #endif
9400 #ifdef TARGET_NR_socketpair
9401     case TARGET_NR_socketpair:
9402         return do_socketpair(arg1, arg2, arg3, arg4);
9403 #endif
9404 #ifdef TARGET_NR_setsockopt
9405     case TARGET_NR_setsockopt:
9406         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9407 #endif
9408 #if defined(TARGET_NR_syslog)
9409     case TARGET_NR_syslog:
9410         {
9411             int len = arg3;  /* buffer length; arg2 is the buffer pointer */
9412 
9413             switch (arg1) {
9414             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9415             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9416             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9417             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9418             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9419             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9420             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9421             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9422                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9423             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9424             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9425             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9426                 {
9427                     if (len < 0) {
9428                         return -TARGET_EINVAL;
9429                     }
9430                     if (len == 0) {
9431                         return 0;
9432                     }
9433                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9434                     if (!p) {
9435                         return -TARGET_EFAULT;
9436                     }
9437                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9438                     unlock_user(p, arg2, arg3);
9439                 }
9440                 return ret;
9441             default:
9442                 return -TARGET_EINVAL;
9443             }
9444         }
9445         break;
9446 #endif
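    /* setitimer/getitimer: struct itimerval is converted field by field
       because the target's timeval layout need not match the host's.  */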
9447     case TARGET_NR_setitimer:
9448         {
9449             struct itimerval value, ovalue, *pvalue;
9450 
9451             if (arg2) {
9452                 pvalue = &value;
9453                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9454                     || copy_from_user_timeval(&pvalue->it_value,
9455                                               arg2 + sizeof(struct target_timeval)))
9456                     return -TARGET_EFAULT;
9457             } else {
9458                 pvalue = NULL;
9459             }
9460             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9461             if (!is_error(ret) && arg3) {
9462                 if (copy_to_user_timeval(arg3,
9463                                          &ovalue.it_interval)
9464                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9465                                             &ovalue.it_value))
9466                     return -TARGET_EFAULT;
9467             }
9468         }
9469         return ret;
9470     case TARGET_NR_getitimer:
9471         {
9472             struct itimerval value;
9473 
9474             ret = get_errno(getitimer(arg1, &value));
9475             if (!is_error(ret) && arg2) {
9476                 if (copy_to_user_timeval(arg2,
9477                                          &value.it_interval)
9478                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9479                                             &value.it_value))
9480                     return -TARGET_EFAULT;
9481             }
9482         }
9483         return ret;
9484 #ifdef TARGET_NR_stat
9485     case TARGET_NR_stat:
9486         if (!(p = lock_user_string(arg1))) {
9487             return -TARGET_EFAULT;
9488         }
9489         ret = get_errno(stat(path(p), &st));
9490         unlock_user(p, arg1, 0);
9491         goto do_stat;
9492 #endif
9493 #ifdef TARGET_NR_lstat
9494     case TARGET_NR_lstat:
9495         if (!(p = lock_user_string(arg1))) {
9496             return -TARGET_EFAULT;
9497         }
9498         ret = get_errno(lstat(path(p), &st));
9499         unlock_user(p, arg1, 0);
9500         goto do_stat;
9501 #endif
9502 #ifdef TARGET_NR_fstat
9503     case TARGET_NR_fstat:
9504         {
9505             ret = get_errno(fstat(arg1, &st));
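            /* stat and lstat above jump here to share the conversion to
               the target's struct stat layout.  */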
9506 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9507         do_stat:
9508 #endif
9509             if (!is_error(ret)) {
9510                 struct target_stat *target_st;
9511 
9512                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9513                     return -TARGET_EFAULT;
9514                 memset(target_st, 0, sizeof(*target_st));
9515                 __put_user(st.st_dev, &target_st->st_dev);
9516                 __put_user(st.st_ino, &target_st->st_ino);
9517                 __put_user(st.st_mode, &target_st->st_mode);
9518                 __put_user(st.st_uid, &target_st->st_uid);
9519                 __put_user(st.st_gid, &target_st->st_gid);
9520                 __put_user(st.st_nlink, &target_st->st_nlink);
9521                 __put_user(st.st_rdev, &target_st->st_rdev);
9522                 __put_user(st.st_size, &target_st->st_size);
9523                 __put_user(st.st_blksize, &target_st->st_blksize);
9524                 __put_user(st.st_blocks, &target_st->st_blocks);
9525                 __put_user(st.st_atime, &target_st->target_st_atime);
9526                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9527                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9528 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9529     defined(TARGET_STAT_HAVE_NSEC)
9530                 __put_user(st.st_atim.tv_nsec,
9531                            &target_st->target_st_atime_nsec);
9532                 __put_user(st.st_mtim.tv_nsec,
9533                            &target_st->target_st_mtime_nsec);
9534                 __put_user(st.st_ctim.tv_nsec,
9535                            &target_st->target_st_ctime_nsec);
9536 #endif
9537                 unlock_user_struct(target_st, arg2, 1);
9538             }
9539         }
9540         return ret;
9541 #endif
9542     case TARGET_NR_vhangup:
9543         return get_errno(vhangup());
9544 #ifdef TARGET_NR_syscall
9545     case TARGET_NR_syscall:
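        /* Indirect syscall: arg1 carries the real syscall number and the
           remaining arguments shift down by one.  */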
9546         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9547                           arg6, arg7, arg8, 0);
9548 #endif
9549 #if defined(TARGET_NR_wait4)
9550     case TARGET_NR_wait4:
9551         {
9552             int status;
9553             abi_long status_ptr = arg2;
9554             struct rusage rusage, *rusage_ptr;
9555             abi_ulong target_rusage = arg4;
9556             abi_long rusage_err;
9557             if (target_rusage)
9558                 rusage_ptr = &rusage;
9559             else
9560                 rusage_ptr = NULL;
9561             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9562             if (!is_error(ret)) {
9563                 if (status_ptr && ret) {
9564                     status = host_to_target_waitstatus(status);
9565                     if (put_user_s32(status, status_ptr))
9566                         return -TARGET_EFAULT;
9567                 }
9568                 if (target_rusage) {
9569                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9570                     if (rusage_err) {
9571                         ret = rusage_err;
9572                     }
9573                 }
9574             }
9575         }
9576         return ret;
9577 #endif
9578 #ifdef TARGET_NR_swapoff
9579     case TARGET_NR_swapoff:
9580         if (!(p = lock_user_string(arg1)))
9581             return -TARGET_EFAULT;
9582         ret = get_errno(swapoff(p));
9583         unlock_user(p, arg1, 0);
9584         return ret;
9585 #endif
9586     case TARGET_NR_sysinfo:
9587         {
9588             struct target_sysinfo *target_value;
9589             struct sysinfo value;
9590             ret = get_errno(sysinfo(&value));
9591             if (!is_error(ret) && arg1)
9592             {
9593                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9594                     return -TARGET_EFAULT;
9595                 __put_user(value.uptime, &target_value->uptime);
9596                 __put_user(value.loads[0], &target_value->loads[0]);
9597                 __put_user(value.loads[1], &target_value->loads[1]);
9598                 __put_user(value.loads[2], &target_value->loads[2]);
9599                 __put_user(value.totalram, &target_value->totalram);
9600                 __put_user(value.freeram, &target_value->freeram);
9601                 __put_user(value.sharedram, &target_value->sharedram);
9602                 __put_user(value.bufferram, &target_value->bufferram);
9603                 __put_user(value.totalswap, &target_value->totalswap);
9604                 __put_user(value.freeswap, &target_value->freeswap);
9605                 __put_user(value.procs, &target_value->procs);
9606                 __put_user(value.totalhigh, &target_value->totalhigh);
9607                 __put_user(value.freehigh, &target_value->freehigh);
9608                 __put_user(value.mem_unit, &target_value->mem_unit);
9609                 unlock_user_struct(target_value, arg1, 1);
9610             }
9611         }
9612         return ret;
9613 #ifdef TARGET_NR_ipc
9614     case TARGET_NR_ipc:
9615         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9616 #endif
9617 #ifdef TARGET_NR_semget
9618     case TARGET_NR_semget:
9619         return get_errno(semget(arg1, arg2, arg3));
9620 #endif
9621 #ifdef TARGET_NR_semop
9622     case TARGET_NR_semop:
9623         return do_semop(arg1, arg2, arg3);
9624 #endif
9625 #ifdef TARGET_NR_semctl
9626     case TARGET_NR_semctl:
9627         return do_semctl(arg1, arg2, arg3, arg4);
9628 #endif
9629 #ifdef TARGET_NR_msgctl
9630     case TARGET_NR_msgctl:
9631         return do_msgctl(arg1, arg2, arg3);
9632 #endif
9633 #ifdef TARGET_NR_msgget
9634     case TARGET_NR_msgget:
9635         return get_errno(msgget(arg1, arg2));
9636 #endif
9637 #ifdef TARGET_NR_msgrcv
9638     case TARGET_NR_msgrcv:
9639         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9640 #endif
9641 #ifdef TARGET_NR_msgsnd
9642     case TARGET_NR_msgsnd:
9643         return do_msgsnd(arg1, arg2, arg3, arg4);
9644 #endif
9645 #ifdef TARGET_NR_shmget
9646     case TARGET_NR_shmget:
9647         return get_errno(shmget(arg1, arg2, arg3));
9648 #endif
9649 #ifdef TARGET_NR_shmctl
9650     case TARGET_NR_shmctl:
9651         return do_shmctl(arg1, arg2, arg3);
9652 #endif
9653 #ifdef TARGET_NR_shmat
9654     case TARGET_NR_shmat:
9655         return do_shmat(cpu_env, arg1, arg2, arg3);
9656 #endif
9657 #ifdef TARGET_NR_shmdt
9658     case TARGET_NR_shmdt:
9659         return do_shmdt(arg1);
9660 #endif
9661     case TARGET_NR_fsync:
9662         return get_errno(fsync(arg1));
9663     case TARGET_NR_clone:
9664         /* Linux manages to have three different orderings for its
9665          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9666          * match the kernel's CONFIG_CLONE_* settings.
9667          * Microblaze is further special in that it uses a sixth
9668          * implicit argument to clone for the TLS pointer.
9669          */
9670 #if defined(TARGET_MICROBLAZE)
9671         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9672 #elif defined(TARGET_CLONE_BACKWARDS)
9673         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9674 #elif defined(TARGET_CLONE_BACKWARDS2)
9675         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9676 #else
9677         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9678 #endif
9679         return ret;
9680 #ifdef __NR_exit_group
9681         /* new thread calls */
9682     case TARGET_NR_exit_group:
9683         preexit_cleanup(cpu_env, arg1);
9684         return get_errno(exit_group(arg1));
9685 #endif
9686     case TARGET_NR_setdomainname:
9687         if (!(p = lock_user_string(arg1)))
9688             return -TARGET_EFAULT;
9689         ret = get_errno(setdomainname(p, arg2));
9690         unlock_user(p, arg1, 0);
9691         return ret;
9692     case TARGET_NR_uname:
9693         /* no need to transcode because we use the linux syscall */
9694         {
9695             struct new_utsname * buf;
9696 
9697             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9698                 return -TARGET_EFAULT;
9699             ret = get_errno(sys_uname(buf));
9700             if (!is_error(ret)) {
9701                 /* Overwrite the native machine name with whatever is being
9702                    emulated. */
9703                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9704                           sizeof(buf->machine));
9705                 /* Allow the user to override the reported release.  */
9706                 if (qemu_uname_release && *qemu_uname_release) {
9707                     g_strlcpy(buf->release, qemu_uname_release,
9708                               sizeof(buf->release));
9709                 }
9710             }
9711             unlock_user_struct(buf, arg1, 1);
9712         }
9713         return ret;
9714 #ifdef TARGET_I386
9715     case TARGET_NR_modify_ldt:
9716         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9717 #if !defined(TARGET_X86_64)
9718     case TARGET_NR_vm86:
9719         return do_vm86(cpu_env, arg1, arg2);
9720 #endif
9721 #endif
9722 #if defined(TARGET_NR_adjtimex)
9723     case TARGET_NR_adjtimex:
9724         {
9725             struct timex host_buf;
9726 
9727             if (target_to_host_timex(&host_buf, arg1) != 0) {
9728                 return -TARGET_EFAULT;
9729             }
9730             ret = get_errno(adjtimex(&host_buf));
9731             if (!is_error(ret)) {
9732                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9733                     return -TARGET_EFAULT;
9734                 }
9735             }
9736         }
9737         return ret;
9738 #endif
9739 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9740     case TARGET_NR_clock_adjtime:
9741         {
9742             struct timex htx, *phtx = &htx;
9743 
9744             if (target_to_host_timex(phtx, arg2) != 0) {
9745                 return -TARGET_EFAULT;
9746             }
9747             ret = get_errno(clock_adjtime(arg1, phtx));
9748             if (!is_error(ret) && phtx) {
9749                 if (host_to_target_timex(arg2, phtx) != 0) {
9750                     return -TARGET_EFAULT;
9751                 }
9752             }
9753         }
9754         return ret;
9755 #endif
9756     case TARGET_NR_getpgid:
9757         return get_errno(getpgid(arg1));
9758     case TARGET_NR_fchdir:
9759         return get_errno(fchdir(arg1));
9760     case TARGET_NR_personality:
9761         return get_errno(personality(arg1));
9762 #ifdef TARGET_NR__llseek /* Not on alpha */
9763     case TARGET_NR__llseek:
9764         {
9765             int64_t res;
9766 #if !defined(__NR_llseek)
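            /* Hosts that lack __NR_llseek are 64-bit, so a plain lseek()
               on the combined 64-bit offset is sufficient.  */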
9767             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9768             if (res == -1) {
9769                 ret = get_errno(res);
9770             } else {
9771                 ret = 0;
9772             }
9773 #else
9774             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9775 #endif
9776             if ((ret == 0) && put_user_s64(res, arg4)) {
9777                 return -TARGET_EFAULT;
9778             }
9779         }
9780         return ret;
9781 #endif
9782 #ifdef TARGET_NR_getdents
9783     case TARGET_NR_getdents:
9784 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9785 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9786         {
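            /*
             * 32-bit guest on a 64-bit host: the host's struct linux_dirent
             * has 64-bit d_ino/d_off fields, so every record returned by
             * the host has to be repacked into the narrower target layout.
             */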
9787             struct target_dirent *target_dirp;
9788             struct linux_dirent *dirp;
9789             abi_long count = arg3;
9790 
9791             dirp = g_try_malloc(count);
9792             if (!dirp) {
9793                 return -TARGET_ENOMEM;
9794             }
9795 
9796             ret = get_errno(sys_getdents(arg1, dirp, count));
9797             if (!is_error(ret)) {
9798                 struct linux_dirent *de;
9799                 struct target_dirent *tde;
9800                 int len = ret;
9801                 int reclen, treclen;
9802                 int count1, tnamelen;
9803 
9804                 count1 = 0;
9805                 de = dirp;
9806                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) {
                         g_free(dirp);  /* don't leak the host buffer */
9807                     return -TARGET_EFAULT;
                     }
9808                 tde = target_dirp;
9809                 while (len > 0) {
9810                     reclen = de->d_reclen;
9811                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9812                     assert(tnamelen >= 0);
9813                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9814                     assert(count1 + treclen <= count);
9815                     tde->d_reclen = tswap16(treclen);
9816                     tde->d_ino = tswapal(de->d_ino);
9817                     tde->d_off = tswapal(de->d_off);
9818                     memcpy(tde->d_name, de->d_name, tnamelen);
9819                     de = (struct linux_dirent *)((char *)de + reclen);
9820                     len -= reclen;
9821                     tde = (struct target_dirent *)((char *)tde + treclen);
9822                     count1 += treclen;
9823                 }
9824                 ret = count1;
9825                 unlock_user(target_dirp, arg2, ret);
9826             }
9827             g_free(dirp);
9828         }
9829 #else
9830         {
9831             struct linux_dirent *dirp;
9832             abi_long count = arg3;
9833 
9834             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9835                 return -TARGET_EFAULT;
9836             ret = get_errno(sys_getdents(arg1, dirp, count));
9837             if (!is_error(ret)) {
9838                 struct linux_dirent *de;
9839                 int len = ret;
9840                 int reclen;
9841                 de = dirp;
9842                 while (len > 0) {
9843                     reclen = de->d_reclen;
9844                     if (reclen > len)
9845                         break;
9846                     de->d_reclen = tswap16(reclen);
9847                     tswapls(&de->d_ino);
9848                     tswapls(&de->d_off);
9849                     de = (struct linux_dirent *)((char *)de + reclen);
9850                     len -= reclen;
9851                 }
9852             }
9853             unlock_user(dirp, arg2, ret);
9854         }
9855 #endif
9856 #else
9857         /* Implement getdents in terms of getdents64 */
9858         {
9859             struct linux_dirent64 *dirp;
9860             abi_long count = arg3;
9861 
9862             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9863             if (!dirp) {
9864                 return -TARGET_EFAULT;
9865             }
9866             ret = get_errno(sys_getdents64(arg1, dirp, count));
9867             if (!is_error(ret)) {
9868                 /* Convert the dirent64 structs to target dirent.  We do this
9869                  * in-place, since we can guarantee that a target_dirent is no
9870                  * larger than a dirent64; however this means we have to be
9871                  * careful to read everything before writing in the new format.
9872                  */
9873                 struct linux_dirent64 *de;
9874                 struct target_dirent *tde;
9875                 int len = ret;
9876                 int tlen = 0;
9877 
9878                 de = dirp;
9879                 tde = (struct target_dirent *)dirp;
9880                 while (len > 0) {
9881                     int namelen, treclen;
9882                     int reclen = de->d_reclen;
9883                     uint64_t ino = de->d_ino;
9884                     int64_t off = de->d_off;
9885                     uint8_t type = de->d_type;
9886 
9887                     namelen = strlen(de->d_name);
9888                     treclen = offsetof(struct target_dirent, d_name)
9889                         + namelen + 2;
9890                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9891 
9892                     memmove(tde->d_name, de->d_name, namelen + 1);
9893                     tde->d_ino = tswapal(ino);
9894                     tde->d_off = tswapal(off);
9895                     tde->d_reclen = tswap16(treclen);
9896                     /* The target_dirent type is in what was formerly a padding
9897                      * byte at the end of the structure:
9898                      */
9899                     *(((char *)tde) + treclen - 1) = type;
9900 
9901                     de = (struct linux_dirent64 *)((char *)de + reclen);
9902                     tde = (struct target_dirent *)((char *)tde + treclen);
9903                     len -= reclen;
9904                     tlen += treclen;
9905                 }
9906                 ret = tlen;
9907             }
9908             unlock_user(dirp, arg2, ret);
9909         }
9910 #endif
9911         return ret;
9912 #endif /* TARGET_NR_getdents */
9913 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9914     case TARGET_NR_getdents64:
9915         {
9916             struct linux_dirent64 *dirp;
9917             abi_long count = arg3;
9918             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9919                 return -TARGET_EFAULT;
9920             ret = get_errno(sys_getdents64(arg1, dirp, count));
9921             if (!is_error(ret)) {
9922                 struct linux_dirent64 *de;
9923                 int len = ret;
9924                 int reclen;
9925                 de = dirp;
9926                 while (len > 0) {
9927                     reclen = de->d_reclen;
9928                     if (reclen > len)
9929                         break;
9930                     de->d_reclen = tswap16(reclen);
9931                     tswap64s((uint64_t *)&de->d_ino);
9932                     tswap64s((uint64_t *)&de->d_off);
9933                     de = (struct linux_dirent64 *)((char *)de + reclen);
9934                     len -= reclen;
9935                 }
9936             }
9937             unlock_user(dirp, arg2, ret);
9938         }
9939         return ret;
9940 #endif /* TARGET_NR_getdents64 */
9941 #if defined(TARGET_NR__newselect)
9942     case TARGET_NR__newselect:
9943         return do_select(arg1, arg2, arg3, arg4, arg5);
9944 #endif
9945 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9946 # ifdef TARGET_NR_poll
9947     case TARGET_NR_poll:
9948 # endif
9949 # ifdef TARGET_NR_ppoll
9950     case TARGET_NR_ppoll:
9951 # endif
9952         {
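            /*
             * poll and ppoll share the pollfd conversion here; the switch
             * on num below handles the differing timeout and signal mask
             * arguments.
             */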
9953             struct target_pollfd *target_pfd;
9954             unsigned int nfds = arg2;
9955             struct pollfd *pfd;
9956             unsigned int i;
9957 
9958             pfd = NULL;
9959             target_pfd = NULL;
9960             if (nfds) {
9961                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9962                     return -TARGET_EINVAL;
9963                 }
9964 
9965                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9966                                        sizeof(struct target_pollfd) * nfds, 1);
9967                 if (!target_pfd) {
9968                     return -TARGET_EFAULT;
9969                 }
9970 
9971                 pfd = alloca(sizeof(struct pollfd) * nfds);
9972                 for (i = 0; i < nfds; i++) {
9973                     pfd[i].fd = tswap32(target_pfd[i].fd);
9974                     pfd[i].events = tswap16(target_pfd[i].events);
9975                 }
9976             }
9977 
9978             switch (num) {
9979 # ifdef TARGET_NR_ppoll
9980             case TARGET_NR_ppoll:
9981             {
9982                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9983                 target_sigset_t *target_set;
9984                 sigset_t _set, *set = &_set;
9985 
9986                 if (arg3) {
9987                     if (target_to_host_timespec(timeout_ts, arg3)) {
9988                         unlock_user(target_pfd, arg1, 0);
9989                         return -TARGET_EFAULT;
9990                     }
9991                 } else {
9992                     timeout_ts = NULL;
9993                 }
9994 
9995                 if (arg4) {
9996                     if (arg5 != sizeof(target_sigset_t)) {
9997                         unlock_user(target_pfd, arg1, 0);
9998                         return -TARGET_EINVAL;
9999                     }
10000 
10001                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10002                     if (!target_set) {
10003                         unlock_user(target_pfd, arg1, 0);
10004                         return -TARGET_EFAULT;
10005                     }
10006                     target_to_host_sigset(set, target_set);
10007                 } else {
10008                     set = NULL;
10009                 }
10010 
10011                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10012                                            set, SIGSET_T_SIZE));
10013 
10014                 if (!is_error(ret) && arg3) {
10015                     host_to_target_timespec(arg3, timeout_ts);
10016                 }
10017                 if (arg4) {
10018                     unlock_user(target_set, arg4, 0);
10019                 }
10020                 break;
10021             }
10022 # endif
10023 # ifdef TARGET_NR_poll
10024             case TARGET_NR_poll:
10025             {
10026                 struct timespec ts, *pts;
10027 
10028                 if (arg3 >= 0) {
10029                     /* Convert ms to secs, ns */
10030                     ts.tv_sec = arg3 / 1000;
10031                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10032                     pts = &ts;
10033                 } else {
10034                     /* -ve poll() timeout means "infinite" */
10035                     pts = NULL;
10036                 }
10037                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10038                 break;
10039             }
10040 # endif
10041             default:
10042                 g_assert_not_reached();
10043             }
10044 
10045             if (!is_error(ret)) {
10046                 for(i = 0; i < nfds; i++) {
10047                     target_pfd[i].revents = tswap16(pfd[i].revents);
10048                 }
10049             }
10050             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10051         }
10052         return ret;
10053 #endif
10054     case TARGET_NR_flock:
10055         /* NOTE: the flock constant seems to be the same for every
10056            Linux platform */
10057         return get_errno(safe_flock(arg1, arg2));
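    /* readv/writev: lock_iovec() copies and locks the guest iovec array,
       setting errno itself on failure.  */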
10058     case TARGET_NR_readv:
10059         {
10060             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10061             if (vec != NULL) {
10062                 ret = get_errno(safe_readv(arg1, vec, arg3));
10063                 unlock_iovec(vec, arg2, arg3, 1);
10064             } else {
10065                 ret = -host_to_target_errno(errno);
10066             }
10067         }
10068         return ret;
10069     case TARGET_NR_writev:
10070         {
10071             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10072             if (vec != NULL) {
10073                 ret = get_errno(safe_writev(arg1, vec, arg3));
10074                 unlock_iovec(vec, arg2, arg3, 0);
10075             } else {
10076                 ret = -host_to_target_errno(errno);
10077             }
10078         }
10079         return ret;
10080 #if defined(TARGET_NR_preadv)
10081     case TARGET_NR_preadv:
10082         {
10083             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10084             if (vec != NULL) {
10085                 unsigned long low, high;
10086 
10087                 target_to_host_low_high(arg4, arg5, &low, &high);
10088                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10089                 unlock_iovec(vec, arg2, arg3, 1);
10090             } else {
10091                 ret = -host_to_target_errno(errno);
10092            }
10093         }
10094         return ret;
10095 #endif
10096 #if defined(TARGET_NR_pwritev)
10097     case TARGET_NR_pwritev:
10098         {
10099             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10100             if (vec != NULL) {
10101                 unsigned long low, high;
10102 
10103                 target_to_host_low_high(arg4, arg5, &low, &high);
10104                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10105                 unlock_iovec(vec, arg2, arg3, 0);
10106             } else {
10107                 ret = -host_to_target_errno(errno);
10108            }
10109         }
10110         return ret;
10111 #endif
10112     case TARGET_NR_getsid:
10113         return get_errno(getsid(arg1));
10114 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10115     case TARGET_NR_fdatasync:
10116         return get_errno(fdatasync(arg1));
10117 #endif
10118 #ifdef TARGET_NR__sysctl
10119     case TARGET_NR__sysctl:
10120         /* We don't implement this, but ENOTDIR is always a safe
10121            return value. */
10122         return -TARGET_ENOTDIR;
10123 #endif
10124     case TARGET_NR_sched_getaffinity:
10125         {
10126             unsigned int mask_size;
10127             unsigned long *mask;
10128 
10129             /*
10130              * sched_getaffinity needs multiples of ulong, so we must handle
10131              * mismatches between the target and host ulong sizes.
10132              */
10133             if (arg2 & (sizeof(abi_ulong) - 1)) {
10134                 return -TARGET_EINVAL;
10135             }
10136             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10137 
10138             mask = alloca(mask_size);
10139             memset(mask, 0, mask_size);
10140             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10141 
10142             if (!is_error(ret)) {
10143                 if (ret > arg2) {
10144                     /* More data was returned than will fit in the caller's buffer.
10145                      * This only happens if sizeof(abi_long) < sizeof(long)
10146                      * and the caller passed us a buffer holding an odd number
10147                      * of abi_longs. If the host kernel is actually using the
10148                      * extra 4 bytes then fail EINVAL; otherwise we can just
10149                      * ignore them and only copy the interesting part.
10150                      */
10151                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10152                     if (numcpus > arg2 * 8) {
10153                         return -TARGET_EINVAL;
10154                     }
10155                     ret = arg2;
10156                 }
10157 
10158                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10159                     return -TARGET_EFAULT;
10160                 }
10161             }
10162         }
10163         return ret;
10164     case TARGET_NR_sched_setaffinity:
10165         {
10166             unsigned int mask_size;
10167             unsigned long *mask;
10168 
10169             /*
10170              * sched_setaffinity needs multiples of ulong, so we must handle
10171              * mismatches between the target and host ulong sizes.
10172              */
10173             if (arg2 & (sizeof(abi_ulong) - 1)) {
10174                 return -TARGET_EINVAL;
10175             }
10176             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10177             mask = alloca(mask_size);
10178 
10179             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10180             if (ret) {
10181                 return ret;
10182             }
10183 
10184             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10185         }
10186     case TARGET_NR_getcpu:
10187         {
10188             unsigned cpu, node;
10189             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10190                                        arg2 ? &node : NULL,
10191                                        NULL));
10192             if (is_error(ret)) {
10193                 return ret;
10194             }
10195             if (arg1 && put_user_u32(cpu, arg1)) {
10196                 return -TARGET_EFAULT;
10197             }
10198             if (arg2 && put_user_u32(node, arg2)) {
10199                 return -TARGET_EFAULT;
10200             }
10201         }
10202         return ret;
10203     case TARGET_NR_sched_setparam:
10204         {
10205             struct sched_param *target_schp;
10206             struct sched_param schp;
10207 
10208             if (arg2 == 0) {
10209                 return -TARGET_EINVAL;
10210             }
10211             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10212                 return -TARGET_EFAULT;
10213             schp.sched_priority = tswap32(target_schp->sched_priority);
10214             unlock_user_struct(target_schp, arg2, 0);
10215             return get_errno(sched_setparam(arg1, &schp));
10216         }
10217     case TARGET_NR_sched_getparam:
10218         {
10219             struct sched_param *target_schp;
10220             struct sched_param schp;
10221 
10222             if (arg2 == 0) {
10223                 return -TARGET_EINVAL;
10224             }
10225             ret = get_errno(sched_getparam(arg1, &schp));
10226             if (!is_error(ret)) {
10227                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10228                     return -TARGET_EFAULT;
10229                 target_schp->sched_priority = tswap32(schp.sched_priority);
10230                 unlock_user_struct(target_schp, arg2, 1);
10231             }
10232         }
10233         return ret;
10234     case TARGET_NR_sched_setscheduler:
10235         {
10236             struct sched_param *target_schp;
10237             struct sched_param schp;
10238             if (arg3 == 0) {
10239                 return -TARGET_EINVAL;
10240             }
10241             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10242                 return -TARGET_EFAULT;
10243             schp.sched_priority = tswap32(target_schp->sched_priority);
10244             unlock_user_struct(target_schp, arg3, 0);
10245             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10246         }
10247     case TARGET_NR_sched_getscheduler:
10248         return get_errno(sched_getscheduler(arg1));
10249     case TARGET_NR_sched_yield:
10250         return get_errno(sched_yield());
10251     case TARGET_NR_sched_get_priority_max:
10252         return get_errno(sched_get_priority_max(arg1));
10253     case TARGET_NR_sched_get_priority_min:
10254         return get_errno(sched_get_priority_min(arg1));
10255 #ifdef TARGET_NR_sched_rr_get_interval
10256     case TARGET_NR_sched_rr_get_interval:
10257         {
10258             struct timespec ts;
10259             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10260             if (!is_error(ret)) {
10261                 ret = host_to_target_timespec(arg2, &ts);
10262             }
10263         }
10264         return ret;
10265 #endif
10266 #if defined(TARGET_NR_nanosleep)
10267     case TARGET_NR_nanosleep:
10268         {
10269             struct timespec req, rem;
10270             target_to_host_timespec(&req, arg1);
10271             ret = get_errno(safe_nanosleep(&req, &rem));
10272             if (is_error(ret) && arg2) {
10273                 host_to_target_timespec(arg2, &rem);
10274             }
10275         }
10276         return ret;
10277 #endif
10278     case TARGET_NR_prctl:
10279         switch (arg1) {
10280         case PR_GET_PDEATHSIG:
10281         {
10282             int deathsig;
10283             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10284             if (!is_error(ret) && arg2
10285                 && put_user_ual(deathsig, arg2)) {
10286                 return -TARGET_EFAULT;
10287             }
10288             return ret;
10289         }
10290 #ifdef PR_GET_NAME
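        /* Thread names are at most 16 bytes (TASK_COMM_LEN), including
           the terminating NUL.  */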
10291         case PR_GET_NAME:
10292         {
10293             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10294             if (!name) {
10295                 return -TARGET_EFAULT;
10296             }
10297             ret = get_errno(prctl(arg1, (unsigned long)name,
10298                                   arg3, arg4, arg5));
10299             unlock_user(name, arg2, 16);
10300             return ret;
10301         }
10302         case PR_SET_NAME:
10303         {
10304             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10305             if (!name) {
10306                 return -TARGET_EFAULT;
10307             }
10308             ret = get_errno(prctl(arg1, (unsigned long)name,
10309                                   arg3, arg4, arg5));
10310             unlock_user(name, arg2, 0);
10311             return ret;
10312         }
10313 #endif
10314 #ifdef TARGET_MIPS
10315         case TARGET_PR_GET_FP_MODE:
10316         {
10317             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10318             ret = 0;
10319             if (env->CP0_Status & (1 << CP0St_FR)) {
10320                 ret |= TARGET_PR_FP_MODE_FR;
10321             }
10322             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10323                 ret |= TARGET_PR_FP_MODE_FRE;
10324             }
10325             return ret;
10326         }
10327         case TARGET_PR_SET_FP_MODE:
10328         {
10329             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10330             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10331             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10332             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10333             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10334 
10335             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10336                                             TARGET_PR_FP_MODE_FRE;
10337 
10338             /* If nothing to change, return right away, successfully.  */
10339             if (old_fr == new_fr && old_fre == new_fre) {
10340                 return 0;
10341             }
10342             /* Check the value is valid */
10343             if (arg2 & ~known_bits) {
10344                 return -TARGET_EOPNOTSUPP;
10345             }
10346             /* Setting FRE without FR is not supported.  */
10347             if (new_fre && !new_fr) {
10348                 return -TARGET_EOPNOTSUPP;
10349             }
10350             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10351                 /* FR1 is not supported */
10352                 return -TARGET_EOPNOTSUPP;
10353             }
10354             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10355                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10356                 /* cannot set FR=0 */
10357                 return -TARGET_EOPNOTSUPP;
10358             }
10359             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10360                 /* Cannot set FRE=1 */
10361                 return -TARGET_EOPNOTSUPP;
10362             }
10363 
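            /* Switching between FR=0 (paired 32-bit FPRs) and FR=1
               (64-bit FPRs): move each odd single between its own
               register and the high half of the even double.  */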
10364             int i;
10365             fpr_t *fpr = env->active_fpu.fpr;
10366             for (i = 0; i < 32 ; i += 2) {
10367                 if (!old_fr && new_fr) {
10368                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10369                 } else if (old_fr && !new_fr) {
10370                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10371                 }
10372             }
10373 
10374             if (new_fr) {
10375                 env->CP0_Status |= (1 << CP0St_FR);
10376                 env->hflags |= MIPS_HFLAG_F64;
10377             } else {
10378                 env->CP0_Status &= ~(1 << CP0St_FR);
10379                 env->hflags &= ~MIPS_HFLAG_F64;
10380             }
10381             if (new_fre) {
10382                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10383                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10384                     env->hflags |= MIPS_HFLAG_FRE;
10385                 }
10386             } else {
10387                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10388                 env->hflags &= ~MIPS_HFLAG_FRE;
10389             }
10390 
10391             return 0;
10392         }
10393 #endif /* MIPS */
10394 #ifdef TARGET_AARCH64
10395         case TARGET_PR_SVE_SET_VL:
10396             /*
10397              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10398              * PR_SVE_VL_INHERIT.  Note the kernel definition
10399              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10400              * even though the current architectural maximum is VQ=16.
10401              */
10402             ret = -TARGET_EINVAL;
10403             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10404                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10405                 CPUARMState *env = cpu_env;
10406                 ARMCPU *cpu = env_archcpu(env);
10407                 uint32_t vq, old_vq;
10408 
10409                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10410                 vq = MAX(arg2 / 16, 1);
10411                 vq = MIN(vq, cpu->sve_max_vq);
10412 
10413                 if (vq < old_vq) {
10414                     aarch64_sve_narrow_vq(env, vq);
10415                 }
10416                 env->vfp.zcr_el[1] = vq - 1;
10417                 arm_rebuild_hflags(env);
10418                 ret = vq * 16;
10419             }
10420             return ret;
10421         case TARGET_PR_SVE_GET_VL:
10422             ret = -TARGET_EINVAL;
10423             {
10424                 ARMCPU *cpu = env_archcpu(cpu_env);
10425                 if (cpu_isar_feature(aa64_sve, cpu)) {
10426                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10427                 }
10428             }
10429             return ret;
10430         case TARGET_PR_PAC_RESET_KEYS:
10431             {
10432                 CPUARMState *env = cpu_env;
10433                 ARMCPU *cpu = env_archcpu(env);
10434 
10435                 if (arg3 || arg4 || arg5) {
10436                     return -TARGET_EINVAL;
10437                 }
10438                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10439                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10440                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10441                                TARGET_PR_PAC_APGAKEY);
10442                     int ret = 0;
10443                     Error *err = NULL;
10444 
10445                     if (arg2 == 0) {
10446                         arg2 = all;
10447                     } else if (arg2 & ~all) {
10448                         return -TARGET_EINVAL;
10449                     }
10450                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10451                         ret |= qemu_guest_getrandom(&env->keys.apia,
10452                                                     sizeof(ARMPACKey), &err);
10453                     }
10454                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10455                         ret |= qemu_guest_getrandom(&env->keys.apib,
10456                                                     sizeof(ARMPACKey), &err);
10457                     }
10458                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10459                         ret |= qemu_guest_getrandom(&env->keys.apda,
10460                                                     sizeof(ARMPACKey), &err);
10461                     }
10462                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10463                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10464                                                     sizeof(ARMPACKey), &err);
10465                     }
10466                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10467                         ret |= qemu_guest_getrandom(&env->keys.apga,
10468                                                     sizeof(ARMPACKey), &err);
10469                     }
10470                     if (ret != 0) {
10471                         /*
10472                          * Some unknown failure in the crypto.  The best
10473                          * we can do is log it and fail the syscall.
10474                          * The real syscall cannot fail this way.
10475                          */
10476                         qemu_log_mask(LOG_UNIMP,
10477                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10478                                       error_get_pretty(err));
10479                         error_free(err);
10480                         return -TARGET_EIO;
10481                     }
10482                     return 0;
10483                 }
10484             }
10485             return -TARGET_EINVAL;
10486 #endif /* AARCH64 */
10487         case PR_GET_SECCOMP:
10488         case PR_SET_SECCOMP:
10489             /* Disable seccomp to prevent the target disabling syscalls we
10490              * need. */
10491             return -TARGET_EINVAL;
10492         default:
10493             /* Most prctl options have no pointer arguments */
10494             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10495         }
10496         break;
10497 #ifdef TARGET_NR_arch_prctl
10498     case TARGET_NR_arch_prctl:
10499         return do_arch_prctl(cpu_env, arg1, arg2);
10500 #endif
10501 #ifdef TARGET_NR_pread64
10502     case TARGET_NR_pread64:
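        /* Some ABIs pass 64-bit syscall arguments in aligned register
           pairs, which shifts the offset halves up by one slot.  */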
10503         if (regpairs_aligned(cpu_env, num)) {
10504             arg4 = arg5;
10505             arg5 = arg6;
10506         }
10507         if (arg2 == 0 && arg3 == 0) {
10508             /* Special-case NULL buffer and zero length, which should succeed */
10509             p = 0;
10510         } else {
10511             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10512             if (!p) {
10513                 return -TARGET_EFAULT;
10514             }
10515         }
10516         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10517         unlock_user(p, arg2, ret);
10518         return ret;
10519     case TARGET_NR_pwrite64:
10520         if (regpairs_aligned(cpu_env, num)) {
10521             arg4 = arg5;
10522             arg5 = arg6;
10523         }
10524         if (arg2 == 0 && arg3 == 0) {
10525             /* Special-case NULL buffer and zero length, which should succeed */
10526             p = 0;
10527         } else {
10528             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10529             if (!p) {
10530                 return -TARGET_EFAULT;
10531             }
10532         }
10533         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10534         unlock_user(p, arg2, 0);
10535         return ret;
10536 #endif
10537     case TARGET_NR_getcwd:
10538         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10539             return -TARGET_EFAULT;
10540         ret = get_errno(sys_getcwd1(p, arg2));
10541         unlock_user(p, arg1, ret);
10542         return ret;
10543     case TARGET_NR_capget:
10544     case TARGET_NR_capset:
10545     {
10546         struct target_user_cap_header *target_header;
10547         struct target_user_cap_data *target_data = NULL;
10548         struct __user_cap_header_struct header;
10549         struct __user_cap_data_struct data[2];
10550         struct __user_cap_data_struct *dataptr = NULL;
10551         int i, target_datalen;
10552         int data_items = 1;
10553 
10554         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10555             return -TARGET_EFAULT;
10556         }
10557         header.version = tswap32(target_header->version);
10558         header.pid = tswap32(target_header->pid);
10559 
10560         if (header.version != _LINUX_CAPABILITY_VERSION) {
10561             /* Version 2 and up takes pointer to two user_data structs */
10562             data_items = 2;
10563         }
10564 
10565         target_datalen = sizeof(*target_data) * data_items;
10566 
10567         if (arg2) {
10568             if (num == TARGET_NR_capget) {
10569                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10570             } else {
10571                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10572             }
10573             if (!target_data) {
10574                 unlock_user_struct(target_header, arg1, 0);
10575                 return -TARGET_EFAULT;
10576             }
10577 
10578             if (num == TARGET_NR_capset) {
10579                 for (i = 0; i < data_items; i++) {
10580                     data[i].effective = tswap32(target_data[i].effective);
10581                     data[i].permitted = tswap32(target_data[i].permitted);
10582                     data[i].inheritable = tswap32(target_data[i].inheritable);
10583                 }
10584             }
10585 
10586             dataptr = data;
10587         }
10588 
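              /* If the guest passed no data pointer, dataptr stays NULL; for
               * capget the kernel accepts that as a probe and simply reports
               * its preferred version back in the header.
               */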
10589         if (num == TARGET_NR_capget) {
10590             ret = get_errno(capget(&header, dataptr));
10591         } else {
10592             ret = get_errno(capset(&header, dataptr));
10593         }
10594 
10595         /* The kernel always updates version for both capget and capset */
10596         target_header->version = tswap32(header.version);
10597         unlock_user_struct(target_header, arg1, 1);
10598 
10599         if (arg2) {
10600             if (num == TARGET_NR_capget) {
10601                 for (i = 0; i < data_items; i++) {
10602                     target_data[i].effective = tswap32(data[i].effective);
10603                     target_data[i].permitted = tswap32(data[i].permitted);
10604                     target_data[i].inheritable = tswap32(data[i].inheritable);
10605                 }
10606                 unlock_user(target_data, arg2, target_datalen);
10607             } else {
10608                 unlock_user(target_data, arg2, 0);
10609             }
10610         }
10611         return ret;
10612     }
10613     case TARGET_NR_sigaltstack:
10614         return do_sigaltstack(arg1, arg2,
10615                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10616 
10617 #ifdef CONFIG_SENDFILE
10618 #ifdef TARGET_NR_sendfile
10619     case TARGET_NR_sendfile:
10620     {
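              /* sendfile() takes the offset as a target 'long' in guest memory:
               * read it with get_user_sal() and, on success, write the updated
               * offset back so the guest sees how far the copy got.
               */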
10621         off_t *offp = NULL;
10622         off_t off;
10623         if (arg3) {
10624             ret = get_user_sal(off, arg3);
10625             if (is_error(ret)) {
10626                 return ret;
10627             }
10628             offp = &off;
10629         }
10630         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10631         if (!is_error(ret) && arg3) {
10632             abi_long ret2 = put_user_sal(off, arg3);
10633             if (is_error(ret2)) {
10634                 ret = ret2;
10635             }
10636         }
10637         return ret;
10638     }
10639 #endif
10640 #ifdef TARGET_NR_sendfile64
10641     case TARGET_NR_sendfile64:
10642     {
10643         off_t *offp = NULL;
10644         off_t off;
10645         if (arg3) {
10646             ret = get_user_s64(off, arg3);
10647             if (is_error(ret)) {
10648                 return ret;
10649             }
10650             offp = &off;
10651         }
10652         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10653         if (!is_error(ret) && arg3) {
10654             abi_long ret2 = put_user_s64(off, arg3);
10655             if (is_error(ret2)) {
10656                 ret = ret2;
10657             }
10658         }
10659         return ret;
10660     }
10661 #endif
10662 #endif
10663 #ifdef TARGET_NR_vfork
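          /* vfork is emulated through do_fork() with the usual vfork clone
           * flags (CLONE_VFORK | CLONE_VM) plus the target's SIGCHLD value.
           */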
10664     case TARGET_NR_vfork:
10665         return get_errno(do_fork(cpu_env,
10666                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10667                          0, 0, 0, 0));
10668 #endif
10669 #ifdef TARGET_NR_ugetrlimit
10670     case TARGET_NR_ugetrlimit:
10671     {
10672         struct rlimit rlim;
10673         int resource = target_to_host_resource(arg1);
10674         ret = get_errno(getrlimit(resource, &rlim));
10675         if (!is_error(ret)) {
10676             struct target_rlimit *target_rlim;
10677             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10678                 return -TARGET_EFAULT;
10679             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10680             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10681             unlock_user_struct(target_rlim, arg2, 1);
10682         }
10683         return ret;
10684     }
10685 #endif
10686 #ifdef TARGET_NR_truncate64
10687     case TARGET_NR_truncate64:
10688         if (!(p = lock_user_string(arg1)))
10689             return -TARGET_EFAULT;
10690         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10691         unlock_user(p, arg1, 0);
10692         return ret;
10693 #endif
10694 #ifdef TARGET_NR_ftruncate64
10695     case TARGET_NR_ftruncate64:
10696         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10697 #endif
10698 #ifdef TARGET_NR_stat64
10699     case TARGET_NR_stat64:
10700         if (!(p = lock_user_string(arg1))) {
10701             return -TARGET_EFAULT;
10702         }
10703         ret = get_errno(stat(path(p), &st));
10704         unlock_user(p, arg1, 0);
10705         if (!is_error(ret))
10706             ret = host_to_target_stat64(cpu_env, arg2, &st);
10707         return ret;
10708 #endif
10709 #ifdef TARGET_NR_lstat64
10710     case TARGET_NR_lstat64:
10711         if (!(p = lock_user_string(arg1))) {
10712             return -TARGET_EFAULT;
10713         }
10714         ret = get_errno(lstat(path(p), &st));
10715         unlock_user(p, arg1, 0);
10716         if (!is_error(ret))
10717             ret = host_to_target_stat64(cpu_env, arg2, &st);
10718         return ret;
10719 #endif
10720 #ifdef TARGET_NR_fstat64
10721     case TARGET_NR_fstat64:
10722         ret = get_errno(fstat(arg1, &st));
10723         if (!is_error(ret))
10724             ret = host_to_target_stat64(cpu_env, arg2, &st);
10725         return ret;
10726 #endif
10727 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10728 #ifdef TARGET_NR_fstatat64
10729     case TARGET_NR_fstatat64:
10730 #endif
10731 #ifdef TARGET_NR_newfstatat
10732     case TARGET_NR_newfstatat:
10733 #endif
10734         if (!(p = lock_user_string(arg2))) {
10735             return -TARGET_EFAULT;
10736         }
10737         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10738         unlock_user(p, arg2, 0);
10739         if (!is_error(ret))
10740             ret = host_to_target_stat64(cpu_env, arg3, &st);
10741         return ret;
10742 #endif
10743 #if defined(TARGET_NR_statx)
10744     case TARGET_NR_statx:
10745         {
10746             struct target_statx *target_stx;
10747             int dirfd = arg1;
10748             int flags = arg3;
10749 
10750             p = lock_user_string(arg2);
10751             if (p == NULL) {
10752                 return -TARGET_EFAULT;
10753             }
10754 #if defined(__NR_statx)
10755             {
10756                 /*
10757                  * It is assumed that struct statx is architecture independent.
10758                  */
10759                 struct target_statx host_stx;
10760                 int mask = arg4;
10761 
10762                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10763                 if (!is_error(ret)) {
10764                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10765                         unlock_user(p, arg2, 0);
10766                         return -TARGET_EFAULT;
10767                     }
10768                 }
10769 
10770                 if (ret != -TARGET_ENOSYS) {
10771                     unlock_user(p, arg2, 0);
10772                     return ret;
10773                 }
10774             }
10775 #endif
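                  /* Either the host has no statx() or it returned ENOSYS:
                   * emulate it with fstatat() and fill in the statx fields
                   * that an ordinary struct stat can provide.
                   */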
10776             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10777             unlock_user(p, arg2, 0);
10778 
10779             if (!is_error(ret)) {
10780                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10781                     return -TARGET_EFAULT;
10782                 }
10783                 memset(target_stx, 0, sizeof(*target_stx));
10784                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10785                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10786                 __put_user(st.st_ino, &target_stx->stx_ino);
10787                 __put_user(st.st_mode, &target_stx->stx_mode);
10788                 __put_user(st.st_uid, &target_stx->stx_uid);
10789                 __put_user(st.st_gid, &target_stx->stx_gid);
10790                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10791                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10792                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10793                 __put_user(st.st_size, &target_stx->stx_size);
10794                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10795                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10796                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10797                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10798                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10799                 unlock_user_struct(target_stx, arg5, 1);
10800             }
10801         }
10802         return ret;
10803 #endif
10804 #ifdef TARGET_NR_lchown
10805     case TARGET_NR_lchown:
10806         if (!(p = lock_user_string(arg1)))
10807             return -TARGET_EFAULT;
10808         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10809         unlock_user(p, arg1, 0);
10810         return ret;
10811 #endif
10812 #ifdef TARGET_NR_getuid
10813     case TARGET_NR_getuid:
10814         return get_errno(high2lowuid(getuid()));
10815 #endif
10816 #ifdef TARGET_NR_getgid
10817     case TARGET_NR_getgid:
10818         return get_errno(high2lowgid(getgid()));
10819 #endif
10820 #ifdef TARGET_NR_geteuid
10821     case TARGET_NR_geteuid:
10822         return get_errno(high2lowuid(geteuid()));
10823 #endif
10824 #ifdef TARGET_NR_getegid
10825     case TARGET_NR_getegid:
10826         return get_errno(high2lowgid(getegid()));
10827 #endif
10828     case TARGET_NR_setreuid:
10829         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10830     case TARGET_NR_setregid:
10831         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10832     case TARGET_NR_getgroups:
10833         {
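                  /* The guest's group IDs (target_id) may be only 16 bits
                   * wide; high2lowgid() narrows each host gid and tswapid()
                   * fixes the byte order when copying the list out.
                   */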
10834             int gidsetsize = arg1;
10835             target_id *target_grouplist;
10836             gid_t *grouplist;
10837             int i;
10838 
10839             grouplist = alloca(gidsetsize * sizeof(gid_t));
10840             ret = get_errno(getgroups(gidsetsize, grouplist));
10841             if (gidsetsize == 0)
10842                 return ret;
10843             if (!is_error(ret)) {
10844                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10845                 if (!target_grouplist)
10846                     return -TARGET_EFAULT;
10847                 for (i = 0; i < ret; i++)
10848                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10849                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10850             }
10851         }
10852         return ret;
10853     case TARGET_NR_setgroups:
10854         {
10855             int gidsetsize = arg1;
10856             target_id *target_grouplist;
10857             gid_t *grouplist = NULL;
10858             int i;
10859             if (gidsetsize) {
10860                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10861                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10862                 if (!target_grouplist) {
10863                     return -TARGET_EFAULT;
10864                 }
10865                 for (i = 0; i < gidsetsize; i++) {
10866                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10867                 }
10868                 unlock_user(target_grouplist, arg2, 0);
10869             }
10870             return get_errno(setgroups(gidsetsize, grouplist));
10871         }
10872     case TARGET_NR_fchown:
10873         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10874 #if defined(TARGET_NR_fchownat)
10875     case TARGET_NR_fchownat:
10876         if (!(p = lock_user_string(arg2)))
10877             return -TARGET_EFAULT;
10878         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10879                                  low2highgid(arg4), arg5));
10880         unlock_user(p, arg2, 0);
10881         return ret;
10882 #endif
10883 #ifdef TARGET_NR_setresuid
10884     case TARGET_NR_setresuid:
10885         return get_errno(sys_setresuid(low2highuid(arg1),
10886                                        low2highuid(arg2),
10887                                        low2highuid(arg3)));
10888 #endif
10889 #ifdef TARGET_NR_getresuid
10890     case TARGET_NR_getresuid:
10891         {
10892             uid_t ruid, euid, suid;
10893             ret = get_errno(getresuid(&ruid, &euid, &suid));
10894             if (!is_error(ret)) {
10895                 if (put_user_id(high2lowuid(ruid), arg1)
10896                     || put_user_id(high2lowuid(euid), arg2)
10897                     || put_user_id(high2lowuid(suid), arg3))
10898                     return -TARGET_EFAULT;
10899             }
10900         }
10901         return ret;
10902 #endif
10903 #ifdef TARGET_NR_getresgid
10904     case TARGET_NR_setresgid:
10905         return get_errno(sys_setresgid(low2highgid(arg1),
10906                                        low2highgid(arg2),
10907                                        low2highgid(arg3)));
10908 #endif
10909 #ifdef TARGET_NR_getresgid
10910     case TARGET_NR_getresgid:
10911         {
10912             gid_t rgid, egid, sgid;
10913             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10914             if (!is_error(ret)) {
10915                 if (put_user_id(high2lowgid(rgid), arg1)
10916                     || put_user_id(high2lowgid(egid), arg2)
10917                     || put_user_id(high2lowgid(sgid), arg3))
10918                     return -TARGET_EFAULT;
10919             }
10920         }
10921         return ret;
10922 #endif
10923 #ifdef TARGET_NR_chown
10924     case TARGET_NR_chown:
10925         if (!(p = lock_user_string(arg1)))
10926             return -TARGET_EFAULT;
10927         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10928         unlock_user(p, arg1, 0);
10929         return ret;
10930 #endif
10931     case TARGET_NR_setuid:
10932         return get_errno(sys_setuid(low2highuid(arg1)));
10933     case TARGET_NR_setgid:
10934         return get_errno(sys_setgid(low2highgid(arg1)));
10935     case TARGET_NR_setfsuid:
10936         return get_errno(setfsuid(arg1));
10937     case TARGET_NR_setfsgid:
10938         return get_errno(setfsgid(arg1));
10939 
10940 #ifdef TARGET_NR_lchown32
10941     case TARGET_NR_lchown32:
10942         if (!(p = lock_user_string(arg1)))
10943             return -TARGET_EFAULT;
10944         ret = get_errno(lchown(p, arg2, arg3));
10945         unlock_user(p, arg1, 0);
10946         return ret;
10947 #endif
10948 #ifdef TARGET_NR_getuid32
10949     case TARGET_NR_getuid32:
10950         return get_errno(getuid());
10951 #endif
10952 
10953 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10954    /* Alpha specific */
10955     case TARGET_NR_getxuid:
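              /* Alpha's getxuid returns the real uid as the normal syscall
               * result and the effective uid in register a4, so store euid in
               * IR_A4 and return getuid() below.
               */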
10956          {
10957             uid_t euid;
10958             euid = geteuid();
10959             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10960          }
10961         return get_errno(getuid());
10962 #endif
10963 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10964    /* Alpha specific */
10965     case TARGET_NR_getxgid:
10966          {
10967             gid_t egid;
10968             egid = getegid();
10969             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10970          }
10971         return get_errno(getgid());
10972 #endif
10973 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10974     /* Alpha specific */
10975     case TARGET_NR_osf_getsysinfo:
10976         ret = -TARGET_EOPNOTSUPP;
10977         switch (arg1) {
10978           case TARGET_GSI_IEEE_FP_CONTROL:
10979             {
10980                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10981                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10982 
10983                 swcr &= ~SWCR_STATUS_MASK;
10984                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10985 
10986                 if (put_user_u64(swcr, arg2))
10987                     return -TARGET_EFAULT;
10988                 ret = 0;
10989             }
10990             break;
10991 
10992           /* case GSI_IEEE_STATE_AT_SIGNAL:
10993              -- Not implemented in linux kernel.
10994              case GSI_UACPROC:
10995              -- Retrieves current unaligned access state; not much used.
10996              case GSI_PROC_TYPE:
10997              -- Retrieves implver information; surely not used.
10998              case GSI_GET_HWRPB:
10999              -- Grabs a copy of the HWRPB; surely not used.
11000           */
11001         }
11002         return ret;
11003 #endif
11004 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11005     /* Alpha specific */
11006     case TARGET_NR_osf_setsysinfo:
11007         ret = -TARGET_EOPNOTSUPP;
11008         switch (arg1) {
11009           case TARGET_SSI_IEEE_FP_CONTROL:
11010             {
11011                 uint64_t swcr, fpcr;
11012 
11013                 if (get_user_u64(swcr, arg2)) {
11014                     return -TARGET_EFAULT;
11015                 }
11016 
11017                 /*
11018                  * The kernel calls swcr_update_status to update the
11019                  * status bits from the fpcr at every point that it
11020                  * could be queried.  Therefore, we store the status
11021                  * bits only in FPCR.
11022                  */
11023                 ((CPUAlphaState *)cpu_env)->swcr
11024                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11025 
11026                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11027                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11028                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11029                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11030                 ret = 0;
11031             }
11032             break;
11033 
11034           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11035             {
11036                 uint64_t exc, fpcr, fex;
11037 
11038                 if (get_user_u64(exc, arg2)) {
11039                     return -TARGET_EFAULT;
11040                 }
11041                 exc &= SWCR_STATUS_MASK;
11042                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11043 
11044                 /* Old exceptions are not signaled.  */
11045                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11046                 fex = exc & ~fex;
11047                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11048                 fex &= ((CPUArchState *)cpu_env)->swcr;
11049 
11050                 /* Update the hardware fpcr.  */
11051                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11052                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11053 
11054                 if (fex) {
11055                     int si_code = TARGET_FPE_FLTUNK;
11056                     target_siginfo_t info;
11057 
11058                     if (fex & SWCR_TRAP_ENABLE_DNO) {
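                          /* When several exception bits are set, each later
                           * test overwrites si_code, so the final match (INV)
                           * has the highest priority.
                           */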
11059                         si_code = TARGET_FPE_FLTUND;
11060                     }
11061                     if (fex & SWCR_TRAP_ENABLE_INE) {
11062                         si_code = TARGET_FPE_FLTRES;
11063                     }
11064                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11065                         si_code = TARGET_FPE_FLTUND;
11066                     }
11067                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11068                         si_code = TARGET_FPE_FLTOVF;
11069                     }
11070                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11071                         si_code = TARGET_FPE_FLTDIV;
11072                     }
11073                     if (fex & SWCR_TRAP_ENABLE_INV) {
11074                         si_code = TARGET_FPE_FLTINV;
11075                     }
11076 
11077                     info.si_signo = SIGFPE;
11078                     info.si_errno = 0;
11079                     info.si_code = si_code;
11080                     info._sifields._sigfault._addr
11081                         = ((CPUArchState *)cpu_env)->pc;
11082                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11083                                  QEMU_SI_FAULT, &info);
11084                 }
11085                 ret = 0;
11086             }
11087             break;
11088 
11089           /* case SSI_NVPAIRS:
11090              -- Used with SSIN_UACPROC to enable unaligned accesses.
11091              case SSI_IEEE_STATE_AT_SIGNAL:
11092              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11093              -- Not implemented in linux kernel
11094           */
11095         }
11096         return ret;
11097 #endif
11098 #ifdef TARGET_NR_osf_sigprocmask
11099     /* Alpha specific.  */
11100     case TARGET_NR_osf_sigprocmask:
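              /* The old OSF-style call takes the new mask by value and returns
               * the previous mask as the syscall result instead of through a
               * pointer.
               */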
11101         {
11102             abi_ulong mask;
11103             int how;
11104             sigset_t set, oldset;
11105 
11106             switch (arg1) {
11107             case TARGET_SIG_BLOCK:
11108                 how = SIG_BLOCK;
11109                 break;
11110             case TARGET_SIG_UNBLOCK:
11111                 how = SIG_UNBLOCK;
11112                 break;
11113             case TARGET_SIG_SETMASK:
11114                 how = SIG_SETMASK;
11115                 break;
11116             default:
11117                 return -TARGET_EINVAL;
11118             }
11119             mask = arg2;
11120             target_to_host_old_sigset(&set, &mask);
11121             ret = do_sigprocmask(how, &set, &oldset);
11122             if (!ret) {
11123                 host_to_target_old_sigset(&mask, &oldset);
11124                 ret = mask;
11125             }
11126         }
11127         return ret;
11128 #endif
11129 
11130 #ifdef TARGET_NR_getgid32
11131     case TARGET_NR_getgid32:
11132         return get_errno(getgid());
11133 #endif
11134 #ifdef TARGET_NR_geteuid32
11135     case TARGET_NR_geteuid32:
11136         return get_errno(geteuid());
11137 #endif
11138 #ifdef TARGET_NR_getegid32
11139     case TARGET_NR_getegid32:
11140         return get_errno(getegid());
11141 #endif
11142 #ifdef TARGET_NR_setreuid32
11143     case TARGET_NR_setreuid32:
11144         return get_errno(setreuid(arg1, arg2));
11145 #endif
11146 #ifdef TARGET_NR_setregid32
11147     case TARGET_NR_setregid32:
11148         return get_errno(setregid(arg1, arg2));
11149 #endif
11150 #ifdef TARGET_NR_getgroups32
11151     case TARGET_NR_getgroups32:
11152         {
11153             int gidsetsize = arg1;
11154             uint32_t *target_grouplist;
11155             gid_t *grouplist;
11156             int i;
11157 
11158             grouplist = alloca(gidsetsize * sizeof(gid_t));
11159             ret = get_errno(getgroups(gidsetsize, grouplist));
11160             if (gidsetsize == 0)
11161                 return ret;
11162             if (!is_error(ret)) {
11163                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11164                 if (!target_grouplist) {
11165                     return -TARGET_EFAULT;
11166                 }
11167                 for (i = 0; i < ret; i++)
11168                     target_grouplist[i] = tswap32(grouplist[i]);
11169                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11170             }
11171         }
11172         return ret;
11173 #endif
11174 #ifdef TARGET_NR_setgroups32
11175     case TARGET_NR_setgroups32:
11176         {
11177             int gidsetsize = arg1;
11178             uint32_t *target_grouplist;
11179             gid_t *grouplist;
11180             int i;
11181 
11182             grouplist = alloca(gidsetsize * sizeof(gid_t));
11183             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11184             if (!target_grouplist) {
11185                 return -TARGET_EFAULT;
11186             }
11187             for (i = 0; i < gidsetsize; i++)
11188                 grouplist[i] = tswap32(target_grouplist[i]);
11189             unlock_user(target_grouplist, arg2, 0);
11190             return get_errno(setgroups(gidsetsize, grouplist));
11191         }
11192 #endif
11193 #ifdef TARGET_NR_fchown32
11194     case TARGET_NR_fchown32:
11195         return get_errno(fchown(arg1, arg2, arg3));
11196 #endif
11197 #ifdef TARGET_NR_setresuid32
11198     case TARGET_NR_setresuid32:
11199         return get_errno(sys_setresuid(arg1, arg2, arg3));
11200 #endif
11201 #ifdef TARGET_NR_getresuid32
11202     case TARGET_NR_getresuid32:
11203         {
11204             uid_t ruid, euid, suid;
11205             ret = get_errno(getresuid(&ruid, &euid, &suid));
11206             if (!is_error(ret)) {
11207                 if (put_user_u32(ruid, arg1)
11208                     || put_user_u32(euid, arg2)
11209                     || put_user_u32(suid, arg3))
11210                     return -TARGET_EFAULT;
11211             }
11212         }
11213         return ret;
11214 #endif
11215 #ifdef TARGET_NR_setresgid32
11216     case TARGET_NR_setresgid32:
11217         return get_errno(sys_setresgid(arg1, arg2, arg3));
11218 #endif
11219 #ifdef TARGET_NR_getresgid32
11220     case TARGET_NR_getresgid32:
11221         {
11222             gid_t rgid, egid, sgid;
11223             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11224             if (!is_error(ret)) {
11225                 if (put_user_u32(rgid, arg1)
11226                     || put_user_u32(egid, arg2)
11227                     || put_user_u32(sgid, arg3))
11228                     return -TARGET_EFAULT;
11229             }
11230         }
11231         return ret;
11232 #endif
11233 #ifdef TARGET_NR_chown32
11234     case TARGET_NR_chown32:
11235         if (!(p = lock_user_string(arg1)))
11236             return -TARGET_EFAULT;
11237         ret = get_errno(chown(p, arg2, arg3));
11238         unlock_user(p, arg1, 0);
11239         return ret;
11240 #endif
11241 #ifdef TARGET_NR_setuid32
11242     case TARGET_NR_setuid32:
11243         return get_errno(sys_setuid(arg1));
11244 #endif
11245 #ifdef TARGET_NR_setgid32
11246     case TARGET_NR_setgid32:
11247         return get_errno(sys_setgid(arg1));
11248 #endif
11249 #ifdef TARGET_NR_setfsuid32
11250     case TARGET_NR_setfsuid32:
11251         return get_errno(setfsuid(arg1));
11252 #endif
11253 #ifdef TARGET_NR_setfsgid32
11254     case TARGET_NR_setfsgid32:
11255         return get_errno(setfsgid(arg1));
11256 #endif
11257 #ifdef TARGET_NR_mincore
11258     case TARGET_NR_mincore:
11259         {
11260             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11261             if (!a) {
11262                 return -TARGET_ENOMEM;
11263             }
11264             p = lock_user_string(arg3);
11265             if (!p) {
11266                 ret = -TARGET_EFAULT;
11267             } else {
11268                 ret = get_errno(mincore(a, arg2, p));
11269                 unlock_user(p, arg3, ret);
11270             }
11271             unlock_user(a, arg1, 0);
11272         }
11273         return ret;
11274 #endif
11275 #ifdef TARGET_NR_arm_fadvise64_64
11276     case TARGET_NR_arm_fadvise64_64:
11277         /* arm_fadvise64_64 looks like fadvise64_64 but
11278          * with different argument order: fd, advice, offset, len
11279          * rather than the usual fd, offset, len, advice.
11280          * Note that offset and len are both 64-bit so appear as
11281          * pairs of 32-bit registers.
11282          */
11283         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11284                             target_offset64(arg5, arg6), arg2);
11285         return -host_to_target_errno(ret);
11286 #endif
11287 
11288 #if TARGET_ABI_BITS == 32
11289 
11290 #ifdef TARGET_NR_fadvise64_64
11291     case TARGET_NR_fadvise64_64:
11292 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11293         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11294         ret = arg2;
11295         arg2 = arg3;
11296         arg3 = arg4;
11297         arg4 = arg5;
11298         arg5 = arg6;
11299         arg6 = ret;
11300 #else
11301         /* 6 args: fd, offset (high, low), len (high, low), advice */
11302         if (regpairs_aligned(cpu_env, num)) {
11303             /* offset is in (3,4), len in (5,6) and advice in 7 */
11304             arg2 = arg3;
11305             arg3 = arg4;
11306             arg4 = arg5;
11307             arg5 = arg6;
11308             arg6 = arg7;
11309         }
11310 #endif
11311         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11312                             target_offset64(arg4, arg5), arg6);
11313         return -host_to_target_errno(ret);
11314 #endif
11315 
11316 #ifdef TARGET_NR_fadvise64
11317     case TARGET_NR_fadvise64:
11318         /* 5 args: fd, offset (high, low), len, advice */
11319         if (regpairs_aligned(cpu_env, num)) {
11320             /* offset is in (3,4), len in 5 and advice in 6 */
11321             arg2 = arg3;
11322             arg3 = arg4;
11323             arg4 = arg5;
11324             arg5 = arg6;
11325         }
11326         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11327         return -host_to_target_errno(ret);
11328 #endif
11329 
11330 #else /* not a 32-bit ABI */
11331 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11332 #ifdef TARGET_NR_fadvise64_64
11333     case TARGET_NR_fadvise64_64:
11334 #endif
11335 #ifdef TARGET_NR_fadvise64
11336     case TARGET_NR_fadvise64:
11337 #endif
11338 #ifdef TARGET_S390X
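              /* s390 numbers POSIX_FADV_DONTNEED/NOREUSE as 6/7 instead of
               * 4/5, so remap those advice values; guest values 4/5 are mapped
               * to something the host will also reject as invalid.
               */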
11339         switch (arg4) {
11340         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11341         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11342         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11343         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11344         default: break;
11345         }
11346 #endif
11347         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11348 #endif
11349 #endif /* end of 64-bit ABI fadvise handling */
11350 
11351 #ifdef TARGET_NR_madvise
11352     case TARGET_NR_madvise:
11353         /* A straight passthrough may not be safe because qemu sometimes
11354            turns private file-backed mappings into anonymous mappings,
11355            which would break MADV_DONTNEED.  Since madvise is only a hint,
11356            ignoring it and returning success is OK.  */
11357         return 0;
11358 #endif
11359 #ifdef TARGET_NR_fcntl64
11360     case TARGET_NR_fcntl64:
11361     {
11362         int cmd;
11363         struct flock64 fl;
11364         from_flock64_fn *copyfrom = copy_from_user_flock64;
11365         to_flock64_fn *copyto = copy_to_user_flock64;
11366 
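              /* ARM's old ABI (OABI) lays out struct flock64 without the extra
               * padding that EABI inserts before 64-bit members, so it needs
               * its own copy-in/copy-out helpers.
               */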
11367 #ifdef TARGET_ARM
11368         if (!((CPUARMState *)cpu_env)->eabi) {
11369             copyfrom = copy_from_user_oabi_flock64;
11370             copyto = copy_to_user_oabi_flock64;
11371         }
11372 #endif
11373 
11374         cmd = target_to_host_fcntl_cmd(arg2);
11375         if (cmd == -TARGET_EINVAL) {
11376             return cmd;
11377         }
11378 
11379         switch (arg2) {
11380         case TARGET_F_GETLK64:
11381             ret = copyfrom(&fl, arg3);
11382             if (ret) {
11383                 break;
11384             }
11385             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11386             if (ret == 0) {
11387                 ret = copyto(arg3, &fl);
11388             }
11389             break;
11390 
11391         case TARGET_F_SETLK64:
11392         case TARGET_F_SETLKW64:
11393             ret = copyfrom(&fl, arg3);
11394             if (ret) {
11395                 break;
11396             }
11397             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11398             break;
11399         default:
11400             ret = do_fcntl(arg1, arg2, arg3);
11401             break;
11402         }
11403         return ret;
11404     }
11405 #endif
11406 #ifdef TARGET_NR_cacheflush
11407     case TARGET_NR_cacheflush:
11408         /* self-modifying code is handled automatically, so nothing needed */
11409         return 0;
11410 #endif
11411 #ifdef TARGET_NR_getpagesize
11412     case TARGET_NR_getpagesize:
11413         return TARGET_PAGE_SIZE;
11414 #endif
11415     case TARGET_NR_gettid:
11416         return get_errno(sys_gettid());
11417 #ifdef TARGET_NR_readahead
11418     case TARGET_NR_readahead:
11419 #if TARGET_ABI_BITS == 32
11420         if (regpairs_aligned(cpu_env, num)) {
11421             arg2 = arg3;
11422             arg3 = arg4;
11423             arg4 = arg5;
11424         }
11425         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11426 #else
11427         ret = get_errno(readahead(arg1, arg2, arg3));
11428 #endif
11429         return ret;
11430 #endif
11431 #ifdef CONFIG_ATTR
11432 #ifdef TARGET_NR_setxattr
11433     case TARGET_NR_listxattr:
11434     case TARGET_NR_llistxattr:
11435     {
11436         void *p, *b = 0;
11437         if (arg2) {
11438             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11439             if (!b) {
11440                 return -TARGET_EFAULT;
11441             }
11442         }
11443         p = lock_user_string(arg1);
11444         if (p) {
11445             if (num == TARGET_NR_listxattr) {
11446                 ret = get_errno(listxattr(p, b, arg3));
11447             } else {
11448                 ret = get_errno(llistxattr(p, b, arg3));
11449             }
11450         } else {
11451             ret = -TARGET_EFAULT;
11452         }
11453         unlock_user(p, arg1, 0);
11454         unlock_user(b, arg2, arg3);
11455         return ret;
11456     }
11457     case TARGET_NR_flistxattr:
11458     {
11459         void *b = 0;
11460         if (arg2) {
11461             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11462             if (!b) {
11463                 return -TARGET_EFAULT;
11464             }
11465         }
11466         ret = get_errno(flistxattr(arg1, b, arg3));
11467         unlock_user(b, arg2, arg3);
11468         return ret;
11469     }
11470     case TARGET_NR_setxattr:
11471     case TARGET_NR_lsetxattr:
11472         {
11473             void *p, *n, *v = 0;
11474             if (arg3) {
11475                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11476                 if (!v) {
11477                     return -TARGET_EFAULT;
11478                 }
11479             }
11480             p = lock_user_string(arg1);
11481             n = lock_user_string(arg2);
11482             if (p && n) {
11483                 if (num == TARGET_NR_setxattr) {
11484                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11485                 } else {
11486                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11487                 }
11488             } else {
11489                 ret = -TARGET_EFAULT;
11490             }
11491             unlock_user(p, arg1, 0);
11492             unlock_user(n, arg2, 0);
11493             unlock_user(v, arg3, 0);
11494         }
11495         return ret;
11496     case TARGET_NR_fsetxattr:
11497         {
11498             void *n, *v = 0;
11499             if (arg3) {
11500                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11501                 if (!v) {
11502                     return -TARGET_EFAULT;
11503                 }
11504             }
11505             n = lock_user_string(arg2);
11506             if (n) {
11507                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11508             } else {
11509                 ret = -TARGET_EFAULT;
11510             }
11511             unlock_user(n, arg2, 0);
11512             unlock_user(v, arg3, 0);
11513         }
11514         return ret;
11515     case TARGET_NR_getxattr:
11516     case TARGET_NR_lgetxattr:
11517         {
11518             void *p, *n, *v = 0;
11519             if (arg3) {
11520                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11521                 if (!v) {
11522                     return -TARGET_EFAULT;
11523                 }
11524             }
11525             p = lock_user_string(arg1);
11526             n = lock_user_string(arg2);
11527             if (p && n) {
11528                 if (num == TARGET_NR_getxattr) {
11529                     ret = get_errno(getxattr(p, n, v, arg4));
11530                 } else {
11531                     ret = get_errno(lgetxattr(p, n, v, arg4));
11532                 }
11533             } else {
11534                 ret = -TARGET_EFAULT;
11535             }
11536             unlock_user(p, arg1, 0);
11537             unlock_user(n, arg2, 0);
11538             unlock_user(v, arg3, arg4);
11539         }
11540         return ret;
11541     case TARGET_NR_fgetxattr:
11542         {
11543             void *n, *v = 0;
11544             if (arg3) {
11545                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11546                 if (!v) {
11547                     return -TARGET_EFAULT;
11548                 }
11549             }
11550             n = lock_user_string(arg2);
11551             if (n) {
11552                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11553             } else {
11554                 ret = -TARGET_EFAULT;
11555             }
11556             unlock_user(n, arg2, 0);
11557             unlock_user(v, arg3, arg4);
11558         }
11559         return ret;
11560     case TARGET_NR_removexattr:
11561     case TARGET_NR_lremovexattr:
11562         {
11563             void *p, *n;
11564             p = lock_user_string(arg1);
11565             n = lock_user_string(arg2);
11566             if (p && n) {
11567                 if (num == TARGET_NR_removexattr) {
11568                     ret = get_errno(removexattr(p, n));
11569                 } else {
11570                     ret = get_errno(lremovexattr(p, n));
11571                 }
11572             } else {
11573                 ret = -TARGET_EFAULT;
11574             }
11575             unlock_user(p, arg1, 0);
11576             unlock_user(n, arg2, 0);
11577         }
11578         return ret;
11579     case TARGET_NR_fremovexattr:
11580         {
11581             void *n;
11582             n = lock_user_string(arg2);
11583             if (n) {
11584                 ret = get_errno(fremovexattr(arg1, n));
11585             } else {
11586                 ret = -TARGET_EFAULT;
11587             }
11588             unlock_user(n, arg2, 0);
11589         }
11590         return ret;
11591 #endif
11592 #endif /* CONFIG_ATTR */
11593 #ifdef TARGET_NR_set_thread_area
11594     case TARGET_NR_set_thread_area:
11595 #if defined(TARGET_MIPS)
11596       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11597       return 0;
11598 #elif defined(TARGET_CRIS)
11599       if (arg1 & 0xff)
11600           ret = -TARGET_EINVAL;
11601       else {
11602           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11603           ret = 0;
11604       }
11605       return ret;
11606 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11607       return do_set_thread_area(cpu_env, arg1);
11608 #elif defined(TARGET_M68K)
11609       {
11610           TaskState *ts = cpu->opaque;
11611           ts->tp_value = arg1;
11612           return 0;
11613       }
11614 #else
11615       return -TARGET_ENOSYS;
11616 #endif
11617 #endif
11618 #ifdef TARGET_NR_get_thread_area
11619     case TARGET_NR_get_thread_area:
11620 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11621         return do_get_thread_area(cpu_env, arg1);
11622 #elif defined(TARGET_M68K)
11623         {
11624             TaskState *ts = cpu->opaque;
11625             return ts->tp_value;
11626         }
11627 #else
11628         return -TARGET_ENOSYS;
11629 #endif
11630 #endif
11631 #ifdef TARGET_NR_getdomainname
11632     case TARGET_NR_getdomainname:
11633         return -TARGET_ENOSYS;
11634 #endif
11635 
11636 #ifdef TARGET_NR_clock_settime
11637     case TARGET_NR_clock_settime:
11638     {
11639         struct timespec ts;
11640 
11641         ret = target_to_host_timespec(&ts, arg2);
11642         if (!is_error(ret)) {
11643             ret = get_errno(clock_settime(arg1, &ts));
11644         }
11645         return ret;
11646     }
11647 #endif
11648 #ifdef TARGET_NR_clock_settime64
11649     case TARGET_NR_clock_settime64:
11650     {
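              /* Same as clock_settime except that the guest-side timespec uses
               * the 64-bit layout; the host call is identical.
               */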
11651         struct timespec ts;
11652 
11653         ret = target_to_host_timespec64(&ts, arg2);
11654         if (!is_error(ret)) {
11655             ret = get_errno(clock_settime(arg1, &ts));
11656         }
11657         return ret;
11658     }
11659 #endif
11660 #ifdef TARGET_NR_clock_gettime
11661     case TARGET_NR_clock_gettime:
11662     {
11663         struct timespec ts;
11664         ret = get_errno(clock_gettime(arg1, &ts));
11665         if (!is_error(ret)) {
11666             ret = host_to_target_timespec(arg2, &ts);
11667         }
11668         return ret;
11669     }
11670 #endif
11671 #ifdef TARGET_NR_clock_gettime64
11672     case TARGET_NR_clock_gettime64:
11673     {
11674         struct timespec ts;
11675         ret = get_errno(clock_gettime(arg1, &ts));
11676         if (!is_error(ret)) {
11677             ret = host_to_target_timespec64(arg2, &ts);
11678         }
11679         return ret;
11680     }
11681 #endif
11682 #ifdef TARGET_NR_clock_getres
11683     case TARGET_NR_clock_getres:
11684     {
11685         struct timespec ts;
11686         ret = get_errno(clock_getres(arg1, &ts));
11687         if (!is_error(ret)) {
11688             host_to_target_timespec(arg2, &ts);
11689         }
11690         return ret;
11691     }
11692 #endif
11693 #ifdef TARGET_NR_clock_nanosleep
11694     case TARGET_NR_clock_nanosleep:
11695     {
11696         struct timespec ts;
11697         target_to_host_timespec(&ts, arg3);
11698         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11699                                              &ts, arg4 ? &ts : NULL));
11700         if (arg4)
11701             host_to_target_timespec(arg4, &ts);
11702 
11703 #if defined(TARGET_PPC)
11704         /* clock_nanosleep is odd in that it returns positive errno values.
11705          * On PPC, CR0 bit 3 should be set in such a situation. */
11706         if (ret && ret != -TARGET_ERESTARTSYS) {
11707             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11708         }
11709 #endif
11710         return ret;
11711     }
11712 #endif
11713 
11714 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
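          /* The kernel merely records this pointer (for the
           * CLONE_CHILD_CLEARTID write at thread exit), so passing the host
           * view of the guest address via g2h() is sufficient here.
           */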
11715     case TARGET_NR_set_tid_address:
11716         return get_errno(set_tid_address((int *)g2h(arg1)));
11717 #endif
11718 
11719     case TARGET_NR_tkill:
11720         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11721 
11722     case TARGET_NR_tgkill:
11723         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11724                          target_to_host_signal(arg3)));
11725 
11726 #ifdef TARGET_NR_set_robust_list
11727     case TARGET_NR_set_robust_list:
11728     case TARGET_NR_get_robust_list:
11729         /* The ABI for supporting robust futexes has userspace pass
11730          * the kernel a pointer to a linked list which is updated by
11731          * userspace after the syscall; the list is walked by the kernel
11732          * when the thread exits. Since the linked list in QEMU guest
11733          * memory isn't a valid linked list for the host and we have
11734          * no way to reliably intercept the thread-death event, we can't
11735          * support these. Silently return ENOSYS so that guest userspace
11736          * falls back to a non-robust futex implementation (which should
11737          * be OK except in the corner case of the guest crashing while
11738          * holding a mutex that is shared with another process via
11739          * shared memory).
11740          */
11741         return -TARGET_ENOSYS;
11742 #endif
11743 
11744 #if defined(TARGET_NR_utimensat)
11745     case TARGET_NR_utimensat:
11746         {
11747             struct timespec *tsp, ts[2];
11748             if (!arg3) {
11749                 tsp = NULL;
11750             } else {
11751                 target_to_host_timespec(ts, arg3);
11752                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11753                 tsp = ts;
11754             }
11755             if (!arg2)
11756                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11757             else {
11758                 if (!(p = lock_user_string(arg2))) {
11759                     return -TARGET_EFAULT;
11760                 }
11761                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11762                 unlock_user(p, arg2, 0);
11763             }
11764         }
11765         return ret;
11766 #endif
11767 #ifdef TARGET_NR_futex
11768     case TARGET_NR_futex:
11769         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11770 #endif
11771 #ifdef TARGET_NR_futex_time64
11772     case TARGET_NR_futex_time64:
11773         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
11774 #endif
11775 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11776     case TARGET_NR_inotify_init:
11777         ret = get_errno(sys_inotify_init());
11778         if (ret >= 0) {
11779             fd_trans_register(ret, &target_inotify_trans);
11780         }
11781         return ret;
11782 #endif
11783 #ifdef CONFIG_INOTIFY1
11784 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11785     case TARGET_NR_inotify_init1:
11786         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11787                                           fcntl_flags_tbl)));
11788         if (ret >= 0) {
11789             fd_trans_register(ret, &target_inotify_trans);
11790         }
11791         return ret;
11792 #endif
11793 #endif
11794 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11795     case TARGET_NR_inotify_add_watch:
11796         p = lock_user_string(arg2);
11797         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11798         unlock_user(p, arg2, 0);
11799         return ret;
11800 #endif
11801 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11802     case TARGET_NR_inotify_rm_watch:
11803         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11804 #endif
11805 
11806 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11807     case TARGET_NR_mq_open:
11808         {
11809             struct mq_attr posix_mq_attr;
11810             struct mq_attr *pposix_mq_attr;
11811             int host_flags;
11812 
11813             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11814             pposix_mq_attr = NULL;
11815             if (arg4) {
11816                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11817                     return -TARGET_EFAULT;
11818                 }
11819                 pposix_mq_attr = &posix_mq_attr;
11820             }
11821             p = lock_user_string(arg1 - 1);
11822             if (!p) {
11823                 return -TARGET_EFAULT;
11824             }
11825             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11826             unlock_user(p, arg1, 0);
11827         }
11828         return ret;
11829 
11830     case TARGET_NR_mq_unlink:
11831         p = lock_user_string(arg1 - 1);
11832         if (!p) {
11833             return -TARGET_EFAULT;
11834         }
11835         ret = get_errno(mq_unlink(p));
11836         unlock_user(p, arg1, 0);
11837         return ret;
11838 
11839 #ifdef TARGET_NR_mq_timedsend
11840     case TARGET_NR_mq_timedsend:
11841         {
11842             struct timespec ts;
11843 
11844             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11845             if (arg5 != 0) {
11846                 target_to_host_timespec(&ts, arg5);
11847                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11848                 host_to_target_timespec(arg5, &ts);
11849             } else {
11850                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11851             }
11852             unlock_user(p, arg2, arg3);
11853         }
11854         return ret;
11855 #endif
11856 
11857 #ifdef TARGET_NR_mq_timedreceive
11858     case TARGET_NR_mq_timedreceive:
11859         {
11860             struct timespec ts;
11861             unsigned int prio;
11862 
11863             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11864             if (arg5 != 0) {
11865                 target_to_host_timespec(&ts, arg5);
11866                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11867                                                      &prio, &ts));
11868                 host_to_target_timespec(arg5, &ts);
11869             } else {
11870                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11871                                                      &prio, NULL));
11872             }
11873             unlock_user(p, arg2, arg3);
11874             if (arg4 != 0)
11875                 put_user_u32(prio, arg4);
11876         }
11877         return ret;
11878 #endif
11879 
11880     /* Not implemented for now... */
11881 /*     case TARGET_NR_mq_notify: */
11882 /*         break; */
11883 
11884     case TARGET_NR_mq_getsetattr:
11885         {
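                  /* mq_setattr() also reports the previous attributes, so when
                   * a new attr block is supplied its output doubles as the
                   * "old" attributes; otherwise fall back to mq_getattr().
                   */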
11886             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11887             ret = 0;
11888             if (arg2 != 0) {
11889                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11890                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11891                                            &posix_mq_attr_out));
11892             } else if (arg3 != 0) {
11893                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11894             }
11895             if (ret == 0 && arg3 != 0) {
11896                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11897             }
11898         }
11899         return ret;
11900 #endif
11901 
11902 #ifdef CONFIG_SPLICE
11903 #ifdef TARGET_NR_tee
11904     case TARGET_NR_tee:
11905         {
11906             ret = get_errno(tee(arg1, arg2, arg3, arg4));
11907         }
11908         return ret;
11909 #endif
11910 #ifdef TARGET_NR_splice
11911     case TARGET_NR_splice:
11912         {
11913             loff_t loff_in, loff_out;
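                  /* The host splice() updates the offsets in place, so copy
                   * any guest-supplied offsets in first and write them back
                   * afterwards.
                   */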
11914             loff_t *ploff_in = NULL, *ploff_out = NULL;
11915             if (arg2) {
11916                 if (get_user_u64(loff_in, arg2)) {
11917                     return -TARGET_EFAULT;
11918                 }
11919                 ploff_in = &loff_in;
11920             }
11921             if (arg4) {
11922                 if (get_user_u64(loff_out, arg4)) {
11923                     return -TARGET_EFAULT;
11924                 }
11925                 ploff_out = &loff_out;
11926             }
11927             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11928             if (arg2) {
11929                 if (put_user_u64(loff_in, arg2)) {
11930                     return -TARGET_EFAULT;
11931                 }
11932             }
11933             if (arg4) {
11934                 if (put_user_u64(loff_out, arg4)) {
11935                     return -TARGET_EFAULT;
11936                 }
11937             }
11938         }
11939         return ret;
11940 #endif
11941 #ifdef TARGET_NR_vmsplice
11942     case TARGET_NR_vmsplice:
11943         {
11944             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11945             if (vec != NULL) {
11946                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11947                 unlock_iovec(vec, arg2, arg3, 0);
11948             } else {
11949                 ret = -host_to_target_errno(errno);
11950             }
11951         }
11952         return ret;
11953 #endif
11954 #endif /* CONFIG_SPLICE */
11955 #ifdef CONFIG_EVENTFD
11956 #if defined(TARGET_NR_eventfd)
11957     case TARGET_NR_eventfd:
11958         ret = get_errno(eventfd(arg1, 0));
11959         if (ret >= 0) {
11960             fd_trans_register(ret, &target_eventfd_trans);
11961         }
11962         return ret;
11963 #endif
11964 #if defined(TARGET_NR_eventfd2)
11965     case TARGET_NR_eventfd2:
11966     {
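              /* EFD_NONBLOCK/EFD_CLOEXEC share their values with O_NONBLOCK/
               * O_CLOEXEC, which differ between targets, so translate those two
               * bits explicitly and pass any remaining flags straight through.
               */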
11967         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11968         if (arg2 & TARGET_O_NONBLOCK) {
11969             host_flags |= O_NONBLOCK;
11970         }
11971         if (arg2 & TARGET_O_CLOEXEC) {
11972             host_flags |= O_CLOEXEC;
11973         }
11974         ret = get_errno(eventfd(arg1, host_flags));
11975         if (ret >= 0) {
11976             fd_trans_register(ret, &target_eventfd_trans);
11977         }
11978         return ret;
11979     }
11980 #endif
11981 #endif /* CONFIG_EVENTFD  */
11982 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11983     case TARGET_NR_fallocate:
11984 #if TARGET_ABI_BITS == 32
11985         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11986                                   target_offset64(arg5, arg6)));
11987 #else
11988         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11989 #endif
11990         return ret;
11991 #endif
11992 #if defined(CONFIG_SYNC_FILE_RANGE)
11993 #if defined(TARGET_NR_sync_file_range)
11994     case TARGET_NR_sync_file_range:
11995 #if TARGET_ABI_BITS == 32
11996 #if defined(TARGET_MIPS)
11997         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11998                                         target_offset64(arg5, arg6), arg7));
11999 #else
12000         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12001                                         target_offset64(arg4, arg5), arg6));
12002 #endif /* !TARGET_MIPS */
12003 #else
12004         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12005 #endif
12006         return ret;
12007 #endif
12008 #if defined(TARGET_NR_sync_file_range2) || \
12009     defined(TARGET_NR_arm_sync_file_range)
12010 #if defined(TARGET_NR_sync_file_range2)
12011     case TARGET_NR_sync_file_range2:
12012 #endif
12013 #if defined(TARGET_NR_arm_sync_file_range)
12014     case TARGET_NR_arm_sync_file_range:
12015 #endif
12016         /* This is like sync_file_range but the arguments are reordered */
12017 #if TARGET_ABI_BITS == 32
12018         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12019                                         target_offset64(arg5, arg6), arg2));
12020 #else
12021         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12022 #endif
12023         return ret;
12024 #endif
12025 #endif
12026 #if defined(TARGET_NR_signalfd4)
12027     case TARGET_NR_signalfd4:
12028         return do_signalfd4(arg1, arg2, arg4);
12029 #endif
12030 #if defined(TARGET_NR_signalfd)
12031     case TARGET_NR_signalfd:
12032         return do_signalfd4(arg1, arg2, 0);
12033 #endif
12034 #if defined(CONFIG_EPOLL)
12035 #if defined(TARGET_NR_epoll_create)
12036     case TARGET_NR_epoll_create:
12037         return get_errno(epoll_create(arg1));
12038 #endif
12039 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12040     case TARGET_NR_epoll_create1:
12041         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12042 #endif
12043 #if defined(TARGET_NR_epoll_ctl)
12044     case TARGET_NR_epoll_ctl:
12045     {
12046         struct epoll_event ep;
12047         struct epoll_event *epp = 0;
12048         if (arg4) {
12049             struct target_epoll_event *target_ep;
12050             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12051                 return -TARGET_EFAULT;
12052             }
12053             ep.events = tswap32(target_ep->events);
12054             /* The epoll_data_t union is just opaque data to the kernel,
12055              * so we transfer all 64 bits across and need not worry what
12056              * actual data type it is.
12057              */
12058             ep.data.u64 = tswap64(target_ep->data.u64);
12059             unlock_user_struct(target_ep, arg4, 0);
12060             epp = &ep;
12061         }
12062         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12063     }
12064 #endif
12065 
12066 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12067 #if defined(TARGET_NR_epoll_wait)
12068     case TARGET_NR_epoll_wait:
12069 #endif
12070 #if defined(TARGET_NR_epoll_pwait)
12071     case TARGET_NR_epoll_pwait:
12072 #endif
12073     {
12074         struct target_epoll_event *target_ep;
12075         struct epoll_event *ep;
12076         int epfd = arg1;
12077         int maxevents = arg3;
12078         int timeout = arg4;
12079 
12080         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12081             return -TARGET_EINVAL;
12082         }
12083 
12084         target_ep = lock_user(VERIFY_WRITE, arg2,
12085                               maxevents * sizeof(struct target_epoll_event), 1);
12086         if (!target_ep) {
12087             return -TARGET_EFAULT;
12088         }
12089 
12090         ep = g_try_new(struct epoll_event, maxevents);
12091         if (!ep) {
12092             unlock_user(target_ep, arg2, 0);
12093             return -TARGET_ENOMEM;
12094         }
12095 
12096         switch (num) {
12097 #if defined(TARGET_NR_epoll_pwait)
12098         case TARGET_NR_epoll_pwait:
12099         {
12100             target_sigset_t *target_set;
12101             sigset_t _set, *set = &_set;
12102 
12103             if (arg5) {
12104                 if (arg6 != sizeof(target_sigset_t)) {
12105                     ret = -TARGET_EINVAL;
12106                     break;
12107                 }
12108 
12109                 target_set = lock_user(VERIFY_READ, arg5,
12110                                        sizeof(target_sigset_t), 1);
12111                 if (!target_set) {
12112                     ret = -TARGET_EFAULT;
12113                     break;
12114                 }
12115                 target_to_host_sigset(set, target_set);
12116                 unlock_user(target_set, arg5, 0);
12117             } else {
12118                 set = NULL;
12119             }
12120 
12121             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12122                                              set, SIGSET_T_SIZE));
12123             break;
12124         }
12125 #endif
12126 #if defined(TARGET_NR_epoll_wait)
12127         case TARGET_NR_epoll_wait:
12128             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12129                                              NULL, 0));
12130             break;
12131 #endif
12132         default:
12133             ret = -TARGET_ENOSYS;
12134         }
12135         if (!is_error(ret)) {
12136             int i;
12137             for (i = 0; i < ret; i++) {
12138                 target_ep[i].events = tswap32(ep[i].events);
12139                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12140             }
12141             unlock_user(target_ep, arg2,
12142                         ret * sizeof(struct target_epoll_event));
12143         } else {
12144             unlock_user(target_ep, arg2, 0);
12145         }
12146         g_free(ep);
12147         return ret;
12148     }
12149 #endif
12150 #endif
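    /*
     * Both epoll cases above funnel into safe_epoll_pwait(): epoll_wait()
     * is simply epoll_pwait() with no signal mask, mirroring how the kernel
     * treats the two.  The host call issued is effectively:
     *
     *     epoll_pwait(epfd, ep, maxevents, timeout, set, SIGSET_T_SIZE);
     *
     * with set == NULL and a zero sigsetsize for plain epoll_wait.
     */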
12151 #ifdef TARGET_NR_prlimit64
12152     case TARGET_NR_prlimit64:
12153     {
12154         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12155         struct target_rlimit64 *target_rnew, *target_rold;
12156         struct host_rlimit64 rnew, rold, *rnewp = NULL;
12157         int resource = target_to_host_resource(arg2);
12158 
12159         if (arg3 && (resource != RLIMIT_AS &&
12160                      resource != RLIMIT_DATA &&
12161                      resource != RLIMIT_STACK)) {
12162             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12163                 return -TARGET_EFAULT;
12164             }
12165             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12166             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12167             unlock_user_struct(target_rnew, arg3, 0);
12168             rnewp = &rnew;
12169         }
12170 
12171         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12172         if (!is_error(ret) && arg4) {
12173             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12174                 return -TARGET_EFAULT;
12175             }
12176             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12177             target_rold->rlim_max = tswap64(rold.rlim_max);
12178             unlock_user_struct(target_rold, arg4, 1);
12179         }
12180         return ret;
12181     }
12182 #endif
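    /*
     * Note the RLIMIT_AS/RLIMIT_DATA/RLIMIT_STACK exclusion above: a new
     * limit for those resources is deliberately not forwarded to the host,
     * presumably because it would also constrain the QEMU process itself,
     * whose address-space needs exceed the guest's.  The old limit is still
     * read back into arg4 either way, so from the guest's point of view
     * (illustrative values only):
     *
     *     struct rlimit64 new = { .rlim_cur = 1 << 20, .rlim_max = 1 << 20 };
     *     struct rlimit64 old;
     *     prlimit64(0, RLIMIT_AS, &new, &old);  // 'old' is filled in,
     *                                           // 'new' is quietly ignored
     */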
12183 #ifdef TARGET_NR_gethostname
12184     case TARGET_NR_gethostname:
12185     {
12186         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12187         if (name) {
12188             ret = get_errno(gethostname(name, arg2));
12189             unlock_user(name, arg1, arg2);
12190         } else {
12191             ret = -TARGET_EFAULT;
12192         }
12193         return ret;
12194     }
12195 #endif
12196 #ifdef TARGET_NR_atomic_cmpxchg_32
12197     case TARGET_NR_atomic_cmpxchg_32:
12198     {
12199         /* should use start_exclusive from main.c */
12200         abi_ulong mem_value;
12201         if (get_user_u32(mem_value, arg6)) {
12202             target_siginfo_t info;
12203             info.si_signo = SIGSEGV;
12204             info.si_errno = 0;
12205             info.si_code = TARGET_SEGV_MAPERR;
12206             info._sifields._sigfault._addr = arg6;
12207             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12208                          QEMU_SI_FAULT, &info);
12209             return 0xdeadbeef;
12210         }
12211         if (mem_value == arg2) {
12212             put_user_u32(arg1, arg6);
12213         }
12214         return mem_value;
12215     }
12216 #endif
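    /*
     * The "should use start_exclusive" note above refers to the fact that
     * the read-compare-write sequence is not atomic with respect to other
     * guest threads.  A sketch of the intended shape, using the existing
     * start_exclusive()/end_exclusive() helpers that park all other vCPUs
     * (error handling omitted):
     *
     *     start_exclusive();
     *     if (get_user_u32(mem_value, arg6) == 0) {
     *         if (mem_value == arg2) {
     *             put_user_u32(arg1, arg6);
     *         }
     *     }
     *     end_exclusive();
     */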
12217 #ifdef TARGET_NR_atomic_barrier
12218     case TARGET_NR_atomic_barrier:
12219         /* Like the kernel implementation and the QEMU ARM
12220            barrier, this is a no-op. */
12221         return 0;
12222 #endif
12223 
12224 #ifdef TARGET_NR_timer_create
12225     case TARGET_NR_timer_create:
12226     {
12227         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12228 
12229         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12230 
12231         int clkid = arg1;
12232         int timer_index = next_free_host_timer();
12233 
12234         if (timer_index < 0) {
12235             ret = -TARGET_EAGAIN;
12236         } else {
12237             timer_t *phtimer = g_posix_timers + timer_index;
12238 
12239             if (arg2) {
12240                 phost_sevp = &host_sevp;
12241                 ret = target_to_host_sigevent(phost_sevp, arg2);
12242                 if (ret != 0) {
12243                     return ret;
12244                 }
12245             }
12246 
12247             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12248             if (ret) {
12249                 phtimer = NULL;
12250             } else {
12251                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12252                     return -TARGET_EFAULT;
12253                 }
12254             }
12255         }
12256         return ret;
12257     }
12258 #endif
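    /*
     * The value handed back to the guest is not the host timer_t itself but
     * TIMER_MAGIC ORed with the index into g_posix_timers[].  The
     * timer_settime/gettime/getoverrun/delete cases below undo that via
     * get_timer_id(), which is roughly (see the helper earlier in this file
     * for the authoritative version):
     *
     *     static target_timer_t get_timer_id(abi_long arg)
     *     {
     *         target_timer_t timerid = arg;
     *
     *         if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
     *             return -TARGET_EINVAL;
     *         }
     *         timerid &= 0xffff;
     *         if (timerid >= ARRAY_SIZE(g_posix_timers)) {
     *             return -TARGET_EINVAL;
     *         }
     *         return timerid;
     *     }
     */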
12259 
12260 #ifdef TARGET_NR_timer_settime
12261     case TARGET_NR_timer_settime:
12262     {
12263         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12264          * struct itimerspec * old_value */
12265         target_timer_t timerid = get_timer_id(arg1);
12266 
12267         if (timerid < 0) {
12268             ret = timerid;
12269         } else if (arg3 == 0) {
12270             ret = -TARGET_EINVAL;
12271         } else {
12272             timer_t htimer = g_posix_timers[timerid];
12273             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12274 
12275             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12276                 return -TARGET_EFAULT;
12277             }
12278             ret = get_errno(
12279                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12280             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12281                 return -TARGET_EFAULT;
12282             }
12283         }
12284         return ret;
12285     }
12286 #endif
12287 
12288 #ifdef TARGET_NR_timer_gettime
12289     case TARGET_NR_timer_gettime:
12290     {
12291         /* args: timer_t timerid, struct itimerspec *curr_value */
12292         target_timer_t timerid = get_timer_id(arg1);
12293 
12294         if (timerid < 0) {
12295             ret = timerid;
12296         } else if (!arg2) {
12297             ret = -TARGET_EFAULT;
12298         } else {
12299             timer_t htimer = g_posix_timers[timerid];
12300             struct itimerspec hspec;
12301             ret = get_errno(timer_gettime(htimer, &hspec));
12302 
12303             if (host_to_target_itimerspec(arg2, &hspec)) {
12304                 ret = -TARGET_EFAULT;
12305             }
12306         }
12307         return ret;
12308     }
12309 #endif
12310 
12311 #ifdef TARGET_NR_timer_getoverrun
12312     case TARGET_NR_timer_getoverrun:
12313     {
12314         /* args: timer_t timerid */
12315         target_timer_t timerid = get_timer_id(arg1);
12316 
12317         if (timerid < 0) {
12318             ret = timerid;
12319         } else {
12320             timer_t htimer = g_posix_timers[timerid];
12321             ret = get_errno(timer_getoverrun(htimer));
12322         }
12323         return ret;
12324     }
12325 #endif
12326 
12327 #ifdef TARGET_NR_timer_delete
12328     case TARGET_NR_timer_delete:
12329     {
12330         /* args: timer_t timerid */
12331         target_timer_t timerid = get_timer_id(arg1);
12332 
12333         if (timerid < 0) {
12334             ret = timerid;
12335         } else {
12336             timer_t htimer = g_posix_timers[timerid];
12337             ret = get_errno(timer_delete(htimer));
12338             g_posix_timers[timerid] = 0;
12339         }
12340         return ret;
12341     }
12342 #endif
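    /*
     * Clearing g_posix_timers[timerid] above is what returns the slot to
     * the allocator: timer_create's next_free_host_timer() hands out the
     * index of the first free (zero) entry, roughly:
     *
     *     static int next_free_host_timer(void)
     *     {
     *         int k;
     *
     *         for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
     *             if (g_posix_timers[k] == 0) {
     *                 return k;
     *             }
     *         }
     *         return -1;
     *     }
     */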
12343 
12344 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12345     case TARGET_NR_timerfd_create:
12346         return get_errno(timerfd_create(arg1,
12347                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12348 #endif
12349 
12350 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12351     case TARGET_NR_timerfd_gettime:
12352         {
12353             struct itimerspec its_curr;
12354 
12355             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12356 
12357             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12358                 return -TARGET_EFAULT;
12359             }
12360         }
12361         return ret;
12362 #endif
12363 
12364 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12365     case TARGET_NR_timerfd_settime:
12366         {
12367             struct itimerspec its_new, its_old, *p_new;
12368 
12369             if (arg3) {
12370                 if (target_to_host_itimerspec(&its_new, arg3)) {
12371                     return -TARGET_EFAULT;
12372                 }
12373                 p_new = &its_new;
12374             } else {
12375                 p_new = NULL;
12376             }
12377 
12378             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12379 
12380             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12381                 return -TARGET_EFAULT;
12382             }
12383         }
12384         return ret;
12385 #endif
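    /*
     * The timerfd cases above and the POSIX timer cases earlier share the
     * itimerspec converters.  The structure being translated is just two
     * timespecs, so the conversion is a field-by-field copy with byte
     * swapping:
     *
     *     struct itimerspec {
     *         struct timespec it_interval;  // period, zero for one-shot
     *         struct timespec it_value;     // time until next expiration
     *     };
     */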
12386 
12387 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12388     case TARGET_NR_ioprio_get:
12389         return get_errno(ioprio_get(arg1, arg2));
12390 #endif
12391 
12392 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12393     case TARGET_NR_ioprio_set:
12394         return get_errno(ioprio_set(arg1, arg2, arg3));
12395 #endif
12396 
12397 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12398     case TARGET_NR_setns:
12399         return get_errno(setns(arg1, arg2));
12400 #endif
12401 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12402     case TARGET_NR_unshare:
12403         return get_errno(unshare(arg1));
12404 #endif
12405 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12406     case TARGET_NR_kcmp:
12407         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12408 #endif
12409 #ifdef TARGET_NR_swapcontext
12410     case TARGET_NR_swapcontext:
12411         /* PowerPC specific.  */
12412         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12413 #endif
12414 #ifdef TARGET_NR_memfd_create
12415     case TARGET_NR_memfd_create:
12416         p = lock_user_string(arg1);
12417         if (!p) {
12418             return -TARGET_EFAULT;
12419         }
12420         ret = get_errno(memfd_create(p, arg2));
12421         fd_trans_unregister(ret);
12422         unlock_user(p, arg1, 0);
12423         return ret;
12424 #endif
12425 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12426     case TARGET_NR_membarrier:
12427         return get_errno(membarrier(arg1, arg2));
12428 #endif
12429 
12430     default:
12431         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12432         return -TARGET_ENOSYS;
12433     }
12434     return ret;
12435 }
12436 
12437 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12438                     abi_long arg2, abi_long arg3, abi_long arg4,
12439                     abi_long arg5, abi_long arg6, abi_long arg7,
12440                     abi_long arg8)
12441 {
12442     CPUState *cpu = env_cpu(cpu_env);
12443     abi_long ret;
12444 
12445 #ifdef DEBUG_ERESTARTSYS
12446     /* Debug-only code for exercising the syscall-restart code paths
12447      * in the per-architecture cpu main loops: restart every syscall
12448      * the guest makes once before letting it through.
12449      */
12450     {
12451         static bool flag;
12452         flag = !flag;
12453         if (flag) {
12454             return -TARGET_ERESTARTSYS;
12455         }
12456     }
12457 #endif
12458 
12459     record_syscall_start(cpu, num, arg1,
12460                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12461 
12462     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12463         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12464     }
12465 
12466     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12467                       arg5, arg6, arg7, arg8);
12468 
12469     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12470         print_syscall_ret(num, ret);
12471     }
12472 
12473     record_syscall_return(cpu, num, ret);
12474     return ret;
12475 }
12476