xref: /openbmc/qemu/linux-user/syscall.c (revision 859e8a89)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #include "linux_loop.h"
116 #include "uname.h"
117 
118 #include "qemu.h"
119 #include "qemu/guest-random.h"
120 #include "user/syscall-trace.h"
121 #include "qapi/error.h"
122 #include "fd-trans.h"
123 #include "tcg/tcg.h"
124 
125 #ifndef CLONE_IO
126 #define CLONE_IO                0x80000000      /* Clone io context */
127 #endif
128 
129 /* We can't directly call the host clone syscall, because this will
130  * badly confuse libc (breaking mutexes, for example). So we must
131  * divide clone flags into:
132  *  * flag combinations that look like pthread_create()
133  *  * flag combinations that look like fork()
134  *  * flags we can implement within QEMU itself
135  *  * flags we can't support and will return an error for
136  */
137 /* For thread creation, all these flags must be present; for
138  * fork, none must be present.
139  */
140 #define CLONE_THREAD_FLAGS                              \
141     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
142      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
143 
144 /* These flags are ignored:
145  * CLONE_DETACHED is now ignored by the kernel;
146  * CLONE_IO is just an optimisation hint to the I/O scheduler
147  */
148 #define CLONE_IGNORED_FLAGS                     \
149     (CLONE_DETACHED | CLONE_IO)
150 
151 /* Flags for fork which we can implement within QEMU itself */
152 #define CLONE_OPTIONAL_FORK_FLAGS               \
153     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
154      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
155 
156 /* Flags for thread creation which we can implement within QEMU itself */
157 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
158     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
159      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
160 
161 #define CLONE_INVALID_FORK_FLAGS                                        \
162     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
163 
164 #define CLONE_INVALID_THREAD_FLAGS                                      \
165     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
166        CLONE_IGNORED_FLAGS))
167 
168 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
169  * have almost all been allocated. We cannot support any of
170  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
171  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
172  * The checks against the invalid thread masks above will catch these.
173  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
174  */
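/* As an illustrative example of how the masks above are applied (not a
 * statement of what every libc does): a typical NPTL pthread_create() clone
 * passes CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 * CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID,
 * i.e. all of CLONE_THREAD_FLAGS plus only "optional thread" bits, so it is
 * treated as thread creation; a fork()-style clone passes at most SIGCHLD
 * (within CSIGNAL) plus CLONE_CHILD_SETTID/CLONE_CHILD_CLEARTID, so it takes
 * the fork path.
 */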
175 
176 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
177  * once. This exercises the codepaths for restart.
178  */
179 //#define DEBUG_ERESTARTSYS
180 
181 //#include <linux/msdos_fs.h>
182 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
183 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
184 
185 #undef _syscall0
186 #undef _syscall1
187 #undef _syscall2
188 #undef _syscall3
189 #undef _syscall4
190 #undef _syscall5
191 #undef _syscall6
192 
193 #define _syscall0(type,name)		\
194 static type name (void)			\
195 {					\
196 	return syscall(__NR_##name);	\
197 }
198 
199 #define _syscall1(type,name,type1,arg1)		\
200 static type name (type1 arg1)			\
201 {						\
202 	return syscall(__NR_##name, arg1);	\
203 }
204 
205 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
206 static type name (type1 arg1,type2 arg2)		\
207 {							\
208 	return syscall(__NR_##name, arg1, arg2);	\
209 }
210 
211 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
212 static type name (type1 arg1,type2 arg2,type3 arg3)		\
213 {								\
214 	return syscall(__NR_##name, arg1, arg2, arg3);		\
215 }
216 
217 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
218 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
219 {										\
220 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
221 }
222 
223 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
224 		  type5,arg5)							\
225 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
226 {										\
227 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
228 }
229 
230 
231 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
232 		  type5,arg5,type6,arg6)					\
233 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
234                   type6 arg6)							\
235 {										\
236 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
237 }
238 
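/* For reference, an invocation such as
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count);
 * expands to a static wrapper:
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 * where __NR_sys_getdents is one of the aliases defined just below.
 */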
239 
240 #define __NR_sys_uname __NR_uname
241 #define __NR_sys_getcwd1 __NR_getcwd
242 #define __NR_sys_getdents __NR_getdents
243 #define __NR_sys_getdents64 __NR_getdents64
244 #define __NR_sys_getpriority __NR_getpriority
245 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
246 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
247 #define __NR_sys_syslog __NR_syslog
248 #define __NR_sys_futex __NR_futex
249 #define __NR_sys_inotify_init __NR_inotify_init
250 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
251 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
252 #define __NR_sys_statx __NR_statx
253 
254 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
255 #define __NR__llseek __NR_lseek
256 #endif
257 
258 /* Newer kernel ports have llseek() instead of _llseek() */
259 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
260 #define TARGET_NR__llseek TARGET_NR_llseek
261 #endif
262 
263 #define __NR_sys_gettid __NR_gettid
264 _syscall0(int, sys_gettid)
265 
266 /* For the 64-bit guest on 32-bit host case we must emulate
267  * getdents using getdents64, because otherwise the host
268  * might hand us back more dirent records than we can fit
269  * into the guest buffer after structure format conversion.
270  * In all other cases we emulate getdents with the host getdents, if available.
271  */
272 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
273 #define EMULATE_GETDENTS_WITH_GETDENTS
274 #endif
275 
276 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
277 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
278 #endif
279 #if (defined(TARGET_NR_getdents) && \
280       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
281     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
282 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
283 #endif
284 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
285 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
286           loff_t *, res, uint, wh);
287 #endif
288 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
289 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
290           siginfo_t *, uinfo)
291 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
292 #ifdef __NR_exit_group
293 _syscall1(int,exit_group,int,error_code)
294 #endif
295 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
296 _syscall1(int,set_tid_address,int *,tidptr)
297 #endif
298 #if defined(TARGET_NR_futex) && defined(__NR_futex)
299 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
300           const struct timespec *,timeout,int *,uaddr2,int,val3)
301 #endif
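/* The scheduler affinity and getcpu wrappers below go straight to the kernel
 * rather than through the libc wrappers: glibc's sched_getaffinity(), for
 * example, hides the kernel's return value (the number of bytes written to
 * the mask), which the emulation code needs.
 */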
302 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
303 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
304           unsigned long *, user_mask_ptr);
305 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
306 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
307           unsigned long *, user_mask_ptr);
308 #define __NR_sys_getcpu __NR_getcpu
309 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
310 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
311           void *, arg);
312 _syscall2(int, capget, struct __user_cap_header_struct *, header,
313           struct __user_cap_data_struct *, data);
314 _syscall2(int, capset, struct __user_cap_header_struct *, header,
315           struct __user_cap_data_struct *, data);
316 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
317 _syscall2(int, ioprio_get, int, which, int, who)
318 #endif
319 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
320 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
321 #endif
322 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
323 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
324 #endif
325 
326 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
327 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
328           unsigned long, idx1, unsigned long, idx2)
329 #endif
330 
331 /*
332  * It is assumed that struct statx is architecture independent.
333  */
334 #if defined(TARGET_NR_statx) && defined(__NR_statx)
335 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
336           unsigned int, mask, struct target_statx *, statxbuf)
337 #endif
338 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
339 _syscall2(int, membarrier, int, cmd, int, flags)
340 #endif
341 
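/* Each row is { target mask, target bits, host mask, host bits }: the generic
 * bitmask translation helpers test the flags under the mask against the
 * "bits" value and, on a match, set the corresponding bits of the other
 * (host or target) encoding, so O_* flags whose numeric values differ
 * between guest and host are remapped correctly.
 */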
342 static bitmask_transtbl fcntl_flags_tbl[] = {
343   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
344   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
345   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
346   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
347   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
348   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
349   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
350   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
351   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
352   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
353   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
354   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
355   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
356 #if defined(O_DIRECT)
357   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
358 #endif
359 #if defined(O_NOATIME)
360   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
361 #endif
362 #if defined(O_CLOEXEC)
363   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
364 #endif
365 #if defined(O_PATH)
366   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
367 #endif
368 #if defined(O_TMPFILE)
369   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
370 #endif
371   /* Don't terminate the list prematurely on 64-bit host+guest.  */
372 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
373   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
374 #endif
375   { 0, 0, 0, 0 }
376 };
377 
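/* The getcwd(2) syscall returns the number of bytes written to the buffer,
 * including the trailing NUL, whereas the libc getcwd() returns a pointer.
 * This wrapper reproduces the syscall's return convention on top of libc.
 */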
378 static int sys_getcwd1(char *buf, size_t size)
379 {
380   if (getcwd(buf, size) == NULL) {
381       /* getcwd() sets errno */
382       return (-1);
383   }
384   return strlen(buf)+1;
385 }
386 
387 #ifdef TARGET_NR_utimensat
388 #if defined(__NR_utimensat)
389 #define __NR_sys_utimensat __NR_utimensat
390 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
391           const struct timespec *,tsp,int,flags)
392 #else
393 static int sys_utimensat(int dirfd, const char *pathname,
394                          const struct timespec times[2], int flags)
395 {
396     errno = ENOSYS;
397     return -1;
398 }
399 #endif
400 #endif /* TARGET_NR_utimensat */
401 
402 #ifdef TARGET_NR_renameat2
403 #if defined(__NR_renameat2)
404 #define __NR_sys_renameat2 __NR_renameat2
405 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
406           const char *, new, unsigned int, flags)
407 #else
408 static int sys_renameat2(int oldfd, const char *old,
409                          int newfd, const char *new, int flags)
410 {
411     if (flags == 0) {
412         return renameat(oldfd, old, newfd, new);
413     }
414     errno = ENOSYS;
415     return -1;
416 }
417 #endif
418 #endif /* TARGET_NR_renameat2 */
419 
420 #ifdef CONFIG_INOTIFY
421 #include <sys/inotify.h>
422 
423 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
424 static int sys_inotify_init(void)
425 {
426   return (inotify_init());
427 }
428 #endif
429 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
430 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
431 {
432   return (inotify_add_watch(fd, pathname, mask));
433 }
434 #endif
435 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
436 static int sys_inotify_rm_watch(int fd, int32_t wd)
437 {
438   return (inotify_rm_watch(fd, wd));
439 }
440 #endif
441 #ifdef CONFIG_INOTIFY1
442 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
443 static int sys_inotify_init1(int flags)
444 {
445   return (inotify_init1(flags));
446 }
447 #endif
448 #endif
449 #else
450 /* Userspace can usually survive at runtime without inotify */
451 #undef TARGET_NR_inotify_init
452 #undef TARGET_NR_inotify_init1
453 #undef TARGET_NR_inotify_add_watch
454 #undef TARGET_NR_inotify_rm_watch
455 #endif /* CONFIG_INOTIFY  */
456 
457 #if defined(TARGET_NR_prlimit64)
458 #ifndef __NR_prlimit64
459 # define __NR_prlimit64 -1
460 #endif
461 #define __NR_sys_prlimit64 __NR_prlimit64
462 /* The glibc rlimit structure may not be the one used by the underlying syscall */
463 struct host_rlimit64 {
464     uint64_t rlim_cur;
465     uint64_t rlim_max;
466 };
467 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
468           const struct host_rlimit64 *, new_limit,
469           struct host_rlimit64 *, old_limit)
470 #endif
471 
472 
473 #if defined(TARGET_NR_timer_create)
474 /* Maximum of 32 active POSIX timers allowed at any one time. */
475 static timer_t g_posix_timers[32] = { 0, } ;
476 
477 static inline int next_free_host_timer(void)
478 {
479     int k ;
480     /* FIXME: Does finding the next free slot require a lock? */
481     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
482         if (g_posix_timers[k] == 0) {
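            /* Reserve the slot with a dummy non-zero value; the caller
             * (the timer_create handler) stores the real timer_t here.
             */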
483             g_posix_timers[k] = (timer_t) 1;
484             return k;
485         }
486     }
487     return -1;
488 }
489 #endif
490 
491 /* ARM EABI and MIPS expect 64-bit types to be aligned on even register pairs */
492 #ifdef TARGET_ARM
493 static inline int regpairs_aligned(void *cpu_env, int num)
494 {
495     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
496 }
497 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
498 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
499 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
500 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even
501  * register pairs, which works out the same as ARM/MIPS because we start with
502  * r3 as arg1 */
503 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
504 #elif defined(TARGET_SH4)
505 /* SH4 doesn't align register pairs, except for p{read,write}64 */
506 static inline int regpairs_aligned(void *cpu_env, int num)
507 {
508     switch (num) {
509     case TARGET_NR_pread64:
510     case TARGET_NR_pwrite64:
511         return 1;
512 
513     default:
514         return 0;
515     }
516 }
517 #elif defined(TARGET_XTENSA)
518 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
519 #else
520 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
521 #endif
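/* When regpairs_aligned() returns 1, syscall handlers that take a 64-bit
 * value split across two 32-bit argument registers (pread64, pwrite64,
 * truncate64, ...) skip one register so the pair starts on an even-numbered
 * register, matching how the guest's libc marshalled the arguments.
 */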
522 
523 #define ERRNO_TABLE_SIZE 1200
524 
525 /* target_to_host_errno_table[] is initialized from
526  * host_to_target_errno_table[] in syscall_init(). */
527 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
528 };
529 
530 /*
531  * This list is the union of errno values overridden in asm-<arch>/errno.h
532  * minus the errnos that are not actually generic to all archs.
533  */
534 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
535     [EAGAIN]		= TARGET_EAGAIN,
536     [EIDRM]		= TARGET_EIDRM,
537     [ECHRNG]		= TARGET_ECHRNG,
538     [EL2NSYNC]		= TARGET_EL2NSYNC,
539     [EL3HLT]		= TARGET_EL3HLT,
540     [EL3RST]		= TARGET_EL3RST,
541     [ELNRNG]		= TARGET_ELNRNG,
542     [EUNATCH]		= TARGET_EUNATCH,
543     [ENOCSI]		= TARGET_ENOCSI,
544     [EL2HLT]		= TARGET_EL2HLT,
545     [EDEADLK]		= TARGET_EDEADLK,
546     [ENOLCK]		= TARGET_ENOLCK,
547     [EBADE]		= TARGET_EBADE,
548     [EBADR]		= TARGET_EBADR,
549     [EXFULL]		= TARGET_EXFULL,
550     [ENOANO]		= TARGET_ENOANO,
551     [EBADRQC]		= TARGET_EBADRQC,
552     [EBADSLT]		= TARGET_EBADSLT,
553     [EBFONT]		= TARGET_EBFONT,
554     [ENOSTR]		= TARGET_ENOSTR,
555     [ENODATA]		= TARGET_ENODATA,
556     [ETIME]		= TARGET_ETIME,
557     [ENOSR]		= TARGET_ENOSR,
558     [ENONET]		= TARGET_ENONET,
559     [ENOPKG]		= TARGET_ENOPKG,
560     [EREMOTE]		= TARGET_EREMOTE,
561     [ENOLINK]		= TARGET_ENOLINK,
562     [EADV]		= TARGET_EADV,
563     [ESRMNT]		= TARGET_ESRMNT,
564     [ECOMM]		= TARGET_ECOMM,
565     [EPROTO]		= TARGET_EPROTO,
566     [EDOTDOT]		= TARGET_EDOTDOT,
567     [EMULTIHOP]		= TARGET_EMULTIHOP,
568     [EBADMSG]		= TARGET_EBADMSG,
569     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
570     [EOVERFLOW]		= TARGET_EOVERFLOW,
571     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
572     [EBADFD]		= TARGET_EBADFD,
573     [EREMCHG]		= TARGET_EREMCHG,
574     [ELIBACC]		= TARGET_ELIBACC,
575     [ELIBBAD]		= TARGET_ELIBBAD,
576     [ELIBSCN]		= TARGET_ELIBSCN,
577     [ELIBMAX]		= TARGET_ELIBMAX,
578     [ELIBEXEC]		= TARGET_ELIBEXEC,
579     [EILSEQ]		= TARGET_EILSEQ,
580     [ENOSYS]		= TARGET_ENOSYS,
581     [ELOOP]		= TARGET_ELOOP,
582     [ERESTART]		= TARGET_ERESTART,
583     [ESTRPIPE]		= TARGET_ESTRPIPE,
584     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
585     [EUSERS]		= TARGET_EUSERS,
586     [ENOTSOCK]		= TARGET_ENOTSOCK,
587     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
588     [EMSGSIZE]		= TARGET_EMSGSIZE,
589     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
590     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
591     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
592     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
593     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
594     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
595     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
596     [EADDRINUSE]	= TARGET_EADDRINUSE,
597     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
598     [ENETDOWN]		= TARGET_ENETDOWN,
599     [ENETUNREACH]	= TARGET_ENETUNREACH,
600     [ENETRESET]		= TARGET_ENETRESET,
601     [ECONNABORTED]	= TARGET_ECONNABORTED,
602     [ECONNRESET]	= TARGET_ECONNRESET,
603     [ENOBUFS]		= TARGET_ENOBUFS,
604     [EISCONN]		= TARGET_EISCONN,
605     [ENOTCONN]		= TARGET_ENOTCONN,
606     [EUCLEAN]		= TARGET_EUCLEAN,
607     [ENOTNAM]		= TARGET_ENOTNAM,
608     [ENAVAIL]		= TARGET_ENAVAIL,
609     [EISNAM]		= TARGET_EISNAM,
610     [EREMOTEIO]		= TARGET_EREMOTEIO,
611     [EDQUOT]            = TARGET_EDQUOT,
612     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
613     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
614     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
615     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
616     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
617     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
618     [EALREADY]		= TARGET_EALREADY,
619     [EINPROGRESS]	= TARGET_EINPROGRESS,
620     [ESTALE]		= TARGET_ESTALE,
621     [ECANCELED]		= TARGET_ECANCELED,
622     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
623     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
624 #ifdef ENOKEY
625     [ENOKEY]		= TARGET_ENOKEY,
626 #endif
627 #ifdef EKEYEXPIRED
628     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
629 #endif
630 #ifdef EKEYREVOKED
631     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
632 #endif
633 #ifdef EKEYREJECTED
634     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
635 #endif
636 #ifdef EOWNERDEAD
637     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
638 #endif
639 #ifdef ENOTRECOVERABLE
640     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
641 #endif
642 #ifdef ENOMSG
643     [ENOMSG]            = TARGET_ENOMSG,
644 #endif
645 #ifdef ERFKILL
646     [ERFKILL]           = TARGET_ERFKILL,
647 #endif
648 #ifdef EHWPOISON
649     [EHWPOISON]         = TARGET_EHWPOISON,
650 #endif
651 };
652 
653 static inline int host_to_target_errno(int err)
654 {
655     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
656         host_to_target_errno_table[err]) {
657         return host_to_target_errno_table[err];
658     }
659     return err;
660 }
661 
662 static inline int target_to_host_errno(int err)
663 {
664     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
665         target_to_host_errno_table[err]) {
666         return target_to_host_errno_table[err];
667     }
668     return err;
669 }
670 
671 static inline abi_long get_errno(abi_long ret)
672 {
673     if (ret == -1)
674         return -host_to_target_errno(errno);
675     else
676         return ret;
677 }
678 
679 const char *target_strerror(int err)
680 {
681     if (err == TARGET_ERESTARTSYS) {
682         return "To be restarted";
683     }
684     if (err == TARGET_QEMU_ESIGRETURN) {
685         return "Successful exit from sigreturn";
686     }
687 
688     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
689         return NULL;
690     }
691     return strerror(target_to_host_errno(err));
692 }
693 
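/* The safe_syscallN() wrappers below route blocking host syscalls through
 * safe_syscall() (defined elsewhere in linux-user), which checks for a
 * pending guest signal immediately before entering the kernel; instead of
 * blocking, the wrapper then fails in a way that get_errno() turns into
 * -TARGET_ERESTARTSYS, letting the signal be delivered and the syscall
 * restarted afterwards. See the safe_syscall() definition for the precise
 * contract.
 */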
694 #define safe_syscall0(type, name) \
695 static type safe_##name(void) \
696 { \
697     return safe_syscall(__NR_##name); \
698 }
699 
700 #define safe_syscall1(type, name, type1, arg1) \
701 static type safe_##name(type1 arg1) \
702 { \
703     return safe_syscall(__NR_##name, arg1); \
704 }
705 
706 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
707 static type safe_##name(type1 arg1, type2 arg2) \
708 { \
709     return safe_syscall(__NR_##name, arg1, arg2); \
710 }
711 
712 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
713 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
714 { \
715     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
716 }
717 
718 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
719     type4, arg4) \
720 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
721 { \
722     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
723 }
724 
725 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
726     type4, arg4, type5, arg5) \
727 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
728     type5 arg5) \
729 { \
730     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
731 }
732 
733 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
734     type4, arg4, type5, arg5, type6, arg6) \
735 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
736     type5 arg5, type6 arg6) \
737 { \
738     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
739 }
740 
741 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
742 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
743 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
744               int, flags, mode_t, mode)
745 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
746 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
747               struct rusage *, rusage)
748 #endif
749 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
750               int, options, struct rusage *, rusage)
751 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
752 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
753     defined(TARGET_NR_pselect6)
754 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
755               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
756 #endif
757 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
758 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
759               struct timespec *, tsp, const sigset_t *, sigmask,
760               size_t, sigsetsize)
761 #endif
762 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
763               int, maxevents, int, timeout, const sigset_t *, sigmask,
764               size_t, sigsetsize)
765 #ifdef TARGET_NR_futex
766 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
767               const struct timespec *,timeout,int *,uaddr2,int,val3)
768 #endif
769 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
770 safe_syscall2(int, kill, pid_t, pid, int, sig)
771 safe_syscall2(int, tkill, int, tid, int, sig)
772 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
773 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
774 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
775 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
776               unsigned long, pos_l, unsigned long, pos_h)
777 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
778               unsigned long, pos_l, unsigned long, pos_h)
779 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
780               socklen_t, addrlen)
781 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
782               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
783 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
784               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
785 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
786 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
787 safe_syscall2(int, flock, int, fd, int, operation)
788 #ifdef TARGET_NR_rt_sigtimedwait
789 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
790               const struct timespec *, uts, size_t, sigsetsize)
791 #endif
792 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
793               int, flags)
794 #if defined(TARGET_NR_nanosleep)
795 safe_syscall2(int, nanosleep, const struct timespec *, req,
796               struct timespec *, rem)
797 #endif
798 #ifdef TARGET_NR_clock_nanosleep
799 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
800               const struct timespec *, req, struct timespec *, rem)
801 #endif
802 #ifdef __NR_ipc
803 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
804               void *, ptr, long, fifth)
805 #endif
806 #ifdef __NR_msgsnd
807 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
808               int, flags)
809 #endif
810 #ifdef __NR_msgrcv
811 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
812               long, msgtype, int, flags)
813 #endif
814 #ifdef __NR_semtimedop
815 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
816               unsigned, nsops, const struct timespec *, timeout)
817 #endif
818 #ifdef TARGET_NR_mq_timedsend
819 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
820               size_t, len, unsigned, prio, const struct timespec *, timeout)
821 #endif
822 #ifdef TARGET_NR_mq_timedreceive
823 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
824               size_t, len, unsigned *, prio, const struct timespec *, timeout)
825 #endif
826 /* We do ioctl like this rather than via safe_syscall3 to preserve the
827  * "third argument might be integer or pointer or not present" behaviour of
828  * the libc function.
829  */
830 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
831 /* Similarly for fcntl. Note that callers must always:
832  *  - pass the F_GETLK64 etc. constants rather than the unsuffixed F_GETLK
833  *  - use the flock64 struct rather than the unsuffixed flock
834  * This then works, using a 64-bit offset on both 32-bit and 64-bit hosts.
835  */
836 #ifdef __NR_fcntl64
837 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
838 #else
839 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
840 #endif
841 
842 static inline int host_to_target_sock_type(int host_type)
843 {
844     int target_type;
845 
846     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
847     case SOCK_DGRAM:
848         target_type = TARGET_SOCK_DGRAM;
849         break;
850     case SOCK_STREAM:
851         target_type = TARGET_SOCK_STREAM;
852         break;
853     default:
854         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
855         break;
856     }
857 
858 #if defined(SOCK_CLOEXEC)
859     if (host_type & SOCK_CLOEXEC) {
860         target_type |= TARGET_SOCK_CLOEXEC;
861     }
862 #endif
863 
864 #if defined(SOCK_NONBLOCK)
865     if (host_type & SOCK_NONBLOCK) {
866         target_type |= TARGET_SOCK_NONBLOCK;
867     }
868 #endif
869 
870     return target_type;
871 }
872 
873 static abi_ulong target_brk;
874 static abi_ulong target_original_brk;
875 static abi_ulong brk_page;
876 
877 void target_set_brk(abi_ulong new_brk)
878 {
879     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
880     brk_page = HOST_PAGE_ALIGN(target_brk);
881 }
882 
883 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
884 #define DEBUGF_BRK(message, args...)
885 
886 /* do_brk() must return target values and target errnos. */
887 abi_long do_brk(abi_ulong new_brk)
888 {
889     abi_long mapped_addr;
890     abi_ulong new_alloc_size;
891 
892     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
893 
894     if (!new_brk) {
895         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
896         return target_brk;
897     }
898     if (new_brk < target_original_brk) {
899         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
900                    target_brk);
901         return target_brk;
902     }
903 
904     /* If the new brk is less than the highest page reserved to the
905      * target heap allocation, set it and we're almost done...  */
906     if (new_brk <= brk_page) {
907         /* Heap contents are initialized to zero, as for anonymous
908          * mapped pages.  */
909         if (new_brk > target_brk) {
910             memset(g2h(target_brk), 0, new_brk - target_brk);
911         }
912 	target_brk = new_brk;
913         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
914 	return target_brk;
915     }
916 
917     /* We need to allocate more memory after the brk... Note that
918      * we don't use MAP_FIXED because that will map over the top of
919      * any existing mapping (like the one with the host libc or qemu
920      * itself); instead we treat "mapped but at wrong address" as
921      * a failure and unmap again.
922      */
923     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
924     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
925                                         PROT_READ|PROT_WRITE,
926                                         MAP_ANON|MAP_PRIVATE, 0, 0));
927 
928     if (mapped_addr == brk_page) {
929         /* Heap contents are initialized to zero, as for anonymous
930          * mapped pages.  Technically the new pages are already
931          * initialized to zero since they *are* anonymous mapped
932          * pages, however we have to take care with the contents that
933          * come from the remaining part of the previous page: it may
934          * contains garbage data due to a previous heap usage (grown
935          * then shrunken).  */
936         memset(g2h(target_brk), 0, brk_page - target_brk);
937 
938         target_brk = new_brk;
939         brk_page = HOST_PAGE_ALIGN(target_brk);
940         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
941             target_brk);
942         return target_brk;
943     } else if (mapped_addr != -1) {
944         /* Mapped but at wrong address, meaning there wasn't actually
945          * enough space for this brk.
946          */
947         target_munmap(mapped_addr, new_alloc_size);
948         mapped_addr = -1;
949         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
950     }
951     else {
952         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
953     }
954 
955 #if defined(TARGET_ALPHA)
956     /* We (partially) emulate OSF/1 on Alpha, which requires we
957        return a proper errno, not an unchanged brk value.  */
958     return -TARGET_ENOMEM;
959 #endif
960     /* For everything else, return the previous break. */
961     return target_brk;
962 }
963 
964 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
965     defined(TARGET_NR_pselect6)
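/* A guest fd_set is an array of abi_ulong words, one bit per descriptor, in
 * guest byte order and word size; these helpers repack it bit by bit into
 * the host fd_set (and back) rather than assuming the layouts match.
 */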
966 static inline abi_long copy_from_user_fdset(fd_set *fds,
967                                             abi_ulong target_fds_addr,
968                                             int n)
969 {
970     int i, nw, j, k;
971     abi_ulong b, *target_fds;
972 
973     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
974     if (!(target_fds = lock_user(VERIFY_READ,
975                                  target_fds_addr,
976                                  sizeof(abi_ulong) * nw,
977                                  1)))
978         return -TARGET_EFAULT;
979 
980     FD_ZERO(fds);
981     k = 0;
982     for (i = 0; i < nw; i++) {
983         /* grab the abi_ulong */
984         __get_user(b, &target_fds[i]);
985         for (j = 0; j < TARGET_ABI_BITS; j++) {
986             /* check the bit inside the abi_ulong */
987             if ((b >> j) & 1)
988                 FD_SET(k, fds);
989             k++;
990         }
991     }
992 
993     unlock_user(target_fds, target_fds_addr, 0);
994 
995     return 0;
996 }
997 
998 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
999                                                  abi_ulong target_fds_addr,
1000                                                  int n)
1001 {
1002     if (target_fds_addr) {
1003         if (copy_from_user_fdset(fds, target_fds_addr, n))
1004             return -TARGET_EFAULT;
1005         *fds_ptr = fds;
1006     } else {
1007         *fds_ptr = NULL;
1008     }
1009     return 0;
1010 }
1011 
1012 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1013                                           const fd_set *fds,
1014                                           int n)
1015 {
1016     int i, nw, j, k;
1017     abi_long v;
1018     abi_ulong *target_fds;
1019 
1020     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1021     if (!(target_fds = lock_user(VERIFY_WRITE,
1022                                  target_fds_addr,
1023                                  sizeof(abi_ulong) * nw,
1024                                  0)))
1025         return -TARGET_EFAULT;
1026 
1027     k = 0;
1028     for (i = 0; i < nw; i++) {
1029         v = 0;
1030         for (j = 0; j < TARGET_ABI_BITS; j++) {
1031             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1032             k++;
1033         }
1034         __put_user(v, &target_fds[i]);
1035     }
1036 
1037     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1038 
1039     return 0;
1040 }
1041 #endif
1042 
1043 #if defined(__alpha__)
1044 #define HOST_HZ 1024
1045 #else
1046 #define HOST_HZ 100
1047 #endif
1048 
1049 static inline abi_long host_to_target_clock_t(long ticks)
1050 {
1051 #if HOST_HZ == TARGET_HZ
1052     return ticks;
1053 #else
1054     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1055 #endif
1056 }
1057 
1058 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1059                                              const struct rusage *rusage)
1060 {
1061     struct target_rusage *target_rusage;
1062 
1063     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1064         return -TARGET_EFAULT;
1065     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1066     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1067     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1068     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1069     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1070     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1071     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1072     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1073     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1074     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1075     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1076     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1077     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1078     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1079     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1080     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1081     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1082     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1083     unlock_user_struct(target_rusage, target_addr, 1);
1084 
1085     return 0;
1086 }
1087 
1088 #ifdef TARGET_NR_setrlimit
1089 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1090 {
1091     abi_ulong target_rlim_swap;
1092     rlim_t result;
1093 
1094     target_rlim_swap = tswapal(target_rlim);
1095     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1096         return RLIM_INFINITY;
1097 
1098     result = target_rlim_swap;
1099     if (target_rlim_swap != (rlim_t)result)
1100         return RLIM_INFINITY;
1101 
1102     return result;
1103 }
1104 #endif
1105 
1106 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1107 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1108 {
1109     abi_ulong target_rlim_swap;
1110     abi_ulong result;
1111 
1112     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1113         target_rlim_swap = TARGET_RLIM_INFINITY;
1114     else
1115         target_rlim_swap = rlim;
1116     result = tswapal(target_rlim_swap);
1117 
1118     return result;
1119 }
1120 #endif
1121 
1122 static inline int target_to_host_resource(int code)
1123 {
1124     switch (code) {
1125     case TARGET_RLIMIT_AS:
1126         return RLIMIT_AS;
1127     case TARGET_RLIMIT_CORE:
1128         return RLIMIT_CORE;
1129     case TARGET_RLIMIT_CPU:
1130         return RLIMIT_CPU;
1131     case TARGET_RLIMIT_DATA:
1132         return RLIMIT_DATA;
1133     case TARGET_RLIMIT_FSIZE:
1134         return RLIMIT_FSIZE;
1135     case TARGET_RLIMIT_LOCKS:
1136         return RLIMIT_LOCKS;
1137     case TARGET_RLIMIT_MEMLOCK:
1138         return RLIMIT_MEMLOCK;
1139     case TARGET_RLIMIT_MSGQUEUE:
1140         return RLIMIT_MSGQUEUE;
1141     case TARGET_RLIMIT_NICE:
1142         return RLIMIT_NICE;
1143     case TARGET_RLIMIT_NOFILE:
1144         return RLIMIT_NOFILE;
1145     case TARGET_RLIMIT_NPROC:
1146         return RLIMIT_NPROC;
1147     case TARGET_RLIMIT_RSS:
1148         return RLIMIT_RSS;
1149     case TARGET_RLIMIT_RTPRIO:
1150         return RLIMIT_RTPRIO;
1151     case TARGET_RLIMIT_SIGPENDING:
1152         return RLIMIT_SIGPENDING;
1153     case TARGET_RLIMIT_STACK:
1154         return RLIMIT_STACK;
1155     default:
1156         return code;
1157     }
1158 }
1159 
1160 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1161                                               abi_ulong target_tv_addr)
1162 {
1163     struct target_timeval *target_tv;
1164 
1165     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1166         return -TARGET_EFAULT;
1167     }
1168 
1169     __get_user(tv->tv_sec, &target_tv->tv_sec);
1170     __get_user(tv->tv_usec, &target_tv->tv_usec);
1171 
1172     unlock_user_struct(target_tv, target_tv_addr, 0);
1173 
1174     return 0;
1175 }
1176 
1177 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1178                                             const struct timeval *tv)
1179 {
1180     struct target_timeval *target_tv;
1181 
1182     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1183         return -TARGET_EFAULT;
1184     }
1185 
1186     __put_user(tv->tv_sec, &target_tv->tv_sec);
1187     __put_user(tv->tv_usec, &target_tv->tv_usec);
1188 
1189     unlock_user_struct(target_tv, target_tv_addr, 1);
1190 
1191     return 0;
1192 }
1193 
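/* As above, but for struct __kernel_sock_timeval, the 64-bit-time_t timeval
 * layout used by the time64 (y2038-safe) interfaces.
 */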
1194 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1195                                              const struct timeval *tv)
1196 {
1197     struct target__kernel_sock_timeval *target_tv;
1198 
1199     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1200         return -TARGET_EFAULT;
1201     }
1202 
1203     __put_user(tv->tv_sec, &target_tv->tv_sec);
1204     __put_user(tv->tv_usec, &target_tv->tv_usec);
1205 
1206     unlock_user_struct(target_tv, target_tv_addr, 1);
1207 
1208     return 0;
1209 }
1210 
1211 #if defined(TARGET_NR_futex) || \
1212     defined(TARGET_NR_rt_sigtimedwait) || \
1213     defined(TARGET_NR_pselect6) || \
1214     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1215     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1216     defined(TARGET_NR_mq_timedreceive)
1217 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1218                                                abi_ulong target_addr)
1219 {
1220     struct target_timespec *target_ts;
1221 
1222     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1223         return -TARGET_EFAULT;
1224     }
1225     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1226     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1227     unlock_user_struct(target_ts, target_addr, 0);
1228     return 0;
1229 }
1230 #endif
1231 
1232 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1233                                                struct timespec *host_ts)
1234 {
1235     struct target_timespec *target_ts;
1236 
1237     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1238         return -TARGET_EFAULT;
1239     }
1240     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1241     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1242     unlock_user_struct(target_ts, target_addr, 1);
1243     return 0;
1244 }
1245 
1246 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1247                                                  struct timespec *host_ts)
1248 {
1249     struct target__kernel_timespec *target_ts;
1250 
1251     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1252         return -TARGET_EFAULT;
1253     }
1254     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1255     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1256     unlock_user_struct(target_ts, target_addr, 1);
1257     return 0;
1258 }
1259 
1260 #if defined(TARGET_NR_settimeofday)
1261 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1262                                                abi_ulong target_tz_addr)
1263 {
1264     struct target_timezone *target_tz;
1265 
1266     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1267         return -TARGET_EFAULT;
1268     }
1269 
1270     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1271     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1272 
1273     unlock_user_struct(target_tz, target_tz_addr, 0);
1274 
1275     return 0;
1276 }
1277 #endif
1278 
1279 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1280 #include <mqueue.h>
1281 
1282 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1283                                               abi_ulong target_mq_attr_addr)
1284 {
1285     struct target_mq_attr *target_mq_attr;
1286 
1287     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1288                           target_mq_attr_addr, 1))
1289         return -TARGET_EFAULT;
1290 
1291     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1292     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1293     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1294     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1295 
1296     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1297 
1298     return 0;
1299 }
1300 
1301 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1302                                             const struct mq_attr *attr)
1303 {
1304     struct target_mq_attr *target_mq_attr;
1305 
1306     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1307                           target_mq_attr_addr, 0))
1308         return -TARGET_EFAULT;
1309 
1310     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1311     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1312     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1313     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1314 
1315     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1316 
1317     return 0;
1318 }
1319 #endif
1320 
1321 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1322 /* do_select() must return target values and target errnos. */
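/* select() is implemented on top of safe_pselect6() so that guest signals can
 * interrupt the wait; the timeval timeout is converted to a timespec on the
 * way in, and the remaining time is converted back and written out, matching
 * Linux select() semantics.
 */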
1323 static abi_long do_select(int n,
1324                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1325                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1326 {
1327     fd_set rfds, wfds, efds;
1328     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1329     struct timeval tv;
1330     struct timespec ts, *ts_ptr;
1331     abi_long ret;
1332 
1333     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1334     if (ret) {
1335         return ret;
1336     }
1337     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1338     if (ret) {
1339         return ret;
1340     }
1341     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1342     if (ret) {
1343         return ret;
1344     }
1345 
1346     if (target_tv_addr) {
1347         if (copy_from_user_timeval(&tv, target_tv_addr))
1348             return -TARGET_EFAULT;
1349         ts.tv_sec = tv.tv_sec;
1350         ts.tv_nsec = tv.tv_usec * 1000;
1351         ts_ptr = &ts;
1352     } else {
1353         ts_ptr = NULL;
1354     }
1355 
1356     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1357                                   ts_ptr, NULL));
1358 
1359     if (!is_error(ret)) {
1360         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1361             return -TARGET_EFAULT;
1362         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1363             return -TARGET_EFAULT;
1364         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1365             return -TARGET_EFAULT;
1366 
1367         if (target_tv_addr) {
1368             tv.tv_sec = ts.tv_sec;
1369             tv.tv_usec = ts.tv_nsec / 1000;
1370             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1371                 return -TARGET_EFAULT;
1372             }
1373         }
1374     }
1375 
1376     return ret;
1377 }
1378 
1379 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1380 static abi_long do_old_select(abi_ulong arg1)
1381 {
1382     struct target_sel_arg_struct *sel;
1383     abi_ulong inp, outp, exp, tvp;
1384     long nsel;
1385 
1386     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1387         return -TARGET_EFAULT;
1388     }
1389 
1390     nsel = tswapal(sel->n);
1391     inp = tswapal(sel->inp);
1392     outp = tswapal(sel->outp);
1393     exp = tswapal(sel->exp);
1394     tvp = tswapal(sel->tvp);
1395 
1396     unlock_user_struct(sel, arg1, 0);
1397 
1398     return do_select(nsel, inp, outp, exp, tvp);
1399 }
1400 #endif
1401 #endif
1402 
1403 static abi_long do_pipe2(int host_pipe[], int flags)
1404 {
1405 #ifdef CONFIG_PIPE2
1406     return pipe2(host_pipe, flags);
1407 #else
1408     return -ENOSYS;
1409 #endif
1410 }
1411 
1412 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1413                         int flags, int is_pipe2)
1414 {
1415     int host_pipe[2];
1416     abi_long ret;
1417     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1418 
1419     if (is_error(ret))
1420         return get_errno(ret);
1421 
1422     /* Several targets have special calling conventions for the original
1423        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1424     if (!is_pipe2) {
1425 #if defined(TARGET_ALPHA)
1426         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1427         return host_pipe[0];
1428 #elif defined(TARGET_MIPS)
1429         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1430         return host_pipe[0];
1431 #elif defined(TARGET_SH4)
1432         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1433         return host_pipe[0];
1434 #elif defined(TARGET_SPARC)
1435         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1436         return host_pipe[0];
1437 #endif
1438     }
1439 
1440     if (put_user_s32(host_pipe[0], pipedes)
1441         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1442         return -TARGET_EFAULT;
1443     return get_errno(ret);
1444 }
1445 
1446 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1447                                               abi_ulong target_addr,
1448                                               socklen_t len)
1449 {
1450     struct target_ip_mreqn *target_smreqn;
1451 
1452     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1453     if (!target_smreqn)
1454         return -TARGET_EFAULT;
1455     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1456     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1457     if (len == sizeof(struct target_ip_mreqn))
1458         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1459     unlock_user(target_smreqn, target_addr, 0);
1460 
1461     return 0;
1462 }
1463 
1464 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1465                                                abi_ulong target_addr,
1466                                                socklen_t len)
1467 {
1468     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1469     sa_family_t sa_family;
1470     struct target_sockaddr *target_saddr;
1471 
1472     if (fd_trans_target_to_host_addr(fd)) {
1473         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1474     }
1475 
1476     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1477     if (!target_saddr)
1478         return -TARGET_EFAULT;
1479 
1480     sa_family = tswap16(target_saddr->sa_family);
1481 
1482     /* Oops. The caller might send an incomplete sun_path; sun_path
1483      * must be terminated by \0 (see the manual page), but
1484      * unfortunately it is quite common to specify sockaddr_un
1485      * length as "strlen(x->sun_path)" while it should be
1486      * "strlen(...) + 1". We'll fix that here if needed.
1487      * The Linux kernel applies a similar fixup.
1488      */
1489 
1490     if (sa_family == AF_UNIX) {
1491         if (len < unix_maxlen && len > 0) {
1492             char *cp = (char*)target_saddr;
1493 
1494             if ( cp[len-1] && !cp[len] )
1495                 len++;
1496         }
1497         if (len > unix_maxlen)
1498             len = unix_maxlen;
1499     }
1500 
1501     memcpy(addr, target_saddr, len);
1502     addr->sa_family = sa_family;
1503     if (sa_family == AF_NETLINK) {
1504         struct sockaddr_nl *nladdr;
1505 
1506         nladdr = (struct sockaddr_nl *)addr;
1507         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1508         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1509     } else if (sa_family == AF_PACKET) {
1510 	struct target_sockaddr_ll *lladdr;
1511 
1512 	lladdr = (struct target_sockaddr_ll *)addr;
1513 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1514 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1515     }
1516     unlock_user(target_saddr, target_addr, 0);
1517 
1518     return 0;
1519 }
1520 
1521 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1522                                                struct sockaddr *addr,
1523                                                socklen_t len)
1524 {
1525     struct target_sockaddr *target_saddr;
1526 
1527     if (len == 0) {
1528         return 0;
1529     }
1530     assert(addr);
1531 
1532     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1533     if (!target_saddr)
1534         return -TARGET_EFAULT;
1535     memcpy(target_saddr, addr, len);
1536     if (len >= offsetof(struct target_sockaddr, sa_family) +
1537         sizeof(target_saddr->sa_family)) {
1538         target_saddr->sa_family = tswap16(addr->sa_family);
1539     }
1540     if (addr->sa_family == AF_NETLINK &&
1541         len >= sizeof(struct target_sockaddr_nl)) {
1542         struct target_sockaddr_nl *target_nl =
1543                (struct target_sockaddr_nl *)target_saddr;
1544         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1545         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1546     } else if (addr->sa_family == AF_PACKET) {
1547         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1548         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1549         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1550     } else if (addr->sa_family == AF_INET6 &&
1551                len >= sizeof(struct target_sockaddr_in6)) {
1552         struct target_sockaddr_in6 *target_in6 =
1553                (struct target_sockaddr_in6 *)target_saddr;
1554         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1555     }
1556     unlock_user(target_saddr, target_addr, len);
1557 
1558     return 0;
1559 }
1560 
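/*
 * Convert the ancillary data (control messages) of a guest msghdr into
 * host format: SCM_RIGHTS file descriptor arrays and SCM_CREDENTIALS
 * are converted field by field; unknown payloads are copied verbatim
 * and logged as unimplemented.
 */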
1561 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1562                                            struct target_msghdr *target_msgh)
1563 {
1564     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1565     abi_long msg_controllen;
1566     abi_ulong target_cmsg_addr;
1567     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1568     socklen_t space = 0;
1569 
1570     msg_controllen = tswapal(target_msgh->msg_controllen);
1571     if (msg_controllen < sizeof (struct target_cmsghdr))
1572         goto the_end;
1573     target_cmsg_addr = tswapal(target_msgh->msg_control);
1574     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1575     target_cmsg_start = target_cmsg;
1576     if (!target_cmsg)
1577         return -TARGET_EFAULT;
1578 
1579     while (cmsg && target_cmsg) {
1580         void *data = CMSG_DATA(cmsg);
1581         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1582 
1583         int len = tswapal(target_cmsg->cmsg_len)
1584             - sizeof(struct target_cmsghdr);
1585 
1586         space += CMSG_SPACE(len);
1587         if (space > msgh->msg_controllen) {
1588             space -= CMSG_SPACE(len);
1589             /* This is a QEMU bug, since we allocated the payload
1590              * area ourselves (unlike overflow in host-to-target
1591              * conversion, which is just the guest giving us a buffer
1592              * that's too small). It can't happen for the payload types
1593              * we currently support; if it becomes an issue in future
1594              * we would need to improve our allocation strategy to
1595              * something more intelligent than "twice the size of the
1596              * target buffer we're reading from".
1597              */
1598             qemu_log_mask(LOG_UNIMP,
1599                           ("Unsupported ancillary data %d/%d: "
1600                            "unhandled msg size\n"),
1601                           tswap32(target_cmsg->cmsg_level),
1602                           tswap32(target_cmsg->cmsg_type));
1603             break;
1604         }
1605 
1606         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1607             cmsg->cmsg_level = SOL_SOCKET;
1608         } else {
1609             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1610         }
1611         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1612         cmsg->cmsg_len = CMSG_LEN(len);
1613 
1614         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1615             int *fd = (int *)data;
1616             int *target_fd = (int *)target_data;
1617             int i, numfds = len / sizeof(int);
1618 
1619             for (i = 0; i < numfds; i++) {
1620                 __get_user(fd[i], target_fd + i);
1621             }
1622         } else if (cmsg->cmsg_level == SOL_SOCKET
1623                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1624             struct ucred *cred = (struct ucred *)data;
1625             struct target_ucred *target_cred =
1626                 (struct target_ucred *)target_data;
1627 
1628             __get_user(cred->pid, &target_cred->pid);
1629             __get_user(cred->uid, &target_cred->uid);
1630             __get_user(cred->gid, &target_cred->gid);
1631         } else {
1632             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1633                           cmsg->cmsg_level, cmsg->cmsg_type);
1634             memcpy(data, target_data, len);
1635         }
1636 
1637         cmsg = CMSG_NXTHDR(msgh, cmsg);
1638         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1639                                          target_cmsg_start);
1640     }
1641     unlock_user(target_cmsg, target_cmsg_addr, 0);
1642  the_end:
1643     msgh->msg_controllen = space;
1644     return 0;
1645 }
1646 
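/*
 * Convert host ancillary data back into guest format.  Payloads whose
 * size differs on the target (e.g. SO_TIMESTAMP's struct timeval) are
 * resized, and any truncation caused by a too-small guest control
 * buffer is reported to the guest via MSG_CTRUNC.
 */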
1647 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1648                                            struct msghdr *msgh)
1649 {
1650     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1651     abi_long msg_controllen;
1652     abi_ulong target_cmsg_addr;
1653     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1654     socklen_t space = 0;
1655 
1656     msg_controllen = tswapal(target_msgh->msg_controllen);
1657     if (msg_controllen < sizeof (struct target_cmsghdr))
1658         goto the_end;
1659     target_cmsg_addr = tswapal(target_msgh->msg_control);
1660     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1661     target_cmsg_start = target_cmsg;
1662     if (!target_cmsg)
1663         return -TARGET_EFAULT;
1664 
1665     while (cmsg && target_cmsg) {
1666         void *data = CMSG_DATA(cmsg);
1667         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1668 
1669         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1670         int tgt_len, tgt_space;
1671 
1672         /* We never copy a half-header but may copy half-data;
1673          * this is Linux's behaviour in put_cmsg(). Note that
1674          * truncation here is a guest problem (which we report
1675          * to the guest via the CTRUNC bit), unlike truncation
1676          * in target_to_host_cmsg, which is a QEMU bug.
1677          */
1678         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1679             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1680             break;
1681         }
1682 
1683         if (cmsg->cmsg_level == SOL_SOCKET) {
1684             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1685         } else {
1686             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1687         }
1688         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1689 
1690         /* Payload types which need a different size of payload on
1691          * the target must adjust tgt_len here.
1692          */
1693         tgt_len = len;
1694         switch (cmsg->cmsg_level) {
1695         case SOL_SOCKET:
1696             switch (cmsg->cmsg_type) {
1697             case SO_TIMESTAMP:
1698                 tgt_len = sizeof(struct target_timeval);
1699                 break;
1700             default:
1701                 break;
1702             }
1703             break;
1704         default:
1705             break;
1706         }
1707 
1708         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1709             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1710             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1711         }
1712 
1713         /* We must now copy-and-convert len bytes of payload
1714          * into tgt_len bytes of destination space. Bear in mind
1715          * that in both source and destination we may be dealing
1716          * with a truncated value!
1717          */
1718         switch (cmsg->cmsg_level) {
1719         case SOL_SOCKET:
1720             switch (cmsg->cmsg_type) {
1721             case SCM_RIGHTS:
1722             {
1723                 int *fd = (int *)data;
1724                 int *target_fd = (int *)target_data;
1725                 int i, numfds = tgt_len / sizeof(int);
1726 
1727                 for (i = 0; i < numfds; i++) {
1728                     __put_user(fd[i], target_fd + i);
1729                 }
1730                 break;
1731             }
1732             case SO_TIMESTAMP:
1733             {
1734                 struct timeval *tv = (struct timeval *)data;
1735                 struct target_timeval *target_tv =
1736                     (struct target_timeval *)target_data;
1737 
1738                 if (len != sizeof(struct timeval) ||
1739                     tgt_len != sizeof(struct target_timeval)) {
1740                     goto unimplemented;
1741                 }
1742 
1743                 /* copy struct timeval to target */
1744                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1745                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1746                 break;
1747             }
1748             case SCM_CREDENTIALS:
1749             {
1750                 struct ucred *cred = (struct ucred *)data;
1751                 struct target_ucred *target_cred =
1752                     (struct target_ucred *)target_data;
1753 
1754                 __put_user(cred->pid, &target_cred->pid);
1755                 __put_user(cred->uid, &target_cred->uid);
1756                 __put_user(cred->gid, &target_cred->gid);
1757                 break;
1758             }
1759             default:
1760                 goto unimplemented;
1761             }
1762             break;
1763 
1764         case SOL_IP:
1765             switch (cmsg->cmsg_type) {
1766             case IP_TTL:
1767             {
1768                 uint32_t *v = (uint32_t *)data;
1769                 uint32_t *t_int = (uint32_t *)target_data;
1770 
1771                 if (len != sizeof(uint32_t) ||
1772                     tgt_len != sizeof(uint32_t)) {
1773                     goto unimplemented;
1774                 }
1775                 __put_user(*v, t_int);
1776                 break;
1777             }
1778             case IP_RECVERR:
1779             {
1780                 struct errhdr_t {
1781                    struct sock_extended_err ee;
1782                    struct sockaddr_in offender;
1783                 };
1784                 struct errhdr_t *errh = (struct errhdr_t *)data;
1785                 struct errhdr_t *target_errh =
1786                     (struct errhdr_t *)target_data;
1787 
1788                 if (len != sizeof(struct errhdr_t) ||
1789                     tgt_len != sizeof(struct errhdr_t)) {
1790                     goto unimplemented;
1791                 }
1792                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1793                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1794                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1795                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1796                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1797                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1798                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1799                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1800                     (void *) &errh->offender, sizeof(errh->offender));
1801                 break;
1802             }
1803             default:
1804                 goto unimplemented;
1805             }
1806             break;
1807 
1808         case SOL_IPV6:
1809             switch (cmsg->cmsg_type) {
1810             case IPV6_HOPLIMIT:
1811             {
1812                 uint32_t *v = (uint32_t *)data;
1813                 uint32_t *t_int = (uint32_t *)target_data;
1814 
1815                 if (len != sizeof(uint32_t) ||
1816                     tgt_len != sizeof(uint32_t)) {
1817                     goto unimplemented;
1818                 }
1819                 __put_user(*v, t_int);
1820                 break;
1821             }
1822             case IPV6_RECVERR:
1823             {
1824                 struct errhdr6_t {
1825                    struct sock_extended_err ee;
1826                    struct sockaddr_in6 offender;
1827                 };
1828                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1829                 struct errhdr6_t *target_errh =
1830                     (struct errhdr6_t *)target_data;
1831 
1832                 if (len != sizeof(struct errhdr6_t) ||
1833                     tgt_len != sizeof(struct errhdr6_t)) {
1834                     goto unimplemented;
1835                 }
1836                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1837                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1838                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1839                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1840                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1841                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1842                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1843                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1844                     (void *) &errh->offender, sizeof(errh->offender));
1845                 break;
1846             }
1847             default:
1848                 goto unimplemented;
1849             }
1850             break;
1851 
1852         default:
1853         unimplemented:
1854             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1855                           cmsg->cmsg_level, cmsg->cmsg_type);
1856             memcpy(target_data, data, MIN(len, tgt_len));
1857             if (tgt_len > len) {
1858                 memset(target_data + len, 0, tgt_len - len);
1859             }
1860         }
1861 
1862         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1863         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1864         if (msg_controllen < tgt_space) {
1865             tgt_space = msg_controllen;
1866         }
1867         msg_controllen -= tgt_space;
1868         space += tgt_space;
1869         cmsg = CMSG_NXTHDR(msgh, cmsg);
1870         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1871                                          target_cmsg_start);
1872     }
1873     unlock_user(target_cmsg, target_cmsg_addr, space);
1874  the_end:
1875     target_msgh->msg_controllen = tswapal(space);
1876     return 0;
1877 }
1878 
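/*
 * Most socket options take a plain int and can be forwarded after a
 * byte swap; structured options (ip_mreq*, ipv6_mreq, icmp6_filter,
 * sock_fprog, linger, timeval) are rebuilt in host format first.
 */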
1879 /* do_setsockopt() Must return target values and target errnos. */
1880 static abi_long do_setsockopt(int sockfd, int level, int optname,
1881                               abi_ulong optval_addr, socklen_t optlen)
1882 {
1883     abi_long ret;
1884     int val;
1885     struct ip_mreqn *ip_mreq;
1886     struct ip_mreq_source *ip_mreq_source;
1887 
1888     switch(level) {
1889     case SOL_TCP:
1890         /* TCP options all take an 'int' value.  */
1891         if (optlen < sizeof(uint32_t))
1892             return -TARGET_EINVAL;
1893 
1894         if (get_user_u32(val, optval_addr))
1895             return -TARGET_EFAULT;
1896         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1897         break;
1898     case SOL_IP:
1899         switch(optname) {
1900         case IP_TOS:
1901         case IP_TTL:
1902         case IP_HDRINCL:
1903         case IP_ROUTER_ALERT:
1904         case IP_RECVOPTS:
1905         case IP_RETOPTS:
1906         case IP_PKTINFO:
1907         case IP_MTU_DISCOVER:
1908         case IP_RECVERR:
1909         case IP_RECVTTL:
1910         case IP_RECVTOS:
1911 #ifdef IP_FREEBIND
1912         case IP_FREEBIND:
1913 #endif
1914         case IP_MULTICAST_TTL:
1915         case IP_MULTICAST_LOOP:
1916             val = 0;
1917             if (optlen >= sizeof(uint32_t)) {
1918                 if (get_user_u32(val, optval_addr))
1919                     return -TARGET_EFAULT;
1920             } else if (optlen >= 1) {
1921                 if (get_user_u8(val, optval_addr))
1922                     return -TARGET_EFAULT;
1923             }
1924             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1925             break;
1926         case IP_ADD_MEMBERSHIP:
1927         case IP_DROP_MEMBERSHIP:
1928             if (optlen < sizeof (struct target_ip_mreq) ||
1929                 optlen > sizeof (struct target_ip_mreqn))
1930                 return -TARGET_EINVAL;
1931 
1932             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1933             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1934             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1935             break;
1936 
1937         case IP_BLOCK_SOURCE:
1938         case IP_UNBLOCK_SOURCE:
1939         case IP_ADD_SOURCE_MEMBERSHIP:
1940         case IP_DROP_SOURCE_MEMBERSHIP:
1941             if (optlen != sizeof (struct target_ip_mreq_source))
1942                 return -TARGET_EINVAL;
1943 
1944             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1945             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1946             unlock_user(ip_mreq_source, optval_addr, 0);
1947             break;
1948 
1949         default:
1950             goto unimplemented;
1951         }
1952         break;
1953     case SOL_IPV6:
1954         switch (optname) {
1955         case IPV6_MTU_DISCOVER:
1956         case IPV6_MTU:
1957         case IPV6_V6ONLY:
1958         case IPV6_RECVPKTINFO:
1959         case IPV6_UNICAST_HOPS:
1960         case IPV6_MULTICAST_HOPS:
1961         case IPV6_MULTICAST_LOOP:
1962         case IPV6_RECVERR:
1963         case IPV6_RECVHOPLIMIT:
1964         case IPV6_2292HOPLIMIT:
1965         case IPV6_CHECKSUM:
1966         case IPV6_ADDRFORM:
1967         case IPV6_2292PKTINFO:
1968         case IPV6_RECVTCLASS:
1969         case IPV6_RECVRTHDR:
1970         case IPV6_2292RTHDR:
1971         case IPV6_RECVHOPOPTS:
1972         case IPV6_2292HOPOPTS:
1973         case IPV6_RECVDSTOPTS:
1974         case IPV6_2292DSTOPTS:
1975         case IPV6_TCLASS:
1976 #ifdef IPV6_RECVPATHMTU
1977         case IPV6_RECVPATHMTU:
1978 #endif
1979 #ifdef IPV6_TRANSPARENT
1980         case IPV6_TRANSPARENT:
1981 #endif
1982 #ifdef IPV6_FREEBIND
1983         case IPV6_FREEBIND:
1984 #endif
1985 #ifdef IPV6_RECVORIGDSTADDR
1986         case IPV6_RECVORIGDSTADDR:
1987 #endif
1988             val = 0;
1989             if (optlen < sizeof(uint32_t)) {
1990                 return -TARGET_EINVAL;
1991             }
1992             if (get_user_u32(val, optval_addr)) {
1993                 return -TARGET_EFAULT;
1994             }
1995             ret = get_errno(setsockopt(sockfd, level, optname,
1996                                        &val, sizeof(val)));
1997             break;
1998         case IPV6_PKTINFO:
1999         {
2000             struct in6_pktinfo pki;
2001 
2002             if (optlen < sizeof(pki)) {
2003                 return -TARGET_EINVAL;
2004             }
2005 
2006             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2007                 return -TARGET_EFAULT;
2008             }
2009 
2010             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2011 
2012             ret = get_errno(setsockopt(sockfd, level, optname,
2013                                        &pki, sizeof(pki)));
2014             break;
2015         }
2016         case IPV6_ADD_MEMBERSHIP:
2017         case IPV6_DROP_MEMBERSHIP:
2018         {
2019             struct ipv6_mreq ipv6mreq;
2020 
2021             if (optlen < sizeof(ipv6mreq)) {
2022                 return -TARGET_EINVAL;
2023             }
2024 
2025             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2026                 return -TARGET_EFAULT;
2027             }
2028 
2029             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2030 
2031             ret = get_errno(setsockopt(sockfd, level, optname,
2032                                        &ipv6mreq, sizeof(ipv6mreq)));
2033             break;
2034         }
2035         default:
2036             goto unimplemented;
2037         }
2038         break;
2039     case SOL_ICMPV6:
2040         switch (optname) {
2041         case ICMPV6_FILTER:
2042         {
2043             struct icmp6_filter icmp6f;
2044 
2045             if (optlen > sizeof(icmp6f)) {
2046                 optlen = sizeof(icmp6f);
2047             }
2048 
2049             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2050                 return -TARGET_EFAULT;
2051             }
2052 
2053             for (val = 0; val < 8; val++) {
2054                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2055             }
2056 
2057             ret = get_errno(setsockopt(sockfd, level, optname,
2058                                        &icmp6f, optlen));
2059             break;
2060         }
2061         default:
2062             goto unimplemented;
2063         }
2064         break;
2065     case SOL_RAW:
2066         switch (optname) {
2067         case ICMP_FILTER:
2068         case IPV6_CHECKSUM:
2069             /* these options take a u32 value */
2070             if (optlen < sizeof(uint32_t)) {
2071                 return -TARGET_EINVAL;
2072             }
2073 
2074             if (get_user_u32(val, optval_addr)) {
2075                 return -TARGET_EFAULT;
2076             }
2077             ret = get_errno(setsockopt(sockfd, level, optname,
2078                                        &val, sizeof(val)));
2079             break;
2080 
2081         default:
2082             goto unimplemented;
2083         }
2084         break;
2085 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2086     case SOL_ALG:
2087         switch (optname) {
2088         case ALG_SET_KEY:
2089         {
2090             char *alg_key = g_malloc(optlen);
2091 
2092             if (!alg_key) {
2093                 return -TARGET_ENOMEM;
2094             }
2095             if (copy_from_user(alg_key, optval_addr, optlen)) {
2096                 g_free(alg_key);
2097                 return -TARGET_EFAULT;
2098             }
2099             ret = get_errno(setsockopt(sockfd, level, optname,
2100                                        alg_key, optlen));
2101             g_free(alg_key);
2102             break;
2103         }
2104         case ALG_SET_AEAD_AUTHSIZE:
2105         {
2106             ret = get_errno(setsockopt(sockfd, level, optname,
2107                                        NULL, optlen));
2108             break;
2109         }
2110         default:
2111             goto unimplemented;
2112         }
2113         break;
2114 #endif
2115     case TARGET_SOL_SOCKET:
2116         switch (optname) {
2117         case TARGET_SO_RCVTIMEO:
2118         {
2119                 struct timeval tv;
2120 
2121                 optname = SO_RCVTIMEO;
2122 
2123 set_timeout:
2124                 if (optlen != sizeof(struct target_timeval)) {
2125                     return -TARGET_EINVAL;
2126                 }
2127 
2128                 if (copy_from_user_timeval(&tv, optval_addr)) {
2129                     return -TARGET_EFAULT;
2130                 }
2131 
2132                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2133                                 &tv, sizeof(tv)));
2134                 return ret;
2135         }
2136         case TARGET_SO_SNDTIMEO:
2137                 optname = SO_SNDTIMEO;
2138                 goto set_timeout;
2139         case TARGET_SO_ATTACH_FILTER:
2140         {
2141                 struct target_sock_fprog *tfprog;
2142                 struct target_sock_filter *tfilter;
2143                 struct sock_fprog fprog;
2144                 struct sock_filter *filter;
2145                 int i;
2146 
2147                 if (optlen != sizeof(*tfprog)) {
2148                     return -TARGET_EINVAL;
2149                 }
2150                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2151                     return -TARGET_EFAULT;
2152                 }
2153                 if (!lock_user_struct(VERIFY_READ, tfilter,
2154                                       tswapal(tfprog->filter), 0)) {
2155                     unlock_user_struct(tfprog, optval_addr, 1);
2156                     return -TARGET_EFAULT;
2157                 }
2158 
2159                 fprog.len = tswap16(tfprog->len);
2160                 filter = g_try_new(struct sock_filter, fprog.len);
2161                 if (filter == NULL) {
2162                     unlock_user_struct(tfilter, tfprog->filter, 1);
2163                     unlock_user_struct(tfprog, optval_addr, 1);
2164                     return -TARGET_ENOMEM;
2165                 }
2166                 for (i = 0; i < fprog.len; i++) {
2167                     filter[i].code = tswap16(tfilter[i].code);
2168                     filter[i].jt = tfilter[i].jt;
2169                     filter[i].jf = tfilter[i].jf;
2170                     filter[i].k = tswap32(tfilter[i].k);
2171                 }
2172                 fprog.filter = filter;
2173 
2174                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2175                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2176                 g_free(filter);
2177 
2178                 unlock_user_struct(tfilter, tfprog->filter, 1);
2179                 unlock_user_struct(tfprog, optval_addr, 1);
2180                 return ret;
2181         }
2182         case TARGET_SO_BINDTODEVICE:
2183         {
2184                 char *dev_ifname, *addr_ifname;
2185 
2186                 if (optlen > IFNAMSIZ - 1) {
2187                     optlen = IFNAMSIZ - 1;
2188                 }
2189                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2190                 if (!dev_ifname) {
2191                     return -TARGET_EFAULT;
2192                 }
2193                 optname = SO_BINDTODEVICE;
2194                 addr_ifname = alloca(IFNAMSIZ);
2195                 memcpy(addr_ifname, dev_ifname, optlen);
2196                 addr_ifname[optlen] = 0;
2197                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2198                                            addr_ifname, optlen));
2199                 unlock_user(dev_ifname, optval_addr, 0);
2200                 return ret;
2201         }
2202         case TARGET_SO_LINGER:
2203         {
2204                 struct linger lg;
2205                 struct target_linger *tlg;
2206 
2207                 if (optlen != sizeof(struct target_linger)) {
2208                     return -TARGET_EINVAL;
2209                 }
2210                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2211                     return -TARGET_EFAULT;
2212                 }
2213                 __get_user(lg.l_onoff, &tlg->l_onoff);
2214                 __get_user(lg.l_linger, &tlg->l_linger);
2215                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2216                                 &lg, sizeof(lg)));
2217                 unlock_user_struct(tlg, optval_addr, 0);
2218                 return ret;
2219         }
2220             /* Options with 'int' argument.  */
2221         case TARGET_SO_DEBUG:
2222                 optname = SO_DEBUG;
2223                 break;
2224         case TARGET_SO_REUSEADDR:
2225                 optname = SO_REUSEADDR;
2226                 break;
2227 #ifdef SO_REUSEPORT
2228         case TARGET_SO_REUSEPORT:
2229                 optname = SO_REUSEPORT;
2230                 break;
2231 #endif
2232         case TARGET_SO_TYPE:
2233                 optname = SO_TYPE;
2234                 break;
2235         case TARGET_SO_ERROR:
2236                 optname = SO_ERROR;
2237                 break;
2238         case TARGET_SO_DONTROUTE:
2239                 optname = SO_DONTROUTE;
2240                 break;
2241         case TARGET_SO_BROADCAST:
2242                 optname = SO_BROADCAST;
2243                 break;
2244         case TARGET_SO_SNDBUF:
2245                 optname = SO_SNDBUF;
2246                 break;
2247         case TARGET_SO_SNDBUFFORCE:
2248                 optname = SO_SNDBUFFORCE;
2249                 break;
2250         case TARGET_SO_RCVBUF:
2251                 optname = SO_RCVBUF;
2252                 break;
2253         case TARGET_SO_RCVBUFFORCE:
2254                 optname = SO_RCVBUFFORCE;
2255                 break;
2256         case TARGET_SO_KEEPALIVE:
2257                 optname = SO_KEEPALIVE;
2258                 break;
2259         case TARGET_SO_OOBINLINE:
2260                 optname = SO_OOBINLINE;
2261                 break;
2262         case TARGET_SO_NO_CHECK:
2263                 optname = SO_NO_CHECK;
2264                 break;
2265         case TARGET_SO_PRIORITY:
2266                 optname = SO_PRIORITY;
2267                 break;
2268 #ifdef SO_BSDCOMPAT
2269         case TARGET_SO_BSDCOMPAT:
2270                 optname = SO_BSDCOMPAT;
2271                 break;
2272 #endif
2273         case TARGET_SO_PASSCRED:
2274                 optname = SO_PASSCRED;
2275                 break;
2276         case TARGET_SO_PASSSEC:
2277                 optname = SO_PASSSEC;
2278                 break;
2279         case TARGET_SO_TIMESTAMP:
2280                 optname = SO_TIMESTAMP;
2281                 break;
2282         case TARGET_SO_RCVLOWAT:
2283                 optname = SO_RCVLOWAT;
2284                 break;
2285         default:
2286             goto unimplemented;
2287         }
2288         if (optlen < sizeof(uint32_t))
2289             return -TARGET_EINVAL;
2290 
2291         if (get_user_u32(val, optval_addr))
2292             return -TARGET_EFAULT;
2293         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2294         break;
2295 #ifdef SOL_NETLINK
2296     case SOL_NETLINK:
2297         switch (optname) {
2298         case NETLINK_PKTINFO:
2299         case NETLINK_ADD_MEMBERSHIP:
2300         case NETLINK_DROP_MEMBERSHIP:
2301         case NETLINK_BROADCAST_ERROR:
2302         case NETLINK_NO_ENOBUFS:
2303 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2304         case NETLINK_LISTEN_ALL_NSID:
2305         case NETLINK_CAP_ACK:
2306 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2307 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2308         case NETLINK_EXT_ACK:
2309 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2310 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2311         case NETLINK_GET_STRICT_CHK:
2312 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2313             break;
2314         default:
2315             goto unimplemented;
2316         }
2317         val = 0;
2318         if (optlen < sizeof(uint32_t)) {
2319             return -TARGET_EINVAL;
2320         }
2321         if (get_user_u32(val, optval_addr)) {
2322             return -TARGET_EFAULT;
2323         }
2324         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2325                                    sizeof(val)));
2326         break;
2327 #endif /* SOL_NETLINK */
2328     default:
2329     unimplemented:
2330         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2331                       level, optname);
2332         ret = -TARGET_ENOPROTOOPT;
2333     }
2334     return ret;
2335 }
2336 
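/*
 * The getsockopt path mirrors do_setsockopt(): int-valued options share
 * the int_case label, while structured results (timeval, ucred, linger,
 * netlink membership lists) are converted back into target format.
 */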
2337 /* do_getsockopt() Must return target values and target errnos. */
2338 static abi_long do_getsockopt(int sockfd, int level, int optname,
2339                               abi_ulong optval_addr, abi_ulong optlen)
2340 {
2341     abi_long ret;
2342     int len, val;
2343     socklen_t lv;
2344 
2345     switch(level) {
2346     case TARGET_SOL_SOCKET:
2347         level = SOL_SOCKET;
2348         switch (optname) {
2349         /* These don't just return a single integer */
2350         case TARGET_SO_PEERNAME:
2351             goto unimplemented;
2352         case TARGET_SO_RCVTIMEO: {
2353             struct timeval tv;
2354             socklen_t tvlen;
2355 
2356             optname = SO_RCVTIMEO;
2357 
2358 get_timeout:
2359             if (get_user_u32(len, optlen)) {
2360                 return -TARGET_EFAULT;
2361             }
2362             if (len < 0) {
2363                 return -TARGET_EINVAL;
2364             }
2365 
2366             tvlen = sizeof(tv);
2367             ret = get_errno(getsockopt(sockfd, level, optname,
2368                                        &tv, &tvlen));
2369             if (ret < 0) {
2370                 return ret;
2371             }
2372             if (len > sizeof(struct target_timeval)) {
2373                 len = sizeof(struct target_timeval);
2374             }
2375             if (copy_to_user_timeval(optval_addr, &tv)) {
2376                 return -TARGET_EFAULT;
2377             }
2378             if (put_user_u32(len, optlen)) {
2379                 return -TARGET_EFAULT;
2380             }
2381             break;
2382         }
2383         case TARGET_SO_SNDTIMEO:
2384             optname = SO_SNDTIMEO;
2385             goto get_timeout;
2386         case TARGET_SO_PEERCRED: {
2387             struct ucred cr;
2388             socklen_t crlen;
2389             struct target_ucred *tcr;
2390 
2391             if (get_user_u32(len, optlen)) {
2392                 return -TARGET_EFAULT;
2393             }
2394             if (len < 0) {
2395                 return -TARGET_EINVAL;
2396             }
2397 
2398             crlen = sizeof(cr);
2399             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2400                                        &cr, &crlen));
2401             if (ret < 0) {
2402                 return ret;
2403             }
2404             if (len > crlen) {
2405                 len = crlen;
2406             }
2407             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2408                 return -TARGET_EFAULT;
2409             }
2410             __put_user(cr.pid, &tcr->pid);
2411             __put_user(cr.uid, &tcr->uid);
2412             __put_user(cr.gid, &tcr->gid);
2413             unlock_user_struct(tcr, optval_addr, 1);
2414             if (put_user_u32(len, optlen)) {
2415                 return -TARGET_EFAULT;
2416             }
2417             break;
2418         }
2419         case TARGET_SO_PEERSEC: {
2420             char *name;
2421 
2422             if (get_user_u32(len, optlen)) {
2423                 return -TARGET_EFAULT;
2424             }
2425             if (len < 0) {
2426                 return -TARGET_EINVAL;
2427             }
2428             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2429             if (!name) {
2430                 return -TARGET_EFAULT;
2431             }
2432             lv = len;
2433             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2434                                        name, &lv));
2435             if (put_user_u32(lv, optlen)) {
2436                 ret = -TARGET_EFAULT;
2437             }
2438             unlock_user(name, optval_addr, lv);
2439             break;
2440         }
2441         case TARGET_SO_LINGER:
2442         {
2443             struct linger lg;
2444             socklen_t lglen;
2445             struct target_linger *tlg;
2446 
2447             if (get_user_u32(len, optlen)) {
2448                 return -TARGET_EFAULT;
2449             }
2450             if (len < 0) {
2451                 return -TARGET_EINVAL;
2452             }
2453 
2454             lglen = sizeof(lg);
2455             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2456                                        &lg, &lglen));
2457             if (ret < 0) {
2458                 return ret;
2459             }
2460             if (len > lglen) {
2461                 len = lglen;
2462             }
2463             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2464                 return -TARGET_EFAULT;
2465             }
2466             __put_user(lg.l_onoff, &tlg->l_onoff);
2467             __put_user(lg.l_linger, &tlg->l_linger);
2468             unlock_user_struct(tlg, optval_addr, 1);
2469             if (put_user_u32(len, optlen)) {
2470                 return -TARGET_EFAULT;
2471             }
2472             break;
2473         }
2474         /* Options with 'int' argument.  */
2475         case TARGET_SO_DEBUG:
2476             optname = SO_DEBUG;
2477             goto int_case;
2478         case TARGET_SO_REUSEADDR:
2479             optname = SO_REUSEADDR;
2480             goto int_case;
2481 #ifdef SO_REUSEPORT
2482         case TARGET_SO_REUSEPORT:
2483             optname = SO_REUSEPORT;
2484             goto int_case;
2485 #endif
2486         case TARGET_SO_TYPE:
2487             optname = SO_TYPE;
2488             goto int_case;
2489         case TARGET_SO_ERROR:
2490             optname = SO_ERROR;
2491             goto int_case;
2492         case TARGET_SO_DONTROUTE:
2493             optname = SO_DONTROUTE;
2494             goto int_case;
2495         case TARGET_SO_BROADCAST:
2496             optname = SO_BROADCAST;
2497             goto int_case;
2498         case TARGET_SO_SNDBUF:
2499             optname = SO_SNDBUF;
2500             goto int_case;
2501         case TARGET_SO_RCVBUF:
2502             optname = SO_RCVBUF;
2503             goto int_case;
2504         case TARGET_SO_KEEPALIVE:
2505             optname = SO_KEEPALIVE;
2506             goto int_case;
2507         case TARGET_SO_OOBINLINE:
2508             optname = SO_OOBINLINE;
2509             goto int_case;
2510         case TARGET_SO_NO_CHECK:
2511             optname = SO_NO_CHECK;
2512             goto int_case;
2513         case TARGET_SO_PRIORITY:
2514             optname = SO_PRIORITY;
2515             goto int_case;
2516 #ifdef SO_BSDCOMPAT
2517         case TARGET_SO_BSDCOMPAT:
2518             optname = SO_BSDCOMPAT;
2519             goto int_case;
2520 #endif
2521         case TARGET_SO_PASSCRED:
2522             optname = SO_PASSCRED;
2523             goto int_case;
2524         case TARGET_SO_TIMESTAMP:
2525             optname = SO_TIMESTAMP;
2526             goto int_case;
2527         case TARGET_SO_RCVLOWAT:
2528             optname = SO_RCVLOWAT;
2529             goto int_case;
2530         case TARGET_SO_ACCEPTCONN:
2531             optname = SO_ACCEPTCONN;
2532             goto int_case;
2533         default:
2534             goto int_case;
2535         }
2536         break;
2537     case SOL_TCP:
2538         /* TCP options all take an 'int' value.  */
2539     int_case:
2540         if (get_user_u32(len, optlen))
2541             return -TARGET_EFAULT;
2542         if (len < 0)
2543             return -TARGET_EINVAL;
2544         lv = sizeof(lv);
2545         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2546         if (ret < 0)
2547             return ret;
2548         if (optname == SO_TYPE) {
2549             val = host_to_target_sock_type(val);
2550         }
2551         if (len > lv)
2552             len = lv;
2553         if (len == 4) {
2554             if (put_user_u32(val, optval_addr))
2555                 return -TARGET_EFAULT;
2556         } else {
2557             if (put_user_u8(val, optval_addr))
2558                 return -TARGET_EFAULT;
2559         }
2560         if (put_user_u32(len, optlen))
2561             return -TARGET_EFAULT;
2562         break;
2563     case SOL_IP:
2564         switch(optname) {
2565         case IP_TOS:
2566         case IP_TTL:
2567         case IP_HDRINCL:
2568         case IP_ROUTER_ALERT:
2569         case IP_RECVOPTS:
2570         case IP_RETOPTS:
2571         case IP_PKTINFO:
2572         case IP_MTU_DISCOVER:
2573         case IP_RECVERR:
2574         case IP_RECVTOS:
2575 #ifdef IP_FREEBIND
2576         case IP_FREEBIND:
2577 #endif
2578         case IP_MULTICAST_TTL:
2579         case IP_MULTICAST_LOOP:
2580             if (get_user_u32(len, optlen))
2581                 return -TARGET_EFAULT;
2582             if (len < 0)
2583                 return -TARGET_EINVAL;
2584             lv = sizeof(lv);
2585             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2586             if (ret < 0)
2587                 return ret;
2588             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2589                 len = 1;
2590                 if (put_user_u32(len, optlen)
2591                     || put_user_u8(val, optval_addr))
2592                     return -TARGET_EFAULT;
2593             } else {
2594                 if (len > sizeof(int))
2595                     len = sizeof(int);
2596                 if (put_user_u32(len, optlen)
2597                     || put_user_u32(val, optval_addr))
2598                     return -TARGET_EFAULT;
2599             }
2600             break;
2601         default:
2602             ret = -TARGET_ENOPROTOOPT;
2603             break;
2604         }
2605         break;
2606     case SOL_IPV6:
2607         switch (optname) {
2608         case IPV6_MTU_DISCOVER:
2609         case IPV6_MTU:
2610         case IPV6_V6ONLY:
2611         case IPV6_RECVPKTINFO:
2612         case IPV6_UNICAST_HOPS:
2613         case IPV6_MULTICAST_HOPS:
2614         case IPV6_MULTICAST_LOOP:
2615         case IPV6_RECVERR:
2616         case IPV6_RECVHOPLIMIT:
2617         case IPV6_2292HOPLIMIT:
2618         case IPV6_CHECKSUM:
2619         case IPV6_ADDRFORM:
2620         case IPV6_2292PKTINFO:
2621         case IPV6_RECVTCLASS:
2622         case IPV6_RECVRTHDR:
2623         case IPV6_2292RTHDR:
2624         case IPV6_RECVHOPOPTS:
2625         case IPV6_2292HOPOPTS:
2626         case IPV6_RECVDSTOPTS:
2627         case IPV6_2292DSTOPTS:
2628         case IPV6_TCLASS:
2629 #ifdef IPV6_RECVPATHMTU
2630         case IPV6_RECVPATHMTU:
2631 #endif
2632 #ifdef IPV6_TRANSPARENT
2633         case IPV6_TRANSPARENT:
2634 #endif
2635 #ifdef IPV6_FREEBIND
2636         case IPV6_FREEBIND:
2637 #endif
2638 #ifdef IPV6_RECVORIGDSTADDR
2639         case IPV6_RECVORIGDSTADDR:
2640 #endif
2641             if (get_user_u32(len, optlen))
2642                 return -TARGET_EFAULT;
2643             if (len < 0)
2644                 return -TARGET_EINVAL;
2645             lv = sizeof(lv);
2646             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2647             if (ret < 0)
2648                 return ret;
2649             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2650                 len = 1;
2651                 if (put_user_u32(len, optlen)
2652                     || put_user_u8(val, optval_addr))
2653                     return -TARGET_EFAULT;
2654             } else {
2655                 if (len > sizeof(int))
2656                     len = sizeof(int);
2657                 if (put_user_u32(len, optlen)
2658                     || put_user_u32(val, optval_addr))
2659                     return -TARGET_EFAULT;
2660             }
2661             break;
2662         default:
2663             ret = -TARGET_ENOPROTOOPT;
2664             break;
2665         }
2666         break;
2667 #ifdef SOL_NETLINK
2668     case SOL_NETLINK:
2669         switch (optname) {
2670         case NETLINK_PKTINFO:
2671         case NETLINK_BROADCAST_ERROR:
2672         case NETLINK_NO_ENOBUFS:
2673 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2674         case NETLINK_LISTEN_ALL_NSID:
2675         case NETLINK_CAP_ACK:
2676 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2677 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2678         case NETLINK_EXT_ACK:
2679 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2680 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2681         case NETLINK_GET_STRICT_CHK:
2682 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2683             if (get_user_u32(len, optlen)) {
2684                 return -TARGET_EFAULT;
2685             }
2686             if (len != sizeof(val)) {
2687                 return -TARGET_EINVAL;
2688             }
2689             lv = len;
2690             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2691             if (ret < 0) {
2692                 return ret;
2693             }
2694             if (put_user_u32(lv, optlen)
2695                 || put_user_u32(val, optval_addr)) {
2696                 return -TARGET_EFAULT;
2697             }
2698             break;
2699 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2700         case NETLINK_LIST_MEMBERSHIPS:
2701         {
2702             uint32_t *results;
2703             int i;
2704             if (get_user_u32(len, optlen)) {
2705                 return -TARGET_EFAULT;
2706             }
2707             if (len < 0) {
2708                 return -TARGET_EINVAL;
2709             }
2710             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2711             if (!results) {
2712                 return -TARGET_EFAULT;
2713             }
2714             lv = len;
2715             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2716             if (ret < 0) {
2717                 unlock_user(results, optval_addr, 0);
2718                 return ret;
2719             }
2720             /* Swap host endianness to target endianness. */
2721             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2722                 results[i] = tswap32(results[i]);
2723             }
2724             if (put_user_u32(lv, optlen)) {
2725                 return -TARGET_EFAULT;
2726             }
2727             unlock_user(results, optval_addr, 0);
2728             break;
2729         }
2730 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2731         default:
2732             goto unimplemented;
2733         }
2734         break;
2735 #endif /* SOL_NETLINK */
2736     default:
2737     unimplemented:
2738         qemu_log_mask(LOG_UNIMP,
2739                       "getsockopt level=%d optname=%d not yet supported\n",
2740                       level, optname);
2741         ret = -TARGET_EOPNOTSUPP;
2742         break;
2743     }
2744     return ret;
2745 }
2746 
2747 /* Convert target low/high pair representing file offset into the host
2748  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2749  * as the kernel doesn't handle them either.
2750  */
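/* The shifts are done in two halves so that the shift count never
 * reaches the width of the type, which would be undefined behaviour
 * when TARGET_LONG_BITS or HOST_LONG_BITS is 64.  For example, a
 * 32-bit guest passing tlow=0x80000000, thigh=0x1 produces the 6 GiB
 * offset 0x180000000, returned entirely in *hlow on a 64-bit host.
 */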
2751 static void target_to_host_low_high(abi_ulong tlow,
2752                                     abi_ulong thigh,
2753                                     unsigned long *hlow,
2754                                     unsigned long *hhigh)
2755 {
2756     uint64_t off = tlow |
2757         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2758         TARGET_LONG_BITS / 2;
2759 
2760     *hlow = off;
2761     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2762 }
2763 
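/*
 * Build a host struct iovec array from a guest iovec array.  The first
 * bad buffer address is a hard EFAULT; later bad buffers degrade to
 * zero-length entries so the syscall performs a partial transfer, and
 * the total length is clamped to a value the host can handle.
 */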
2764 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2765                                 abi_ulong count, int copy)
2766 {
2767     struct target_iovec *target_vec;
2768     struct iovec *vec;
2769     abi_ulong total_len, max_len;
2770     int i;
2771     int err = 0;
2772     bool bad_address = false;
2773 
2774     if (count == 0) {
2775         errno = 0;
2776         return NULL;
2777     }
2778     if (count > IOV_MAX) {
2779         errno = EINVAL;
2780         return NULL;
2781     }
2782 
2783     vec = g_try_new0(struct iovec, count);
2784     if (vec == NULL) {
2785         errno = ENOMEM;
2786         return NULL;
2787     }
2788 
2789     target_vec = lock_user(VERIFY_READ, target_addr,
2790                            count * sizeof(struct target_iovec), 1);
2791     if (target_vec == NULL) {
2792         err = EFAULT;
2793         goto fail2;
2794     }
2795 
2796     /* ??? If host page size > target page size, this will result in a
2797        value larger than what we can actually support.  */
2798     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2799     total_len = 0;
2800 
2801     for (i = 0; i < count; i++) {
2802         abi_ulong base = tswapal(target_vec[i].iov_base);
2803         abi_long len = tswapal(target_vec[i].iov_len);
2804 
2805         if (len < 0) {
2806             err = EINVAL;
2807             goto fail;
2808         } else if (len == 0) {
2809             /* Zero length pointer is ignored.  */
2810             vec[i].iov_base = 0;
2811         } else {
2812             vec[i].iov_base = lock_user(type, base, len, copy);
2813             /* If the first buffer pointer is bad, this is a fault.  But
2814              * subsequent bad buffers will result in a partial write; this
2815              * is realized by filling the vector with null pointers and
2816              * zero lengths. */
2817             if (!vec[i].iov_base) {
2818                 if (i == 0) {
2819                     err = EFAULT;
2820                     goto fail;
2821                 } else {
2822                     bad_address = true;
2823                 }
2824             }
2825             if (bad_address) {
2826                 len = 0;
2827             }
2828             if (len > max_len - total_len) {
2829                 len = max_len - total_len;
2830             }
2831         }
2832         vec[i].iov_len = len;
2833         total_len += len;
2834     }
2835 
2836     unlock_user(target_vec, target_addr, 0);
2837     return vec;
2838 
2839  fail:
2840     while (--i >= 0) {
2841         if (tswapal(target_vec[i].iov_len) > 0) {
2842             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2843         }
2844     }
2845     unlock_user(target_vec, target_addr, 0);
2846  fail2:
2847     g_free(vec);
2848     errno = err;
2849     return NULL;
2850 }
2851 
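/*
 * Release the buffers locked by lock_iovec(), copying data back to the
 * guest when 'copy' is set (i.e. after a read-style operation).
 */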
2852 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2853                          abi_ulong count, int copy)
2854 {
2855     struct target_iovec *target_vec;
2856     int i;
2857 
2858     target_vec = lock_user(VERIFY_READ, target_addr,
2859                            count * sizeof(struct target_iovec), 1);
2860     if (target_vec) {
2861         for (i = 0; i < count; i++) {
2862             abi_ulong base = tswapal(target_vec[i].iov_base);
2863             abi_long len = tswapal(target_vec[i].iov_len);
2864             if (len < 0) {
2865                 break;
2866             }
2867             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2868         }
2869         unlock_user(target_vec, target_addr, 0);
2870     }
2871 
2872     g_free(vec);
2873 }
2874 
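/*
 * Translate the guest socket type and its embedded SOCK_CLOEXEC /
 * SOCK_NONBLOCK flags into host values; flags the host cannot express
 * at socket creation time are either rejected or fixed up afterwards
 * in sock_flags_fixup().
 */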
2875 static inline int target_to_host_sock_type(int *type)
2876 {
2877     int host_type = 0;
2878     int target_type = *type;
2879 
2880     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2881     case TARGET_SOCK_DGRAM:
2882         host_type = SOCK_DGRAM;
2883         break;
2884     case TARGET_SOCK_STREAM:
2885         host_type = SOCK_STREAM;
2886         break;
2887     default:
2888         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2889         break;
2890     }
2891     if (target_type & TARGET_SOCK_CLOEXEC) {
2892 #if defined(SOCK_CLOEXEC)
2893         host_type |= SOCK_CLOEXEC;
2894 #else
2895         return -TARGET_EINVAL;
2896 #endif
2897     }
2898     if (target_type & TARGET_SOCK_NONBLOCK) {
2899 #if defined(SOCK_NONBLOCK)
2900         host_type |= SOCK_NONBLOCK;
2901 #elif !defined(O_NONBLOCK)
2902         return -TARGET_EINVAL;
2903 #endif
2904     }
2905     *type = host_type;
2906     return 0;
2907 }
2908 
2909 /* Try to emulate socket type flags after socket creation.  */
2910 static int sock_flags_fixup(int fd, int target_type)
2911 {
2912 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2913     if (target_type & TARGET_SOCK_NONBLOCK) {
2914         int flags = fcntl(fd, F_GETFL);
2915         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2916             close(fd);
2917             return -TARGET_EINVAL;
2918         }
2919     }
2920 #endif
2921     return fd;
2922 }
2923 
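/*
 * Only a small whitelist of netlink protocols is supported, because
 * each one needs its own message translator; sockets that carry
 * structured binary data register an fd translator here so that later
 * send/recv calls can convert it.
 */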
2924 /* do_socket() Must return target values and target errnos. */
2925 static abi_long do_socket(int domain, int type, int protocol)
2926 {
2927     int target_type = type;
2928     int ret;
2929 
2930     ret = target_to_host_sock_type(&type);
2931     if (ret) {
2932         return ret;
2933     }
2934 
2935     if (domain == PF_NETLINK && !(
2936 #ifdef CONFIG_RTNETLINK
2937          protocol == NETLINK_ROUTE ||
2938 #endif
2939          protocol == NETLINK_KOBJECT_UEVENT ||
2940          protocol == NETLINK_AUDIT)) {
2941         return -EPFNOSUPPORT;
2942     }
2943 
2944     if (domain == AF_PACKET ||
2945         (domain == AF_INET && type == SOCK_PACKET)) {
2946         protocol = tswap16(protocol);
2947     }
2948 
2949     ret = get_errno(socket(domain, type, protocol));
2950     if (ret >= 0) {
2951         ret = sock_flags_fixup(ret, target_type);
2952         if (type == SOCK_PACKET) {
2953             /* Handle an obsolete case:
2954              * if the socket type is SOCK_PACKET, it is bound by name.
2955              */
2956             fd_trans_register(ret, &target_packet_trans);
2957         } else if (domain == PF_NETLINK) {
2958             switch (protocol) {
2959 #ifdef CONFIG_RTNETLINK
2960             case NETLINK_ROUTE:
2961                 fd_trans_register(ret, &target_netlink_route_trans);
2962                 break;
2963 #endif
2964             case NETLINK_KOBJECT_UEVENT:
2965                 /* nothing to do: messages are strings */
2966                 break;
2967             case NETLINK_AUDIT:
2968                 fd_trans_register(ret, &target_netlink_audit_trans);
2969                 break;
2970             default:
2971                 g_assert_not_reached();
2972             }
2973         }
2974     }
2975     return ret;
2976 }
2977 
2978 /* do_bind() Must return target values and target errnos. */
2979 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2980                         socklen_t addrlen)
2981 {
2982     void *addr;
2983     abi_long ret;
2984 
2985     if ((int)addrlen < 0) {
2986         return -TARGET_EINVAL;
2987     }
2988 
2989     addr = alloca(addrlen+1);
2990 
2991     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2992     if (ret)
2993         return ret;
2994 
2995     return get_errno(bind(sockfd, addr, addrlen));
2996 }
2997 
2998 /* do_connect() Must return target values and target errnos. */
2999 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3000                            socklen_t addrlen)
3001 {
3002     void *addr;
3003     abi_long ret;
3004 
3005     if ((int)addrlen < 0) {
3006         return -TARGET_EINVAL;
3007     }
3008 
3009     addr = alloca(addrlen+1);
3010 
3011     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3012     if (ret)
3013         return ret;
3014 
3015     return get_errno(safe_connect(sockfd, addr, addrlen));
3016 }
3017 
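/*
 * Common worker for sendmsg/recvmsg (and the mmsg variants).  The host
 * control buffer is allocated at twice the guest's msg_controllen to
 * leave room for payloads that grow during conversion; see the comment
 * in target_to_host_cmsg().
 */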
3018 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3019 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3020                                       int flags, int send)
3021 {
3022     abi_long ret, len;
3023     struct msghdr msg;
3024     abi_ulong count;
3025     struct iovec *vec;
3026     abi_ulong target_vec;
3027 
3028     if (msgp->msg_name) {
3029         msg.msg_namelen = tswap32(msgp->msg_namelen);
3030         msg.msg_name = alloca(msg.msg_namelen+1);
3031         ret = target_to_host_sockaddr(fd, msg.msg_name,
3032                                       tswapal(msgp->msg_name),
3033                                       msg.msg_namelen);
3034         if (ret == -TARGET_EFAULT) {
3035             /* For connected sockets msg_name and msg_namelen must
3036              * be ignored, so returning EFAULT immediately is wrong.
3037              * Instead, pass a bad msg_name to the host kernel, and
3038              * let it decide whether to return EFAULT or not.
3039              */
3040             msg.msg_name = (void *)-1;
3041         } else if (ret) {
3042             goto out2;
3043         }
3044     } else {
3045         msg.msg_name = NULL;
3046         msg.msg_namelen = 0;
3047     }
3048     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3049     msg.msg_control = alloca(msg.msg_controllen);
3050     memset(msg.msg_control, 0, msg.msg_controllen);
3051 
3052     msg.msg_flags = tswap32(msgp->msg_flags);
3053 
3054     count = tswapal(msgp->msg_iovlen);
3055     target_vec = tswapal(msgp->msg_iov);
3056 
3057     if (count > IOV_MAX) {
3058         /* sendmsg/recvmsg return a different errno for this condition than
3059          * readv/writev, so we must catch it here before lock_iovec() does.
3060          */
3061         ret = -TARGET_EMSGSIZE;
3062         goto out2;
3063     }
3064 
3065     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3066                      target_vec, count, send);
3067     if (vec == NULL) {
3068         ret = -host_to_target_errno(errno);
3069         goto out2;
3070     }
3071     msg.msg_iovlen = count;
3072     msg.msg_iov = vec;
3073 
3074     if (send) {
3075         if (fd_trans_target_to_host_data(fd)) {
3076             void *host_msg;
3077 
3078             host_msg = g_malloc(msg.msg_iov->iov_len);
3079             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3080             ret = fd_trans_target_to_host_data(fd)(host_msg,
3081                                                    msg.msg_iov->iov_len);
3082             if (ret >= 0) {
3083                 msg.msg_iov->iov_base = host_msg;
3084                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3085             }
3086             g_free(host_msg);
3087         } else {
3088             ret = target_to_host_cmsg(&msg, msgp);
3089             if (ret == 0) {
3090                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3091             }
3092         }
3093     } else {
3094         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3095         if (!is_error(ret)) {
3096             len = ret;
3097             if (fd_trans_host_to_target_data(fd)) {
3098                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3099                                                MIN(msg.msg_iov->iov_len, len));
3100             } else {
3101                 ret = host_to_target_cmsg(msgp, &msg);
3102             }
3103             if (!is_error(ret)) {
3104                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3105                 msgp->msg_flags = tswap32(msg.msg_flags);
3106                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3107                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3108                                     msg.msg_name, msg.msg_namelen);
3109                     if (ret) {
3110                         goto out;
3111                     }
3112                 }
3113 
3114                 ret = len;
3115             }
3116         }
3117     }
3118 
3119 out:
3120     unlock_iovec(vec, target_vec, count, !send);
3121 out2:
3122     return ret;
3123 }
3124 
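/*
 * Illustrative guest-side case handled by do_sendrecvmsg_locked() above
 * (a sketch, not part of the emulation): a guest that does
 *
 *     connect(fd, ...);
 *     struct msghdr mh = { .msg_name = (void *)0xbad, .msg_namelen = 16, ... };
 *     sendmsg(fd, &mh, 0);
 *
 * must still succeed, because the kernel ignores msg_name on connected
 * sockets.  That is why a bad guest msg_name is forwarded as (void *)-1
 * and the host kernel, not QEMU, decides whether EFAULT is due.
 */
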
3125 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3126                                int flags, int send)
3127 {
3128     abi_long ret;
3129     struct target_msghdr *msgp;
3130 
3131     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3132                           msgp,
3133                           target_msg,
3134                           send ? 1 : 0)) {
3135         return -TARGET_EFAULT;
3136     }
3137     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3138     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3139     return ret;
3140 }
3141 
3142 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3143  * so it might not have this *mmsg-specific flag either.
3144  */
3145 #ifndef MSG_WAITFORONE
3146 #define MSG_WAITFORONE 0x10000
3147 #endif
3148 
3149 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3150                                 unsigned int vlen, unsigned int flags,
3151                                 int send)
3152 {
3153     struct target_mmsghdr *mmsgp;
3154     abi_long ret = 0;
3155     int i;
3156 
3157     if (vlen > UIO_MAXIOV) {
3158         vlen = UIO_MAXIOV;
3159     }
3160 
3161     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3162     if (!mmsgp) {
3163         return -TARGET_EFAULT;
3164     }
3165 
3166     for (i = 0; i < vlen; i++) {
3167         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3168         if (is_error(ret)) {
3169             break;
3170         }
3171         mmsgp[i].msg_len = tswap32(ret);
3172         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3173         if (flags & MSG_WAITFORONE) {
3174             flags |= MSG_DONTWAIT;
3175         }
3176     }
3177 
3178     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3179 
3180     /* Return the number of datagrams sent or received if we handled
3181      * any at all; otherwise return the error.
3182      */
3183     if (i) {
3184         return i;
3185     }
3186     return ret;
3187 }
3188 
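/*
 * Illustrative guest-side usage handled by do_sendrecvmmsg() above
 * (a sketch):
 *
 *     struct mmsghdr msgs[4];
 *     recvmmsg(fd, msgs, 4, MSG_WAITFORONE, NULL);
 *
 * Because the loop emulates sendmmsg/recvmmsg with one sendmsg/recvmsg
 * per entry, MSG_WAITFORONE is implemented by OR-ing in MSG_DONTWAIT
 * once the first datagram has been transferred, and the return value is
 * the number of entries completed rather than a byte count.
 */
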
3189 /* do_accept4() must return target values and target errnos. */
3190 static abi_long do_accept4(int fd, abi_ulong target_addr,
3191                            abi_ulong target_addrlen_addr, int flags)
3192 {
3193     socklen_t addrlen, ret_addrlen;
3194     void *addr;
3195     abi_long ret;
3196     int host_flags;
3197 
3198     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3199 
3200     if (target_addr == 0) {
3201         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3202     }
3203 
3204     /* Linux returns EINVAL if the addrlen pointer is invalid */
3205     if (get_user_u32(addrlen, target_addrlen_addr))
3206         return -TARGET_EINVAL;
3207 
3208     if ((int)addrlen < 0) {
3209         return -TARGET_EINVAL;
3210     }
3211 
3212     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3213         return -TARGET_EINVAL;
3214 
3215     addr = alloca(addrlen);
3216 
3217     ret_addrlen = addrlen;
3218     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3219     if (!is_error(ret)) {
3220         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3221         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3222             ret = -TARGET_EFAULT;
3223         }
3224     }
3225     return ret;
3226 }
3227 
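/*
 * Illustrative guest-side pattern handled by do_accept4() above
 * (a sketch):
 *
 *     struct sockaddr_in peer;
 *     socklen_t len = sizeof(peer);
 *     int conn = accept4(fd, (struct sockaddr *)&peer, &len, SOCK_CLOEXEC);
 *
 * The guest's len is read before the call, the kernel's (possibly larger)
 * result length is written back afterwards, and only
 * MIN(addrlen, ret_addrlen) bytes of the address are copied so the guest
 * buffer is never overrun.  do_getpeername() and do_getsockname() below
 * follow the same scheme.
 */
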
3228 /* do_getpeername() must return target values and target errnos. */
3229 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3230                                abi_ulong target_addrlen_addr)
3231 {
3232     socklen_t addrlen, ret_addrlen;
3233     void *addr;
3234     abi_long ret;
3235 
3236     if (get_user_u32(addrlen, target_addrlen_addr))
3237         return -TARGET_EFAULT;
3238 
3239     if ((int)addrlen < 0) {
3240         return -TARGET_EINVAL;
3241     }
3242 
3243     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3244         return -TARGET_EFAULT;
3245 
3246     addr = alloca(addrlen);
3247 
3248     ret_addrlen = addrlen;
3249     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3250     if (!is_error(ret)) {
3251         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3252         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3253             ret = -TARGET_EFAULT;
3254         }
3255     }
3256     return ret;
3257 }
3258 
3259 /* do_getsockname() must return target values and target errnos. */
3260 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3261                                abi_ulong target_addrlen_addr)
3262 {
3263     socklen_t addrlen, ret_addrlen;
3264     void *addr;
3265     abi_long ret;
3266 
3267     if (get_user_u32(addrlen, target_addrlen_addr))
3268         return -TARGET_EFAULT;
3269 
3270     if ((int)addrlen < 0) {
3271         return -TARGET_EINVAL;
3272     }
3273 
3274     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3275         return -TARGET_EFAULT;
3276 
3277     addr = alloca(addrlen);
3278 
3279     ret_addrlen = addrlen;
3280     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3281     if (!is_error(ret)) {
3282         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3283         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3284             ret = -TARGET_EFAULT;
3285         }
3286     }
3287     return ret;
3288 }
3289 
3290 /* do_socketpair() must return target values and target errnos. */
3291 static abi_long do_socketpair(int domain, int type, int protocol,
3292                               abi_ulong target_tab_addr)
3293 {
3294     int tab[2];
3295     abi_long ret;
3296 
3297     target_to_host_sock_type(&type);
3298 
3299     ret = get_errno(socketpair(domain, type, protocol, tab));
3300     if (!is_error(ret)) {
3301         if (put_user_s32(tab[0], target_tab_addr)
3302             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3303             ret = -TARGET_EFAULT;
3304     }
3305     return ret;
3306 }
3307 
3308 /* do_sendto() must return target values and target errnos. */
3309 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3310                           abi_ulong target_addr, socklen_t addrlen)
3311 {
3312     void *addr;
3313     void *host_msg;
3314     void *copy_msg = NULL;
3315     abi_long ret;
3316 
3317     if ((int)addrlen < 0) {
3318         return -TARGET_EINVAL;
3319     }
3320 
3321     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3322     if (!host_msg)
3323         return -TARGET_EFAULT;
3324     if (fd_trans_target_to_host_data(fd)) {
3325         copy_msg = host_msg;
3326         host_msg = g_malloc(len);
3327         memcpy(host_msg, copy_msg, len);
3328         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3329         if (ret < 0) {
3330             goto fail;
3331         }
3332     }
3333     if (target_addr) {
3334         addr = alloca(addrlen+1);
3335         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3336         if (ret) {
3337             goto fail;
3338         }
3339         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3340     } else {
3341         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3342     }
3343 fail:
3344     if (copy_msg) {
3345         g_free(host_msg);
3346         host_msg = copy_msg;
3347     }
3348     unlock_user(host_msg, msg, 0);
3349     return ret;
3350 }
3351 
3352 /* do_recvfrom() must return target values and target errnos. */
3353 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3354                             abi_ulong target_addr,
3355                             abi_ulong target_addrlen)
3356 {
3357     socklen_t addrlen, ret_addrlen;
3358     void *addr;
3359     void *host_msg;
3360     abi_long ret;
3361 
3362     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3363     if (!host_msg)
3364         return -TARGET_EFAULT;
3365     if (target_addr) {
3366         if (get_user_u32(addrlen, target_addrlen)) {
3367             ret = -TARGET_EFAULT;
3368             goto fail;
3369         }
3370         if ((int)addrlen < 0) {
3371             ret = -TARGET_EINVAL;
3372             goto fail;
3373         }
3374         addr = alloca(addrlen);
3375         ret_addrlen = addrlen;
3376         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3377                                       addr, &ret_addrlen));
3378     } else {
3379         addr = NULL; /* To keep compiler quiet.  */
3380         addrlen = 0; /* To keep compiler quiet.  */
3381         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3382     }
3383     if (!is_error(ret)) {
3384         if (fd_trans_host_to_target_data(fd)) {
3385             abi_long trans;
3386             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3387             if (is_error(trans)) {
3388                 ret = trans;
3389                 goto fail;
3390             }
3391         }
3392         if (target_addr) {
3393             host_to_target_sockaddr(target_addr, addr,
3394                                     MIN(addrlen, ret_addrlen));
3395             if (put_user_u32(ret_addrlen, target_addrlen)) {
3396                 ret = -TARGET_EFAULT;
3397                 goto fail;
3398             }
3399         }
3400         unlock_user(host_msg, msg, len);
3401     } else {
3402 fail:
3403         unlock_user(host_msg, msg, 0);
3404     }
3405     return ret;
3406 }
3407 
3408 #ifdef TARGET_NR_socketcall
3409 /* do_socketcall() must return target values and target errnos. */
3410 static abi_long do_socketcall(int num, abi_ulong vptr)
3411 {
3412     static const unsigned nargs[] = { /* number of arguments per operation */
3413         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3414         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3415         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3416         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3417         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3418         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3419         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3420         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3421         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3422         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3423         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3424         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3425         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3426         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3427         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3428         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3429         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3430         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3431         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3432         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3433     };
3434     abi_long a[6]; /* max 6 args */
3435     unsigned i;
3436 
3437     /* check the range of the first argument num */
3438     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3439     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3440         return -TARGET_EINVAL;
3441     }
3442     /* ensure we have space for args */
3443     if (nargs[num] > ARRAY_SIZE(a)) {
3444         return -TARGET_EINVAL;
3445     }
3446     /* collect the arguments in a[] according to nargs[] */
3447     for (i = 0; i < nargs[num]; ++i) {
3448         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3449             return -TARGET_EFAULT;
3450         }
3451     }
3452     /* now when we have the args, invoke the appropriate underlying function */
3453     switch (num) {
3454     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3455         return do_socket(a[0], a[1], a[2]);
3456     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3457         return do_bind(a[0], a[1], a[2]);
3458     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3459         return do_connect(a[0], a[1], a[2]);
3460     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3461         return get_errno(listen(a[0], a[1]));
3462     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3463         return do_accept4(a[0], a[1], a[2], 0);
3464     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3465         return do_getsockname(a[0], a[1], a[2]);
3466     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3467         return do_getpeername(a[0], a[1], a[2]);
3468     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3469         return do_socketpair(a[0], a[1], a[2], a[3]);
3470     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3471         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3472     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3473         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3474     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3475         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3476     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3477         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3478     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3479         return get_errno(shutdown(a[0], a[1]));
3480     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3481         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3482     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3483         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3484     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3485         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3486     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3487         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3488     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3489         return do_accept4(a[0], a[1], a[2], a[3]);
3490     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3491         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3492     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3493         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3494     default:
3495         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3496         return -TARGET_EINVAL;
3497     }
3498 }
3499 #endif
3500 
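/*
 * Illustrative guest-side view of socketcall() (a sketch, not part of
 * the emulation): on targets that multiplex the socket API, the guest
 * libc packs the arguments into an array of longs in guest memory,
 * roughly:
 *
 *     long args[3] = { domain, type, protocol };
 *     syscall(__NR_socketcall, SYS_SOCKET, args);
 *
 * do_socketcall() above fetches nargs[num] abi_longs starting at vptr
 * and then dispatches to the matching do_* helper.
 */
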
3501 #define N_SHM_REGIONS	32
3502 
3503 static struct shm_region {
3504     abi_ulong start;
3505     abi_ulong size;
3506     bool in_use;
3507 } shm_regions[N_SHM_REGIONS];
3508 
3509 #ifndef TARGET_SEMID64_DS
3510 /* asm-generic version of this struct */
3511 struct target_semid64_ds
3512 {
3513   struct target_ipc_perm sem_perm;
3514   abi_ulong sem_otime;
3515 #if TARGET_ABI_BITS == 32
3516   abi_ulong __unused1;
3517 #endif
3518   abi_ulong sem_ctime;
3519 #if TARGET_ABI_BITS == 32
3520   abi_ulong __unused2;
3521 #endif
3522   abi_ulong sem_nsems;
3523   abi_ulong __unused3;
3524   abi_ulong __unused4;
3525 };
3526 #endif
3527 
3528 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3529                                                abi_ulong target_addr)
3530 {
3531     struct target_ipc_perm *target_ip;
3532     struct target_semid64_ds *target_sd;
3533 
3534     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3535         return -TARGET_EFAULT;
3536     target_ip = &(target_sd->sem_perm);
3537     host_ip->__key = tswap32(target_ip->__key);
3538     host_ip->uid = tswap32(target_ip->uid);
3539     host_ip->gid = tswap32(target_ip->gid);
3540     host_ip->cuid = tswap32(target_ip->cuid);
3541     host_ip->cgid = tswap32(target_ip->cgid);
3542 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3543     host_ip->mode = tswap32(target_ip->mode);
3544 #else
3545     host_ip->mode = tswap16(target_ip->mode);
3546 #endif
3547 #if defined(TARGET_PPC)
3548     host_ip->__seq = tswap32(target_ip->__seq);
3549 #else
3550     host_ip->__seq = tswap16(target_ip->__seq);
3551 #endif
3552     unlock_user_struct(target_sd, target_addr, 0);
3553     return 0;
3554 }
3555 
3556 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3557                                                struct ipc_perm *host_ip)
3558 {
3559     struct target_ipc_perm *target_ip;
3560     struct target_semid64_ds *target_sd;
3561 
3562     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3563         return -TARGET_EFAULT;
3564     target_ip = &(target_sd->sem_perm);
3565     target_ip->__key = tswap32(host_ip->__key);
3566     target_ip->uid = tswap32(host_ip->uid);
3567     target_ip->gid = tswap32(host_ip->gid);
3568     target_ip->cuid = tswap32(host_ip->cuid);
3569     target_ip->cgid = tswap32(host_ip->cgid);
3570 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3571     target_ip->mode = tswap32(host_ip->mode);
3572 #else
3573     target_ip->mode = tswap16(host_ip->mode);
3574 #endif
3575 #if defined(TARGET_PPC)
3576     target_ip->__seq = tswap32(host_ip->__seq);
3577 #else
3578     target_ip->__seq = tswap16(host_ip->__seq);
3579 #endif
3580     unlock_user_struct(target_sd, target_addr, 1);
3581     return 0;
3582 }
3583 
3584 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3585                                                abi_ulong target_addr)
3586 {
3587     struct target_semid64_ds *target_sd;
3588 
3589     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3590         return -TARGET_EFAULT;
3591     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3592         return -TARGET_EFAULT;
3593     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3594     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3595     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3596     unlock_user_struct(target_sd, target_addr, 0);
3597     return 0;
3598 }
3599 
3600 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3601                                                struct semid_ds *host_sd)
3602 {
3603     struct target_semid64_ds *target_sd;
3604 
3605     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3606         return -TARGET_EFAULT;
3607     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3608         return -TARGET_EFAULT;
3609     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3610     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3611     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3612     unlock_user_struct(target_sd, target_addr, 1);
3613     return 0;
3614 }
3615 
3616 struct target_seminfo {
3617     int semmap;
3618     int semmni;
3619     int semmns;
3620     int semmnu;
3621     int semmsl;
3622     int semopm;
3623     int semume;
3624     int semusz;
3625     int semvmx;
3626     int semaem;
3627 };
3628 
3629 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3630                                               struct seminfo *host_seminfo)
3631 {
3632     struct target_seminfo *target_seminfo;
3633     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3634         return -TARGET_EFAULT;
3635     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3636     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3637     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3638     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3639     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3640     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3641     __put_user(host_seminfo->semume, &target_seminfo->semume);
3642     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3643     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3644     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3645     unlock_user_struct(target_seminfo, target_addr, 1);
3646     return 0;
3647 }
3648 
3649 union semun {
3650     int val;
3651     struct semid_ds *buf;
3652     unsigned short *array;
3653     struct seminfo *__buf;
3654 };
3655 
3656 union target_semun {
3657     int val;
3658     abi_ulong buf;
3659     abi_ulong array;
3660     abi_ulong __buf;
3661 };
3662 
3663 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3664                                                abi_ulong target_addr)
3665 {
3666     int nsems;
3667     unsigned short *array;
3668     union semun semun;
3669     struct semid_ds semid_ds;
3670     int i, ret;
3671 
3672     semun.buf = &semid_ds;
3673 
3674     ret = semctl(semid, 0, IPC_STAT, semun);
3675     if (ret == -1)
3676         return get_errno(ret);
3677 
3678     nsems = semid_ds.sem_nsems;
3679 
3680     *host_array = g_try_new(unsigned short, nsems);
3681     if (!*host_array) {
3682         return -TARGET_ENOMEM;
3683     }
3684     array = lock_user(VERIFY_READ, target_addr,
3685                       nsems*sizeof(unsigned short), 1);
3686     if (!array) {
3687         g_free(*host_array);
3688         return -TARGET_EFAULT;
3689     }
3690 
3691     for (i = 0; i < nsems; i++) {
3692         __get_user((*host_array)[i], &array[i]);
3693     }
3694     unlock_user(array, target_addr, 0);
3695 
3696     return 0;
3697 }
3698 
3699 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3700                                                unsigned short **host_array)
3701 {
3702     int nsems;
3703     unsigned short *array;
3704     union semun semun;
3705     struct semid_ds semid_ds;
3706     int i, ret;
3707 
3708     semun.buf = &semid_ds;
3709 
3710     ret = semctl(semid, 0, IPC_STAT, semun);
3711     if (ret == -1)
3712         return get_errno(ret);
3713 
3714     nsems = semid_ds.sem_nsems;
3715 
3716     array = lock_user(VERIFY_WRITE, target_addr,
3717                       nsems*sizeof(unsigned short), 0);
3718     if (!array)
3719         return -TARGET_EFAULT;
3720 
3721     for (i = 0; i < nsems; i++) {
3722         __put_user((*host_array)[i], &array[i]);
3723     }
3724     g_free(*host_array);
3725     unlock_user(array, target_addr, 1);
3726 
3727     return 0;
3728 }
3729 
3730 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3731                                  abi_ulong target_arg)
3732 {
3733     union target_semun target_su = { .buf = target_arg };
3734     union semun arg;
3735     struct semid_ds dsarg;
3736     unsigned short *array = NULL;
3737     struct seminfo seminfo;
3738     abi_long ret = -TARGET_EINVAL;
3739     abi_long err;
3740     cmd &= 0xff;
3741 
3742     switch (cmd) {
3743     case GETVAL:
3744     case SETVAL:
3745         /* In 64 bit cross-endian situations, we will erroneously pick up
3746          * the wrong half of the union for the "val" element.  To rectify
3747          * this, the entire 8-byte structure is byteswapped, followed by
3748          * a swap of the 4 byte val field. In other cases, the data is
3749          * already in proper host byte order. */
3750         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3751             target_su.buf = tswapal(target_su.buf);
3752             arg.val = tswap32(target_su.val);
3753         } else {
3754             arg.val = target_su.val;
3755         }
3756         ret = get_errno(semctl(semid, semnum, cmd, arg));
3757         break;
3758     case GETALL:
3759     case SETALL:
3760         err = target_to_host_semarray(semid, &array, target_su.array);
3761         if (err)
3762             return err;
3763         arg.array = array;
3764         ret = get_errno(semctl(semid, semnum, cmd, arg));
3765         err = host_to_target_semarray(semid, target_su.array, &array);
3766         if (err)
3767             return err;
3768         break;
3769     case IPC_STAT:
3770     case IPC_SET:
3771     case SEM_STAT:
3772         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3773         if (err)
3774             return err;
3775         arg.buf = &dsarg;
3776         ret = get_errno(semctl(semid, semnum, cmd, arg));
3777         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3778         if (err)
3779             return err;
3780         break;
3781     case IPC_INFO:
3782     case SEM_INFO:
3783         arg.__buf = &seminfo;
3784         ret = get_errno(semctl(semid, semnum, cmd, arg));
3785         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3786         if (err)
3787             return err;
3788         break;
3789     case IPC_RMID:
3790     case GETPID:
3791     case GETNCNT:
3792     case GETZCNT:
3793         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3794         break;
3795     }
3796 
3797     return ret;
3798 }
3799 
3800 struct target_sembuf {
3801     unsigned short sem_num;
3802     short sem_op;
3803     short sem_flg;
3804 };
3805 
3806 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3807                                              abi_ulong target_addr,
3808                                              unsigned nsops)
3809 {
3810     struct target_sembuf *target_sembuf;
3811     int i;
3812 
3813     target_sembuf = lock_user(VERIFY_READ, target_addr,
3814                               nsops*sizeof(struct target_sembuf), 1);
3815     if (!target_sembuf)
3816         return -TARGET_EFAULT;
3817 
3818     for (i = 0; i < nsops; i++) {
3819         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3820         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3821         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3822     }
3823 
3824     unlock_user(target_sembuf, target_addr, 0);
3825 
3826     return 0;
3827 }
3828 
3829 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3830 {
3831     struct sembuf sops[nsops];
3832     abi_long ret;
3833 
3834     if (target_to_host_sembuf(sops, ptr, nsops))
3835         return -TARGET_EFAULT;
3836 
3837     ret = -TARGET_ENOSYS;
3838 #ifdef __NR_semtimedop
3839     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3840 #endif
3841 #ifdef __NR_ipc
3842     if (ret == -TARGET_ENOSYS) {
3843         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3844     }
3845 #endif
3846     return ret;
3847 }
3848 
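/*
 * Note on the fallback above (an illustrative sketch): some hosts only
 * provide the multiplexed ipc(2) syscall rather than a direct
 * __NR_semtimedop, so when the direct call is unavailable or reports
 * ENOSYS, do_semop() retries the same operation as
 *
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0);
 *
 * The msgsnd and msgrcv helpers below use the same two-step pattern.
 */
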
3849 struct target_msqid_ds
3850 {
3851     struct target_ipc_perm msg_perm;
3852     abi_ulong msg_stime;
3853 #if TARGET_ABI_BITS == 32
3854     abi_ulong __unused1;
3855 #endif
3856     abi_ulong msg_rtime;
3857 #if TARGET_ABI_BITS == 32
3858     abi_ulong __unused2;
3859 #endif
3860     abi_ulong msg_ctime;
3861 #if TARGET_ABI_BITS == 32
3862     abi_ulong __unused3;
3863 #endif
3864     abi_ulong __msg_cbytes;
3865     abi_ulong msg_qnum;
3866     abi_ulong msg_qbytes;
3867     abi_ulong msg_lspid;
3868     abi_ulong msg_lrpid;
3869     abi_ulong __unused4;
3870     abi_ulong __unused5;
3871 };
3872 
3873 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3874                                                abi_ulong target_addr)
3875 {
3876     struct target_msqid_ds *target_md;
3877 
3878     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3879         return -TARGET_EFAULT;
3880     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3881         return -TARGET_EFAULT;
3882     host_md->msg_stime = tswapal(target_md->msg_stime);
3883     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3884     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3885     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3886     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3887     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3888     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3889     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3890     unlock_user_struct(target_md, target_addr, 0);
3891     return 0;
3892 }
3893 
3894 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3895                                                struct msqid_ds *host_md)
3896 {
3897     struct target_msqid_ds *target_md;
3898 
3899     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3900         return -TARGET_EFAULT;
3901     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3902         return -TARGET_EFAULT;
3903     target_md->msg_stime = tswapal(host_md->msg_stime);
3904     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3905     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3906     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3907     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3908     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3909     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3910     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3911     unlock_user_struct(target_md, target_addr, 1);
3912     return 0;
3913 }
3914 
3915 struct target_msginfo {
3916     int msgpool;
3917     int msgmap;
3918     int msgmax;
3919     int msgmnb;
3920     int msgmni;
3921     int msgssz;
3922     int msgtql;
3923     unsigned short int msgseg;
3924 };
3925 
3926 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3927                                               struct msginfo *host_msginfo)
3928 {
3929     struct target_msginfo *target_msginfo;
3930     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3931         return -TARGET_EFAULT;
3932     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3933     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3934     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3935     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3936     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3937     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3938     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3939     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3940     unlock_user_struct(target_msginfo, target_addr, 1);
3941     return 0;
3942 }
3943 
3944 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3945 {
3946     struct msqid_ds dsarg;
3947     struct msginfo msginfo;
3948     abi_long ret = -TARGET_EINVAL;
3949 
3950     cmd &= 0xff;
3951 
3952     switch (cmd) {
3953     case IPC_STAT:
3954     case IPC_SET:
3955     case MSG_STAT:
3956         if (target_to_host_msqid_ds(&dsarg,ptr))
3957             return -TARGET_EFAULT;
3958         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3959         if (host_to_target_msqid_ds(ptr,&dsarg))
3960             return -TARGET_EFAULT;
3961         break;
3962     case IPC_RMID:
3963         ret = get_errno(msgctl(msgid, cmd, NULL));
3964         break;
3965     case IPC_INFO:
3966     case MSG_INFO:
3967         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3968         if (host_to_target_msginfo(ptr, &msginfo))
3969             return -TARGET_EFAULT;
3970         break;
3971     }
3972 
3973     return ret;
3974 }
3975 
3976 struct target_msgbuf {
3977     abi_long mtype;
3978     char mtext[1];
3979 };
3980 
3981 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3982                                  ssize_t msgsz, int msgflg)
3983 {
3984     struct target_msgbuf *target_mb;
3985     struct msgbuf *host_mb;
3986     abi_long ret = 0;
3987 
3988     if (msgsz < 0) {
3989         return -TARGET_EINVAL;
3990     }
3991 
3992     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3993         return -TARGET_EFAULT;
3994     host_mb = g_try_malloc(msgsz + sizeof(long));
3995     if (!host_mb) {
3996         unlock_user_struct(target_mb, msgp, 0);
3997         return -TARGET_ENOMEM;
3998     }
3999     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4000     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4001     ret = -TARGET_ENOSYS;
4002 #ifdef __NR_msgsnd
4003     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4004 #endif
4005 #ifdef __NR_ipc
4006     if (ret == -TARGET_ENOSYS) {
4007         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4008                                  host_mb, 0));
4009     }
4010 #endif
4011     g_free(host_mb);
4012     unlock_user_struct(target_mb, msgp, 0);
4013 
4014     return ret;
4015 }
4016 
4017 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4018                                  ssize_t msgsz, abi_long msgtyp,
4019                                  int msgflg)
4020 {
4021     struct target_msgbuf *target_mb;
4022     char *target_mtext;
4023     struct msgbuf *host_mb;
4024     abi_long ret = 0;
4025 
4026     if (msgsz < 0) {
4027         return -TARGET_EINVAL;
4028     }
4029 
4030     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4031         return -TARGET_EFAULT;
4032 
4033     host_mb = g_try_malloc(msgsz + sizeof(long));
4034     if (!host_mb) {
4035         ret = -TARGET_ENOMEM;
4036         goto end;
4037     }
4038     ret = -TARGET_ENOSYS;
4039 #ifdef __NR_msgrcv
4040     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4041 #endif
4042 #ifdef __NR_ipc
4043     if (ret == -TARGET_ENOSYS) {
4044         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4045                         msgflg, host_mb, msgtyp));
4046     }
4047 #endif
4048 
4049     if (ret > 0) {
4050         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4051         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4052         if (!target_mtext) {
4053             ret = -TARGET_EFAULT;
4054             goto end;
4055         }
4056         memcpy(target_mb->mtext, host_mb->mtext, ret);
4057         unlock_user(target_mtext, target_mtext_addr, ret);
4058     }
4059 
4060     target_mb->mtype = tswapal(host_mb->mtype);
4061 
4062 end:
4063     if (target_mb)
4064         unlock_user_struct(target_mb, msgp, 1);
4065     g_free(host_mb);
4066     return ret;
4067 }
4068 
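/*
 * Note on the bounce buffer used by do_msgsnd()/do_msgrcv() above
 * (an illustrative sketch): the host-side message is laid out as
 *
 *     struct msgbuf { long mtype; char mtext[1]; };
 *
 * so msgsz + sizeof(long) bytes cover the type word plus the message
 * text, while the guest-side target_msgbuf carries an abi_long mtype
 * that has to be converted separately with tswapal().
 */
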
4069 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4070                                                abi_ulong target_addr)
4071 {
4072     struct target_shmid_ds *target_sd;
4073 
4074     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4075         return -TARGET_EFAULT;
4076     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4077         return -TARGET_EFAULT;
4078     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4079     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4080     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4081     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4082     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4083     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4084     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4085     unlock_user_struct(target_sd, target_addr, 0);
4086     return 0;
4087 }
4088 
4089 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4090                                                struct shmid_ds *host_sd)
4091 {
4092     struct target_shmid_ds *target_sd;
4093 
4094     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4095         return -TARGET_EFAULT;
4096     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4097         return -TARGET_EFAULT;
4098     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4099     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4100     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4101     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4102     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4103     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4104     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4105     unlock_user_struct(target_sd, target_addr, 1);
4106     return 0;
4107 }
4108 
4109 struct  target_shminfo {
4110     abi_ulong shmmax;
4111     abi_ulong shmmin;
4112     abi_ulong shmmni;
4113     abi_ulong shmseg;
4114     abi_ulong shmall;
4115 };
4116 
4117 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4118                                               struct shminfo *host_shminfo)
4119 {
4120     struct target_shminfo *target_shminfo;
4121     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4122         return -TARGET_EFAULT;
4123     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4124     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4125     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4126     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4127     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4128     unlock_user_struct(target_shminfo, target_addr, 1);
4129     return 0;
4130 }
4131 
4132 struct target_shm_info {
4133     int used_ids;
4134     abi_ulong shm_tot;
4135     abi_ulong shm_rss;
4136     abi_ulong shm_swp;
4137     abi_ulong swap_attempts;
4138     abi_ulong swap_successes;
4139 };
4140 
4141 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4142                                                struct shm_info *host_shm_info)
4143 {
4144     struct target_shm_info *target_shm_info;
4145     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4146         return -TARGET_EFAULT;
4147     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4148     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4149     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4150     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4151     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4152     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4153     unlock_user_struct(target_shm_info, target_addr, 1);
4154     return 0;
4155 }
4156 
4157 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4158 {
4159     struct shmid_ds dsarg;
4160     struct shminfo shminfo;
4161     struct shm_info shm_info;
4162     abi_long ret = -TARGET_EINVAL;
4163 
4164     cmd &= 0xff;
4165 
4166     switch (cmd) {
4167     case IPC_STAT:
4168     case IPC_SET:
4169     case SHM_STAT:
4170         if (target_to_host_shmid_ds(&dsarg, buf))
4171             return -TARGET_EFAULT;
4172         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4173         if (host_to_target_shmid_ds(buf, &dsarg))
4174             return -TARGET_EFAULT;
4175         break;
4176     case IPC_INFO:
4177         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4178         if (host_to_target_shminfo(buf, &shminfo))
4179             return -TARGET_EFAULT;
4180         break;
4181     case SHM_INFO:
4182         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4183         if (host_to_target_shm_info(buf, &shm_info))
4184             return -TARGET_EFAULT;
4185         break;
4186     case IPC_RMID:
4187     case SHM_LOCK:
4188     case SHM_UNLOCK:
4189         ret = get_errno(shmctl(shmid, cmd, NULL));
4190         break;
4191     }
4192 
4193     return ret;
4194 }
4195 
4196 #ifndef TARGET_FORCE_SHMLBA
4197 /* For most architectures, SHMLBA is the same as the page size;
4198  * some architectures have larger values, in which case they should
4199  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4200  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4201  * and defining its own value for SHMLBA.
4202  *
4203  * The kernel also permits SHMLBA to be set by the architecture to a
4204  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4205  * this means that addresses are rounded to the large size if
4206  * SHM_RND is set but addresses not aligned to that size are not rejected
4207  * as long as they are at least page-aligned. Since the only architecture
4208  * which uses this is ia64 this code doesn't provide for that oddity.
4209  */
4210 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4211 {
4212     return TARGET_PAGE_SIZE;
4213 }
4214 #endif
4215 
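/*
 * A minimal sketch of what a target override could look like
 * (hypothetical value; targets that need this define TARGET_FORCE_SHMLBA
 * and their own target_shmlba() in their target-specific headers):
 *
 *     #define TARGET_FORCE_SHMLBA 1
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 0x4000;      // e.g. a 16K attach granularity
 *     }
 */
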
4216 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4217                                  int shmid, abi_ulong shmaddr, int shmflg)
4218 {
4219     abi_long raddr;
4220     void *host_raddr;
4221     struct shmid_ds shm_info;
4222     int i, ret;
4223     abi_ulong shmlba;
4224 
4225     /* find out the length of the shared memory segment */
4226     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4227     if (is_error(ret)) {
4228         /* can't get length, bail out */
4229         return ret;
4230     }
4231 
4232     shmlba = target_shmlba(cpu_env);
4233 
4234     if (shmaddr & (shmlba - 1)) {
4235         if (shmflg & SHM_RND) {
4236             shmaddr &= ~(shmlba - 1);
4237         } else {
4238             return -TARGET_EINVAL;
4239         }
4240     }
4241     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4242         return -TARGET_EINVAL;
4243     }
4244 
4245     mmap_lock();
4246 
4247     if (shmaddr)
4248         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4249     else {
4250         abi_ulong mmap_start;
4251 
4252         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4253         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4254 
4255         if (mmap_start == -1) {
4256             errno = ENOMEM;
4257             host_raddr = (void *)-1;
4258         } else
4259             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4260     }
4261 
4262     if (host_raddr == (void *)-1) {
4263         mmap_unlock();
4264         return get_errno((long)host_raddr);
4265     }
4266     raddr = h2g((unsigned long)host_raddr);
4267 
4268     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4269                    PAGE_VALID | PAGE_READ |
4270                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4271 
4272     for (i = 0; i < N_SHM_REGIONS; i++) {
4273         if (!shm_regions[i].in_use) {
4274             shm_regions[i].in_use = true;
4275             shm_regions[i].start = raddr;
4276             shm_regions[i].size = shm_info.shm_segsz;
4277             break;
4278         }
4279     }
4280 
4281     mmap_unlock();
4282     return raddr;
4283 
4284 }
4285 
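/*
 * Worked example of the alignment handling in do_shmat() above
 * (illustrative numbers only): with a 4K shmlba, a guest request for
 * shmaddr = 0x20001234 is rejected with -TARGET_EINVAL unless SHM_RND
 * is set, in which case it is rounded down to 0x20001000 before the
 * host shmat() is attempted.
 */
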
4286 static inline abi_long do_shmdt(abi_ulong shmaddr)
4287 {
4288     int i;
4289     abi_long rv;
4290 
4291     mmap_lock();
4292 
4293     for (i = 0; i < N_SHM_REGIONS; ++i) {
4294         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4295             shm_regions[i].in_use = false;
4296             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4297             break;
4298         }
4299     }
4300     rv = get_errno(shmdt(g2h(shmaddr)));
4301 
4302     mmap_unlock();
4303 
4304     return rv;
4305 }
4306 
4307 #ifdef TARGET_NR_ipc
4308 /* ??? This only works with linear mappings.  */
4309 /* do_ipc() must return target values and target errnos. */
4310 static abi_long do_ipc(CPUArchState *cpu_env,
4311                        unsigned int call, abi_long first,
4312                        abi_long second, abi_long third,
4313                        abi_long ptr, abi_long fifth)
4314 {
4315     int version;
4316     abi_long ret = 0;
4317 
4318     version = call >> 16;
4319     call &= 0xffff;
4320 
4321     switch (call) {
4322     case IPCOP_semop:
4323         ret = do_semop(first, ptr, second);
4324         break;
4325 
4326     case IPCOP_semget:
4327         ret = get_errno(semget(first, second, third));
4328         break;
4329 
4330     case IPCOP_semctl: {
4331         /* The semun argument to semctl is passed by value, so dereference the
4332          * ptr argument. */
4333         abi_ulong atptr;
4334         get_user_ual(atptr, ptr);
4335         ret = do_semctl(first, second, third, atptr);
4336         break;
4337     }
4338 
4339     case IPCOP_msgget:
4340         ret = get_errno(msgget(first, second));
4341         break;
4342 
4343     case IPCOP_msgsnd:
4344         ret = do_msgsnd(first, ptr, second, third);
4345         break;
4346 
4347     case IPCOP_msgctl:
4348         ret = do_msgctl(first, second, ptr);
4349         break;
4350 
4351     case IPCOP_msgrcv:
4352         switch (version) {
4353         case 0:
4354             {
4355                 struct target_ipc_kludge {
4356                     abi_long msgp;
4357                     abi_long msgtyp;
4358                 } *tmp;
4359 
4360                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4361                     ret = -TARGET_EFAULT;
4362                     break;
4363                 }
4364 
4365                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4366 
4367                 unlock_user_struct(tmp, ptr, 0);
4368                 break;
4369             }
4370         default:
4371             ret = do_msgrcv(first, ptr, second, fifth, third);
4372         }
4373         break;
4374 
4375     case IPCOP_shmat:
4376         switch (version) {
4377         default:
4378         {
4379             abi_ulong raddr;
4380             raddr = do_shmat(cpu_env, first, ptr, second);
4381             if (is_error(raddr))
4382                 return get_errno(raddr);
4383             if (put_user_ual(raddr, third))
4384                 return -TARGET_EFAULT;
4385             break;
4386         }
4387         case 1:
4388             ret = -TARGET_EINVAL;
4389             break;
4390         }
4391         break;
4392     case IPCOP_shmdt:
4393         ret = do_shmdt(ptr);
4394         break;
4395 
4396     case IPCOP_shmget:
4397         /* IPC_* flag values are the same on all linux platforms */
4398         ret = get_errno(shmget(first, second, third));
4399         break;
4400 
4401     /* IPC_* and SHM_* command values are the same on all linux platforms */
4402     case IPCOP_shmctl:
4403         ret = do_shmctl(first, second, ptr);
4404         break;
4405     default:
4406         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4407                       call, version);
4408         ret = -TARGET_ENOSYS;
4409         break;
4410     }
4411     return ret;
4412 }
4413 #endif
4414 
4415 /* kernel structure types definitions */
4416 
4417 #define STRUCT(name, ...) STRUCT_ ## name,
4418 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4419 enum {
4420 #include "syscall_types.h"
4421 STRUCT_MAX
4422 };
4423 #undef STRUCT
4424 #undef STRUCT_SPECIAL
4425 
4426 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4427 #define STRUCT_SPECIAL(name)
4428 #include "syscall_types.h"
4429 #undef STRUCT
4430 #undef STRUCT_SPECIAL
4431 
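/*
 * Illustrative sketch of the X-macro pattern above: an entry in
 * syscall_types.h along the lines of
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * expands once into the enum constant STRUCT_winsize and once into
 *
 *     static const argtype struct_winsize_def[] = {
 *         TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL
 *     };
 *
 * which the thunk layer then uses to convert the struct between guest
 * and host layouts.
 */
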
4432 typedef struct IOCTLEntry IOCTLEntry;
4433 
4434 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4435                              int fd, int cmd, abi_long arg);
4436 
4437 struct IOCTLEntry {
4438     int target_cmd;
4439     unsigned int host_cmd;
4440     const char *name;
4441     int access;
4442     do_ioctl_fn *do_ioctl;
4443     const argtype arg_type[5];
4444 };
4445 
4446 #define IOC_R 0x0001
4447 #define IOC_W 0x0002
4448 #define IOC_RW (IOC_R | IOC_W)
4449 
4450 #define MAX_STRUCT_SIZE 4096
4451 
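/*
 * Illustrative sketch of how these pieces fit together (hypothetical
 * entry; the real table is generated from ioctls.h later in this file):
 * an ioctl that needs custom conversion points do_ioctl at a helper such
 * as do_ioctl_fs_ioc_fiemap below, roughly
 *
 *     { TARGET_FS_IOC_FIEMAP, FS_IOC_FIEMAP, "FS_IOC_FIEMAP", IOC_RW,
 *       do_ioctl_fs_ioc_fiemap, { MK_PTR(MK_STRUCT(STRUCT_fiemap)) } },
 *
 * while simple ioctls leave do_ioctl NULL and let the generic
 * arg_type-driven thunking copy the argument in and/or out according to
 * IOC_R / IOC_W.
 */
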
4452 #ifdef CONFIG_FIEMAP
4453 /* So fiemap access checks don't overflow on 32 bit systems.
4454  * This is very slightly smaller than the limit imposed by
4455  * the underlying kernel.
4456  */
4457 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4458                             / sizeof(struct fiemap_extent))
4459 
4460 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4461                                        int fd, int cmd, abi_long arg)
4462 {
4463     /* The parameter for this ioctl is a struct fiemap followed
4464      * by an array of struct fiemap_extent whose size is set
4465      * in fiemap->fm_extent_count. The array is filled in by the
4466      * ioctl.
4467      */
4468     int target_size_in, target_size_out;
4469     struct fiemap *fm;
4470     const argtype *arg_type = ie->arg_type;
4471     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4472     void *argptr, *p;
4473     abi_long ret;
4474     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4475     uint32_t outbufsz;
4476     int free_fm = 0;
4477 
4478     assert(arg_type[0] == TYPE_PTR);
4479     assert(ie->access == IOC_RW);
4480     arg_type++;
4481     target_size_in = thunk_type_size(arg_type, 0);
4482     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4483     if (!argptr) {
4484         return -TARGET_EFAULT;
4485     }
4486     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4487     unlock_user(argptr, arg, 0);
4488     fm = (struct fiemap *)buf_temp;
4489     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4490         return -TARGET_EINVAL;
4491     }
4492 
4493     outbufsz = sizeof (*fm) +
4494         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4495 
4496     if (outbufsz > MAX_STRUCT_SIZE) {
4497         /* We can't fit all the extents into the fixed size buffer.
4498          * Allocate one that is large enough and use it instead.
4499          */
4500         fm = g_try_malloc(outbufsz);
4501         if (!fm) {
4502             return -TARGET_ENOMEM;
4503         }
4504         memcpy(fm, buf_temp, sizeof(struct fiemap));
4505         free_fm = 1;
4506     }
4507     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4508     if (!is_error(ret)) {
4509         target_size_out = target_size_in;
4510         /* An extent_count of 0 means we were only counting the extents
4511          * so there are no structs to copy
4512          */
4513         if (fm->fm_extent_count != 0) {
4514             target_size_out += fm->fm_mapped_extents * extent_size;
4515         }
4516         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4517         if (!argptr) {
4518             ret = -TARGET_EFAULT;
4519         } else {
4520             /* Convert the struct fiemap */
4521             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4522             if (fm->fm_extent_count != 0) {
4523                 p = argptr + target_size_in;
4524                 /* ...and then all the struct fiemap_extents */
4525                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4526                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4527                                   THUNK_TARGET);
4528                     p += extent_size;
4529                 }
4530             }
4531             unlock_user(argptr, arg, target_size_out);
4532         }
4533     }
4534     if (free_fm) {
4535         g_free(fm);
4536     }
4537     return ret;
4538 }
4539 #endif
4540 
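/*
 * Illustrative guest-side usage handled by do_ioctl_fs_ioc_fiemap()
 * above (a sketch):
 *
 *     struct fiemap *fm = calloc(1, sizeof(*fm) +
 *                                   8 * sizeof(struct fiemap_extent));
 *     fm->fm_length = FIEMAP_MAX_OFFSET;
 *     fm->fm_extent_count = 8;
 *     ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * The output size depends on fm_extent_count, which is why the handler
 * sizes its bounce buffer from that field before the host call and
 * copies back fm_mapped_extents converted extents afterwards instead of
 * relying on the generic fixed-size thunking.
 */
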
4541 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4542                                 int fd, int cmd, abi_long arg)
4543 {
4544     const argtype *arg_type = ie->arg_type;
4545     int target_size;
4546     void *argptr;
4547     int ret;
4548     struct ifconf *host_ifconf;
4549     uint32_t outbufsz;
4550     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4551     int target_ifreq_size;
4552     int nb_ifreq;
4553     int free_buf = 0;
4554     int i;
4555     int target_ifc_len;
4556     abi_long target_ifc_buf;
4557     int host_ifc_len;
4558     char *host_ifc_buf;
4559 
4560     assert(arg_type[0] == TYPE_PTR);
4561     assert(ie->access == IOC_RW);
4562 
4563     arg_type++;
4564     target_size = thunk_type_size(arg_type, 0);
4565 
4566     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4567     if (!argptr)
4568         return -TARGET_EFAULT;
4569     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4570     unlock_user(argptr, arg, 0);
4571 
4572     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4573     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4574     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4575 
4576     if (target_ifc_buf != 0) {
4577         target_ifc_len = host_ifconf->ifc_len;
4578         nb_ifreq = target_ifc_len / target_ifreq_size;
4579         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4580 
4581         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4582         if (outbufsz > MAX_STRUCT_SIZE) {
4583             /*
4584              * We can't fit all the ifreq entries into the fixed size buffer.
4585              * Allocate one that is large enough and use it instead.
4586              */
4587             host_ifconf = malloc(outbufsz);
4588             if (!host_ifconf) {
4589                 return -TARGET_ENOMEM;
4590             }
4591             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4592             free_buf = 1;
4593         }
4594         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4595 
4596         host_ifconf->ifc_len = host_ifc_len;
4597     } else {
4598         host_ifc_buf = NULL;
4599     }
4600     host_ifconf->ifc_buf = host_ifc_buf;
4601 
4602     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4603     if (!is_error(ret)) {
4604         /* convert host ifc_len to target ifc_len */
4605 
4606         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4607         target_ifc_len = nb_ifreq * target_ifreq_size;
4608         host_ifconf->ifc_len = target_ifc_len;
4609 
4610         /* restore target ifc_buf */
4611 
4612         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4613 
4614         /* copy struct ifconf to target user */
4615 
4616         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4617         if (!argptr)
4618             return -TARGET_EFAULT;
4619         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4620         unlock_user(argptr, arg, target_size);
4621 
4622         if (target_ifc_buf != 0) {
4623             /* copy ifreq[] to target user */
4624             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4625             for (i = 0; i < nb_ifreq ; i++) {
4626                 thunk_convert(argptr + i * target_ifreq_size,
4627                               host_ifc_buf + i * sizeof(struct ifreq),
4628                               ifreq_arg_type, THUNK_TARGET);
4629             }
4630             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4631         }
4632     }
4633 
4634     if (free_buf) {
4635         free(host_ifconf);
4636     }
4637 
4638     return ret;
4639 }
4640 
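/*
 * Illustrative guest-side usage handled by do_ioctl_ifconf() above
 * (a sketch):
 *
 *     struct ifreq reqs[8];
 *     struct ifconf ifc = { .ifc_len = sizeof(reqs), .ifc_req = reqs };
 *     ioctl(sock, SIOCGIFCONF, &ifc);
 *
 * Because the guest's struct ifreq can differ in size from the host's,
 * ifc_len is rescaled in both directions and every returned entry is
 * converted individually through the STRUCT_sockaddr_ifreq thunk.
 */
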
4641 #if defined(CONFIG_USBFS)
4642 #if HOST_LONG_BITS > 64
4643 #error USBDEVFS thunks do not support >64 bit hosts yet.
4644 #endif
4645 struct live_urb {
4646     uint64_t target_urb_adr;
4647     uint64_t target_buf_adr;
4648     char *target_buf_ptr;
4649     struct usbdevfs_urb host_urb;
4650 };
4651 
4652 static GHashTable *usbdevfs_urb_hashtable(void)
4653 {
4654     static GHashTable *urb_hashtable;
4655 
4656     if (!urb_hashtable) {
4657         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4658     }
4659     return urb_hashtable;
4660 }
4661 
4662 static void urb_hashtable_insert(struct live_urb *urb)
4663 {
4664     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4665     g_hash_table_insert(urb_hashtable, urb, urb);
4666 }
4667 
4668 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4669 {
4670     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4671     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4672 }
4673 
4674 static void urb_hashtable_remove(struct live_urb *urb)
4675 {
4676     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4677     g_hash_table_remove(urb_hashtable, urb);
4678 }
4679 
4680 static abi_long
4681 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4682                           int fd, int cmd, abi_long arg)
4683 {
4684     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4685     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4686     struct live_urb *lurb;
4687     void *argptr;
4688     uint64_t hurb;
4689     int target_size;
4690     uintptr_t target_urb_adr;
4691     abi_long ret;
4692 
4693     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4694 
4695     memset(buf_temp, 0, sizeof(uint64_t));
4696     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4697     if (is_error(ret)) {
4698         return ret;
4699     }
4700 
4701     memcpy(&hurb, buf_temp, sizeof(uint64_t));
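    /*
     * The kernel hands back the pointer we submitted, which is the
     * address of host_urb inside a live_urb; step back to the start of
     * the wrapper (a container_of in all but name).
     */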
4702     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4703     if (!lurb->target_urb_adr) {
4704         return -TARGET_EFAULT;
4705     }
4706     urb_hashtable_remove(lurb);
4707     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4708         lurb->host_urb.buffer_length);
4709     lurb->target_buf_ptr = NULL;
4710 
4711     /* restore the guest buffer pointer */
4712     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4713 
4714     /* update the guest urb struct */
4715     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4716     if (!argptr) {
4717         g_free(lurb);
4718         return -TARGET_EFAULT;
4719     }
4720     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4721     unlock_user(argptr, lurb->target_urb_adr, target_size);
4722 
4723     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4724     /* write back the urb handle */
4725     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4726     if (!argptr) {
4727         g_free(lurb);
4728         return -TARGET_EFAULT;
4729     }
4730 
4731     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4732     target_urb_adr = lurb->target_urb_adr;
4733     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4734     unlock_user(argptr, arg, target_size);
4735 
4736     g_free(lurb);
4737     return ret;
4738 }
4739 
4740 static abi_long
4741 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4742                              uint8_t *buf_temp __attribute__((unused)),
4743                              int fd, int cmd, abi_long arg)
4744 {
4745     struct live_urb *lurb;
4746 
4747     /* map target address back to host URB with metadata. */
4748     lurb = urb_hashtable_lookup(arg);
4749     if (!lurb) {
4750         return -TARGET_EFAULT;
4751     }
4752     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4753 }
4754 
4755 static abi_long
4756 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4757                             int fd, int cmd, abi_long arg)
4758 {
4759     const argtype *arg_type = ie->arg_type;
4760     int target_size;
4761     abi_long ret;
4762     void *argptr;
4763     int rw_dir;
4764     struct live_urb *lurb;
4765 
4766     /*
4767      * each submitted URB needs to map to a unique ID for the
4768      * kernel, and that unique ID needs to be a pointer to
4769      * host memory.  hence, we need to malloc for each URB.
4770      * isochronous transfers have a variable length struct.
4771      */
4772     arg_type++;
4773     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4774 
4775     /* construct host copy of urb and metadata */
4776     lurb = g_try_malloc0(sizeof(struct live_urb));
4777     if (!lurb) {
4778         return -TARGET_ENOMEM;
4779     }
4780 
4781     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4782     if (!argptr) {
4783         g_free(lurb);
4784         return -TARGET_EFAULT;
4785     }
4786     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4787     unlock_user(argptr, arg, 0);
4788 
4789     lurb->target_urb_adr = arg;
4790     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4791 
4792     /* buffer space used depends on endpoint type so lock the entire buffer */
4793     /* control type urbs should check the buffer contents for true direction */
4794     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4795     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4796         lurb->host_urb.buffer_length, 1);
4797     if (lurb->target_buf_ptr == NULL) {
4798         g_free(lurb);
4799         return -TARGET_EFAULT;
4800     }
4801 
4802     /* update buffer pointer in host copy */
4803     lurb->host_urb.buffer = lurb->target_buf_ptr;
4804 
4805     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4806     if (is_error(ret)) {
4807         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4808         g_free(lurb);
4809     } else {
4810         urb_hashtable_insert(lurb);
4811     }
4812 
4813     return ret;
4814 }
4815 #endif /* CONFIG_USBFS */
4816 
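/*
 * Device-mapper ioctls carry a variable-size payload behind the fixed
 * struct dm_ioctl header, so the fixed-size buf_temp cannot hold it:
 * a buffer sized from data_size is allocated and the payload is
 * converted according to the specific DM_* command.
 */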
4817 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4818                             int cmd, abi_long arg)
4819 {
4820     void *argptr;
4821     struct dm_ioctl *host_dm;
4822     abi_long guest_data;
4823     uint32_t guest_data_size;
4824     int target_size;
4825     const argtype *arg_type = ie->arg_type;
4826     abi_long ret;
4827     void *big_buf = NULL;
4828     char *host_data;
4829 
4830     arg_type++;
4831     target_size = thunk_type_size(arg_type, 0);
4832     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4833     if (!argptr) {
4834         ret = -TARGET_EFAULT;
4835         goto out;
4836     }
4837     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4838     unlock_user(argptr, arg, 0);
4839 
4840     /* buf_temp is too small, so fetch things into a bigger buffer */
4841     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4842     memcpy(big_buf, buf_temp, target_size);
4843     buf_temp = big_buf;
4844     host_dm = big_buf;
4845 
4846     guest_data = arg + host_dm->data_start;
4847     if ((guest_data - arg) < 0) {
4848         ret = -TARGET_EINVAL;
4849         goto out;
4850     }
4851     guest_data_size = host_dm->data_size - host_dm->data_start;
4852     host_data = (char*)host_dm + host_dm->data_start;
4853 
4854     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4855     if (!argptr) {
4856         ret = -TARGET_EFAULT;
4857         goto out;
4858     }
4859 
4860     switch (ie->host_cmd) {
4861     case DM_REMOVE_ALL:
4862     case DM_LIST_DEVICES:
4863     case DM_DEV_CREATE:
4864     case DM_DEV_REMOVE:
4865     case DM_DEV_SUSPEND:
4866     case DM_DEV_STATUS:
4867     case DM_DEV_WAIT:
4868     case DM_TABLE_STATUS:
4869     case DM_TABLE_CLEAR:
4870     case DM_TABLE_DEPS:
4871     case DM_LIST_VERSIONS:
4872         /* no input data */
4873         break;
4874     case DM_DEV_RENAME:
4875     case DM_DEV_SET_GEOMETRY:
4876         /* data contains only strings */
4877         memcpy(host_data, argptr, guest_data_size);
4878         break;
4879     case DM_TARGET_MSG:
4880         memcpy(host_data, argptr, guest_data_size);
4881         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4882         break;
4883     case DM_TABLE_LOAD:
4884     {
4885         void *gspec = argptr;
4886         void *cur_data = host_data;
4887         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4888         int spec_size = thunk_type_size(arg_type, 0);
4889         int i;
4890 
4891         for (i = 0; i < host_dm->target_count; i++) {
4892             struct dm_target_spec *spec = cur_data;
4893             uint32_t next;
4894             int slen;
4895 
4896             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4897             slen = strlen((char*)gspec + spec_size) + 1;
4898             next = spec->next;
4899             spec->next = sizeof(*spec) + slen;
4900             strcpy((char*)&spec[1], gspec + spec_size);
4901             gspec += next;
4902             cur_data += spec->next;
4903         }
4904         break;
4905     }
4906     default:
4907         ret = -TARGET_EINVAL;
4908         unlock_user(argptr, guest_data, 0);
4909         goto out;
4910     }
4911     unlock_user(argptr, guest_data, 0);
4912 
4913     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4914     if (!is_error(ret)) {
4915         guest_data = arg + host_dm->data_start;
4916         guest_data_size = host_dm->data_size - host_dm->data_start;
4917         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
4918         switch (ie->host_cmd) {
4919         case DM_REMOVE_ALL:
4920         case DM_DEV_CREATE:
4921         case DM_DEV_REMOVE:
4922         case DM_DEV_RENAME:
4923         case DM_DEV_SUSPEND:
4924         case DM_DEV_STATUS:
4925         case DM_TABLE_LOAD:
4926         case DM_TABLE_CLEAR:
4927         case DM_TARGET_MSG:
4928         case DM_DEV_SET_GEOMETRY:
4929             /* no return data */
4930             break;
4931         case DM_LIST_DEVICES:
4932         {
4933             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4934             uint32_t remaining_data = guest_data_size;
4935             void *cur_data = argptr;
4936             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4937             int nl_size = 12; /* can't use thunk_type_size() due to alignment */
4938 
4939             while (1) {
4940                 uint32_t next = nl->next;
4941                 if (next) {
4942                     nl->next = nl_size + (strlen(nl->name) + 1);
4943                 }
4944                 if (remaining_data < nl->next) {
4945                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4946                     break;
4947                 }
4948                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4949                 strcpy(cur_data + nl_size, nl->name);
4950                 cur_data += nl->next;
4951                 remaining_data -= nl->next;
4952                 if (!next) {
4953                     break;
4954                 }
4955                 nl = (void*)nl + next;
4956             }
4957             break;
4958         }
4959         case DM_DEV_WAIT:
4960         case DM_TABLE_STATUS:
4961         {
4962             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4963             void *cur_data = argptr;
4964             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4965             int spec_size = thunk_type_size(arg_type, 0);
4966             int i;
4967 
4968             for (i = 0; i < host_dm->target_count; i++) {
4969                 uint32_t next = spec->next;
4970                 int slen = strlen((char*)&spec[1]) + 1;
4971                 spec->next = (cur_data - argptr) + spec_size + slen;
4972                 if (guest_data_size < spec->next) {
4973                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4974                     break;
4975                 }
4976                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4977                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4978                 cur_data = argptr + spec->next;
4979                 spec = (void*)host_dm + host_dm->data_start + next;
4980             }
4981             break;
4982         }
4983         case DM_TABLE_DEPS:
4984         {
4985             void *hdata = (void*)host_dm + host_dm->data_start;
4986             int count = *(uint32_t*)hdata;
4987             uint64_t *hdev = hdata + 8;
4988             uint64_t *gdev = argptr + 8;
4989             int i;
4990 
4991             *(uint32_t*)argptr = tswap32(count);
4992             for (i = 0; i < count; i++) {
4993                 *gdev = tswap64(*hdev);
4994                 gdev++;
4995                 hdev++;
4996             }
4997             break;
4998         }
4999         case DM_LIST_VERSIONS:
5000         {
5001             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5002             uint32_t remaining_data = guest_data_size;
5003             void *cur_data = argptr;
5004             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5005             int vers_size = thunk_type_size(arg_type, 0);
5006 
5007             while (1) {
5008                 uint32_t next = vers->next;
5009                 if (next) {
5010                     vers->next = vers_size + (strlen(vers->name) + 1);
5011                 }
5012                 if (remaining_data < vers->next) {
5013                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5014                     break;
5015                 }
5016                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5017                 strcpy(cur_data + vers_size, vers->name);
5018                 cur_data += vers->next;
5019                 remaining_data -= vers->next;
5020                 if (!next) {
5021                     break;
5022                 }
5023                 vers = (void*)vers + next;
5024             }
5025             break;
5026         }
5027         default:
5028             unlock_user(argptr, guest_data, 0);
5029             ret = -TARGET_EINVAL;
5030             goto out;
5031         }
5032         unlock_user(argptr, guest_data, guest_data_size);
5033 
5034         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5035         if (!argptr) {
5036             ret = -TARGET_EFAULT;
5037             goto out;
5038         }
5039         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5040         unlock_user(argptr, arg, target_size);
5041     }
5042 out:
5043     g_free(big_buf);
5044     return ret;
5045 }
5046 
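/*
 * struct blkpg_ioctl_arg contains a pointer to a struct blkpg_partition
 * in guest memory; fetch and convert that separately, then point the
 * host copy at it before issuing the ioctl.
 */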
5047 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5048                                int cmd, abi_long arg)
5049 {
5050     void *argptr;
5051     int target_size;
5052     const argtype *arg_type = ie->arg_type;
5053     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5054     abi_long ret;
5055 
5056     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5057     struct blkpg_partition host_part;
5058 
5059     /* Read and convert blkpg */
5060     arg_type++;
5061     target_size = thunk_type_size(arg_type, 0);
5062     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5063     if (!argptr) {
5064         ret = -TARGET_EFAULT;
5065         goto out;
5066     }
5067     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5068     unlock_user(argptr, arg, 0);
5069 
5070     switch (host_blkpg->op) {
5071     case BLKPG_ADD_PARTITION:
5072     case BLKPG_DEL_PARTITION:
5073         /* payload is struct blkpg_partition */
5074         break;
5075     default:
5076         /* Unknown opcode */
5077         ret = -TARGET_EINVAL;
5078         goto out;
5079     }
5080 
5081     /* Read and convert blkpg->data */
5082     arg = (abi_long)(uintptr_t)host_blkpg->data;
5083     target_size = thunk_type_size(part_arg_type, 0);
5084     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5085     if (!argptr) {
5086         ret = -TARGET_EFAULT;
5087         goto out;
5088     }
5089     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5090     unlock_user(argptr, arg, 0);
5091 
5092     /* Swizzle the data pointer to our local copy and call! */
5093     host_blkpg->data = &host_part;
5094     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5095 
5096 out:
5097     return ret;
5098 }
5099 
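/*
 * The routing ioctls (SIOCADDRT/SIOCDELRT) take a struct rtentry whose
 * rt_dev member is a pointer to a device name string.  The struct is
 * converted field by field so that rt_dev can be replaced with a locked
 * host copy of that string.
 */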
5100 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5101                                 int fd, int cmd, abi_long arg)
5102 {
5103     const argtype *arg_type = ie->arg_type;
5104     const StructEntry *se;
5105     const argtype *field_types;
5106     const int *dst_offsets, *src_offsets;
5107     int target_size;
5108     void *argptr;
5109     abi_ulong *target_rt_dev_ptr = NULL;
5110     unsigned long *host_rt_dev_ptr = NULL;
5111     abi_long ret;
5112     int i;
5113 
5114     assert(ie->access == IOC_W);
5115     assert(*arg_type == TYPE_PTR);
5116     arg_type++;
5117     assert(*arg_type == TYPE_STRUCT);
5118     target_size = thunk_type_size(arg_type, 0);
5119     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5120     if (!argptr) {
5121         return -TARGET_EFAULT;
5122     }
5123     arg_type++;
5124     assert(*arg_type == (int)STRUCT_rtentry);
5125     se = struct_entries + *arg_type++;
5126     assert(se->convert[0] == NULL);
5127     /* convert struct here to be able to catch rt_dev string */
5128     field_types = se->field_types;
5129     dst_offsets = se->field_offsets[THUNK_HOST];
5130     src_offsets = se->field_offsets[THUNK_TARGET];
5131     for (i = 0; i < se->nb_fields; i++) {
5132         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5133             assert(*field_types == TYPE_PTRVOID);
5134             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5135             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5136             if (*target_rt_dev_ptr != 0) {
5137                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5138                                                   tswapal(*target_rt_dev_ptr));
5139                 if (!*host_rt_dev_ptr) {
5140                     unlock_user(argptr, arg, 0);
5141                     return -TARGET_EFAULT;
5142                 }
5143             } else {
5144                 *host_rt_dev_ptr = 0;
5145             }
5146             field_types++;
5147             continue;
5148         }
5149         field_types = thunk_convert(buf_temp + dst_offsets[i],
5150                                     argptr + src_offsets[i],
5151                                     field_types, THUNK_HOST);
5152     }
5153     unlock_user(argptr, arg, 0);
5154 
5155     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5156 
5157     assert(host_rt_dev_ptr != NULL);
5158     assert(target_rt_dev_ptr != NULL);
5159     if (*host_rt_dev_ptr != 0) {
5160         unlock_user((void *)*host_rt_dev_ptr,
5161                     *target_rt_dev_ptr, 0);
5162     }
5163     return ret;
5164 }
5165 
5166 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5167                                      int fd, int cmd, abi_long arg)
5168 {
5169     int sig = target_to_host_signal(arg);
5170     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5171 }
5172 
5173 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5174                                     int fd, int cmd, abi_long arg)
5175 {
5176     struct timeval tv;
5177     abi_long ret;
5178 
5179     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5180     if (is_error(ret)) {
5181         return ret;
5182     }
5183 
5184     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5185         if (copy_to_user_timeval(arg, &tv)) {
5186             return -TARGET_EFAULT;
5187         }
5188     } else {
5189         if (copy_to_user_timeval64(arg, &tv)) {
5190             return -TARGET_EFAULT;
5191         }
5192     }
5193 
5194     return ret;
5195 }
5196 
5197 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5198                                       int fd, int cmd, abi_long arg)
5199 {
5200     struct timespec ts;
5201     abi_long ret;
5202 
5203     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5204     if (is_error(ret)) {
5205         return ret;
5206     }
5207 
5208     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5209         if (host_to_target_timespec(arg, &ts)) {
5210             return -TARGET_EFAULT;
5211         }
5212     } else {
5213         if (host_to_target_timespec64(arg, &ts)) {
5214             return -TARGET_EFAULT;
5215         }
5216     }
5217 
5218     return ret;
5219 }
5220 
5221 #ifdef TIOCGPTPEER
5222 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5223                                      int fd, int cmd, abi_long arg)
5224 {
5225     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5226     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5227 }
5228 #endif
5229 
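/*
 * The ioctl dispatch table is generated from ioctls.h: IOCTL() entries
 * are converted generically via the thunk machinery, IOCTL_SPECIAL()
 * entries name a do_ioctl_*() helper, and IOCTL_IGNORE() entries are
 * recognized by name but fail with -TARGET_ENOSYS rather than being
 * passed to the host.
 */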
5230 static IOCTLEntry ioctl_entries[] = {
5231 #define IOCTL(cmd, access, ...) \
5232     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5233 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5234     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5235 #define IOCTL_IGNORE(cmd) \
5236     { TARGET_ ## cmd, 0, #cmd },
5237 #include "ioctls.h"
5238     { 0, 0, },
5239 };
5240 
5241 /* ??? Implement proper locking for ioctls.  */
5242 /* do_ioctl() must return target values and target errnos. */
5243 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5244 {
5245     const IOCTLEntry *ie;
5246     const argtype *arg_type;
5247     abi_long ret;
5248     uint8_t buf_temp[MAX_STRUCT_SIZE];
5249     int target_size;
5250     void *argptr;
5251 
5252     ie = ioctl_entries;
5253     for(;;) {
5254         if (ie->target_cmd == 0) {
5255             qemu_log_mask(
5256                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5257             return -TARGET_ENOSYS;
5258         }
5259         if (ie->target_cmd == cmd)
5260             break;
5261         ie++;
5262     }
5263     arg_type = ie->arg_type;
5264     if (ie->do_ioctl) {
5265         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5266     } else if (!ie->host_cmd) {
5267         /* Some architectures define BSD ioctls in their headers
5268            that are not implemented in Linux.  */
5269         return -TARGET_ENOSYS;
5270     }
5271 
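    /*
     * Generic path: the argument is described by arg_type.  For TYPE_PTR
     * arguments the pointed-to struct is thunk-converted into buf_temp
     * before the call (IOC_W/IOC_RW) and/or converted back to the guest
     * afterwards (IOC_R/IOC_RW).
     */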
5272     switch(arg_type[0]) {
5273     case TYPE_NULL:
5274         /* no argument */
5275         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5276         break;
5277     case TYPE_PTRVOID:
5278     case TYPE_INT:
5279     case TYPE_LONG:
5280     case TYPE_ULONG:
5281         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5282         break;
5283     case TYPE_PTR:
5284         arg_type++;
5285         target_size = thunk_type_size(arg_type, 0);
5286         switch(ie->access) {
5287         case IOC_R:
5288             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5289             if (!is_error(ret)) {
5290                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5291                 if (!argptr)
5292                     return -TARGET_EFAULT;
5293                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5294                 unlock_user(argptr, arg, target_size);
5295             }
5296             break;
5297         case IOC_W:
5298             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5299             if (!argptr)
5300                 return -TARGET_EFAULT;
5301             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5302             unlock_user(argptr, arg, 0);
5303             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5304             break;
5305         default:
5306         case IOC_RW:
5307             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5308             if (!argptr)
5309                 return -TARGET_EFAULT;
5310             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5311             unlock_user(argptr, arg, 0);
5312             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5313             if (!is_error(ret)) {
5314                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5315                 if (!argptr)
5316                     return -TARGET_EFAULT;
5317                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5318                 unlock_user(argptr, arg, target_size);
5319             }
5320             break;
5321         }
5322         break;
5323     default:
5324         qemu_log_mask(LOG_UNIMP,
5325                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5326                       (long)cmd, arg_type[0]);
5327         ret = -TARGET_ENOSYS;
5328         break;
5329     }
5330     return ret;
5331 }
5332 
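/*
 * termios flag bits do not have the same numeric values on every
 * architecture, so each of c_iflag/c_oflag/c_cflag/c_lflag is translated
 * through a bitmask_transtbl of (target mask, target bits, host mask,
 * host bits) entries.
 */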
5333 static const bitmask_transtbl iflag_tbl[] = {
5334         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5335         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5336         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5337         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5338         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5339         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5340         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5341         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5342         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5343         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5344         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5345         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5346         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5347         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5348         { 0, 0, 0, 0 }
5349 };
5350 
5351 static const bitmask_transtbl oflag_tbl[] = {
5352 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5353 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5354 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5355 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5356 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5357 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5358 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5359 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5360 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5361 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5362 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5363 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5364 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5365 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5366 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5367 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5368 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5369 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5370 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5371 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5372 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5373 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5374 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5375 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5376 	{ 0, 0, 0, 0 }
5377 };
5378 
5379 static const bitmask_transtbl cflag_tbl[] = {
5380 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5381 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5382 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5383 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5384 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5385 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5386 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5387 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5388 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5389 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5390 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5391 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5392 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5393 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5394 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5395 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5396 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5397 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5398 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5399 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5400 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5401 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5402 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5403 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5404 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5405 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5406 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5407 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5408 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5409 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5410 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5411 	{ 0, 0, 0, 0 }
5412 };
5413 
5414 static const bitmask_transtbl lflag_tbl[] = {
5415 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5416 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5417 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5418 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5419 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5420 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5421 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5422 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5423 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5424 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5425 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5426 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5427 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5428 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5429 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5430 	{ 0, 0, 0, 0 }
5431 };
5432 
5433 static void target_to_host_termios (void *dst, const void *src)
5434 {
5435     struct host_termios *host = dst;
5436     const struct target_termios *target = src;
5437 
5438     host->c_iflag =
5439         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5440     host->c_oflag =
5441         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5442     host->c_cflag =
5443         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5444     host->c_lflag =
5445         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5446     host->c_line = target->c_line;
5447 
5448     memset(host->c_cc, 0, sizeof(host->c_cc));
5449     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5450     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5451     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5452     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5453     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5454     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5455     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5456     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5457     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5458     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5459     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5460     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5461     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5462     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5463     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5464     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5465     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5466 }
5467 
5468 static void host_to_target_termios (void *dst, const void *src)
5469 {
5470     struct target_termios *target = dst;
5471     const struct host_termios *host = src;
5472 
5473     target->c_iflag =
5474         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5475     target->c_oflag =
5476         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5477     target->c_cflag =
5478         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5479     target->c_lflag =
5480         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5481     target->c_line = host->c_line;
5482 
5483     memset(target->c_cc, 0, sizeof(target->c_cc));
5484     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5485     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5486     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5487     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5488     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5489     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5490     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5491     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5492     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5493     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5494     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5495     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5496     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5497     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5498     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5499     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5500     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5501 }
5502 
5503 static const StructEntry struct_termios_def = {
5504     .convert = { host_to_target_termios, target_to_host_termios },
5505     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5506     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5507 };
5508 
5509 static bitmask_transtbl mmap_flags_tbl[] = {
5510     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5511     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5512     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5513     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5514       MAP_ANONYMOUS, MAP_ANONYMOUS },
5515     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5516       MAP_GROWSDOWN, MAP_GROWSDOWN },
5517     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5518       MAP_DENYWRITE, MAP_DENYWRITE },
5519     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5520       MAP_EXECUTABLE, MAP_EXECUTABLE },
5521     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5522     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5523       MAP_NORESERVE, MAP_NORESERVE },
5524     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5525     /* MAP_STACK had been ignored by the kernel for quite some time.
5526        Recognize it for the target insofar as we do not want to pass
5527        it through to the host.  */
5528     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5529     { 0, 0, 0, 0 }
5530 };
5531 
5532 #if defined(TARGET_I386)
5533 
5534 /* NOTE: there is really only one LDT shared by all the threads */
5535 static uint8_t *ldt_table;
5536 
5537 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5538 {
5539     int size;
5540     void *p;
5541 
5542     if (!ldt_table)
5543         return 0;
5544     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5545     if (size > bytecount)
5546         size = bytecount;
5547     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5548     if (!p)
5549         return -TARGET_EFAULT;
5550     /* ??? Should this be byteswapped?  */
5551     memcpy(p, ldt_table, size);
5552     unlock_user(p, ptr, size);
5553     return size;
5554 }
5555 
5556 /* XXX: add locking support */
5557 static abi_long write_ldt(CPUX86State *env,
5558                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5559 {
5560     struct target_modify_ldt_ldt_s ldt_info;
5561     struct target_modify_ldt_ldt_s *target_ldt_info;
5562     int seg_32bit, contents, read_exec_only, limit_in_pages;
5563     int seg_not_present, useable, lm;
5564     uint32_t *lp, entry_1, entry_2;
5565 
5566     if (bytecount != sizeof(ldt_info))
5567         return -TARGET_EINVAL;
5568     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5569         return -TARGET_EFAULT;
5570     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5571     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5572     ldt_info.limit = tswap32(target_ldt_info->limit);
5573     ldt_info.flags = tswap32(target_ldt_info->flags);
5574     unlock_user_struct(target_ldt_info, ptr, 0);
5575 
5576     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5577         return -TARGET_EINVAL;
5578     seg_32bit = ldt_info.flags & 1;
5579     contents = (ldt_info.flags >> 1) & 3;
5580     read_exec_only = (ldt_info.flags >> 3) & 1;
5581     limit_in_pages = (ldt_info.flags >> 4) & 1;
5582     seg_not_present = (ldt_info.flags >> 5) & 1;
5583     useable = (ldt_info.flags >> 6) & 1;
5584 #ifdef TARGET_ABI32
5585     lm = 0;
5586 #else
5587     lm = (ldt_info.flags >> 7) & 1;
5588 #endif
5589     if (contents == 3) {
5590         if (oldmode)
5591             return -TARGET_EINVAL;
5592         if (seg_not_present == 0)
5593             return -TARGET_EINVAL;
5594     }
5595     /* allocate the LDT */
5596     if (!ldt_table) {
5597         env->ldt.base = target_mmap(0,
5598                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5599                                     PROT_READ|PROT_WRITE,
5600                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5601         if (env->ldt.base == -1)
5602             return -TARGET_ENOMEM;
5603         memset(g2h(env->ldt.base), 0,
5604                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5605         env->ldt.limit = 0xffff;
5606         ldt_table = g2h(env->ldt.base);
5607     }
5608 
5609     /* NOTE: same code as Linux kernel */
5610     /* Allow LDTs to be cleared by the user. */
5611     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5612         if (oldmode ||
5613             (contents == 0             &&
5614              read_exec_only == 1       &&
5615              seg_32bit == 0            &&
5616              limit_in_pages == 0       &&
5617              seg_not_present == 1      &&
5618              useable == 0 )) {
5619             entry_1 = 0;
5620             entry_2 = 0;
5621             goto install;
5622         }
5623     }
5624 
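    /* Pack base, limit and the flag bits into the two words of an x86
       descriptor; 0x7000 sets S=1 and DPL=3, and the present bit is
       derived from seg_not_present. */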
5625     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5626         (ldt_info.limit & 0x0ffff);
5627     entry_2 = (ldt_info.base_addr & 0xff000000) |
5628         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5629         (ldt_info.limit & 0xf0000) |
5630         ((read_exec_only ^ 1) << 9) |
5631         (contents << 10) |
5632         ((seg_not_present ^ 1) << 15) |
5633         (seg_32bit << 22) |
5634         (limit_in_pages << 23) |
5635         (lm << 21) |
5636         0x7000;
5637     if (!oldmode)
5638         entry_2 |= (useable << 20);
5639 
5640     /* Install the new entry ...  */
5641 install:
5642     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5643     lp[0] = tswap32(entry_1);
5644     lp[1] = tswap32(entry_2);
5645     return 0;
5646 }
5647 
5648 /* specific and weird i386 syscalls */
5649 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5650                               unsigned long bytecount)
5651 {
5652     abi_long ret;
5653 
5654     switch (func) {
5655     case 0:
5656         ret = read_ldt(ptr, bytecount);
5657         break;
5658     case 1:
5659         ret = write_ldt(env, ptr, bytecount, 1);
5660         break;
5661     case 0x11:
5662         ret = write_ldt(env, ptr, bytecount, 0);
5663         break;
5664     default:
5665         ret = -TARGET_ENOSYS;
5666         break;
5667     }
5668     return ret;
5669 }
5670 
5671 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5672 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5673 {
5674     uint64_t *gdt_table = g2h(env->gdt.base);
5675     struct target_modify_ldt_ldt_s ldt_info;
5676     struct target_modify_ldt_ldt_s *target_ldt_info;
5677     int seg_32bit, contents, read_exec_only, limit_in_pages;
5678     int seg_not_present, useable, lm;
5679     uint32_t *lp, entry_1, entry_2;
5680     int i;
5681 
5682     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5683     if (!target_ldt_info)
5684         return -TARGET_EFAULT;
5685     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5686     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5687     ldt_info.limit = tswap32(target_ldt_info->limit);
5688     ldt_info.flags = tswap32(target_ldt_info->flags);
5689     if (ldt_info.entry_number == -1) {
5690         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5691             if (gdt_table[i] == 0) {
5692                 ldt_info.entry_number = i;
5693                 target_ldt_info->entry_number = tswap32(i);
5694                 break;
5695             }
5696         }
5697     }
5698     unlock_user_struct(target_ldt_info, ptr, 1);
5699 
5700     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5701         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5702            return -TARGET_EINVAL;
5703     seg_32bit = ldt_info.flags & 1;
5704     contents = (ldt_info.flags >> 1) & 3;
5705     read_exec_only = (ldt_info.flags >> 3) & 1;
5706     limit_in_pages = (ldt_info.flags >> 4) & 1;
5707     seg_not_present = (ldt_info.flags >> 5) & 1;
5708     useable = (ldt_info.flags >> 6) & 1;
5709 #ifdef TARGET_ABI32
5710     lm = 0;
5711 #else
5712     lm = (ldt_info.flags >> 7) & 1;
5713 #endif
5714 
5715     if (contents == 3) {
5716         if (seg_not_present == 0)
5717             return -TARGET_EINVAL;
5718     }
5719 
5720     /* NOTE: same code as Linux kernel */
5721     /* Allow LDTs to be cleared by the user. */
5722     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5723         if ((contents == 0             &&
5724              read_exec_only == 1       &&
5725              seg_32bit == 0            &&
5726              limit_in_pages == 0       &&
5727              seg_not_present == 1      &&
5728              useable == 0 )) {
5729             entry_1 = 0;
5730             entry_2 = 0;
5731             goto install;
5732         }
5733     }
5734 
5735     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5736         (ldt_info.limit & 0x0ffff);
5737     entry_2 = (ldt_info.base_addr & 0xff000000) |
5738         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5739         (ldt_info.limit & 0xf0000) |
5740         ((read_exec_only ^ 1) << 9) |
5741         (contents << 10) |
5742         ((seg_not_present ^ 1) << 15) |
5743         (seg_32bit << 22) |
5744         (limit_in_pages << 23) |
5745         (useable << 20) |
5746         (lm << 21) |
5747         0x7000;
5748 
5749     /* Install the new entry ...  */
5750 install:
5751     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5752     lp[0] = tswap32(entry_1);
5753     lp[1] = tswap32(entry_2);
5754     return 0;
5755 }
5756 
5757 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5758 {
5759     struct target_modify_ldt_ldt_s *target_ldt_info;
5760     uint64_t *gdt_table = g2h(env->gdt.base);
5761     uint32_t base_addr, limit, flags;
5762     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5763     int seg_not_present, useable, lm;
5764     uint32_t *lp, entry_1, entry_2;
5765 
5766     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5767     if (!target_ldt_info)
5768         return -TARGET_EFAULT;
5769     idx = tswap32(target_ldt_info->entry_number);
5770     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5771         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5772         unlock_user_struct(target_ldt_info, ptr, 1);
5773         return -TARGET_EINVAL;
5774     }
5775     lp = (uint32_t *)(gdt_table + idx);
5776     entry_1 = tswap32(lp[0]);
5777     entry_2 = tswap32(lp[1]);
5778 
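    /* Unpack the two descriptor words back into the modify_ldt-style
       base/limit/flags encoding (the inverse of do_set_thread_area). */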
5779     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5780     contents = (entry_2 >> 10) & 3;
5781     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5782     seg_32bit = (entry_2 >> 22) & 1;
5783     limit_in_pages = (entry_2 >> 23) & 1;
5784     useable = (entry_2 >> 20) & 1;
5785 #ifdef TARGET_ABI32
5786     lm = 0;
5787 #else
5788     lm = (entry_2 >> 21) & 1;
5789 #endif
5790     flags = (seg_32bit << 0) | (contents << 1) |
5791         (read_exec_only << 3) | (limit_in_pages << 4) |
5792         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5793     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5794     base_addr = (entry_1 >> 16) |
5795         (entry_2 & 0xff000000) |
5796         ((entry_2 & 0xff) << 16);
5797     target_ldt_info->base_addr = tswapal(base_addr);
5798     target_ldt_info->limit = tswap32(limit);
5799     target_ldt_info->flags = tswap32(flags);
5800     unlock_user_struct(target_ldt_info, ptr, 1);
5801     return 0;
5802 }
5803 #endif /* TARGET_I386 && TARGET_ABI32 */
5804 
5805 #ifndef TARGET_ABI32
5806 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5807 {
5808     abi_long ret = 0;
5809     abi_ulong val;
5810     int idx;
5811 
5812     switch(code) {
5813     case TARGET_ARCH_SET_GS:
5814     case TARGET_ARCH_SET_FS:
5815         if (code == TARGET_ARCH_SET_GS)
5816             idx = R_GS;
5817         else
5818             idx = R_FS;
5819         cpu_x86_load_seg(env, idx, 0);
5820         env->segs[idx].base = addr;
5821         break;
5822     case TARGET_ARCH_GET_GS:
5823     case TARGET_ARCH_GET_FS:
5824         if (code == TARGET_ARCH_GET_GS)
5825             idx = R_GS;
5826         else
5827             idx = R_FS;
5828         val = env->segs[idx].base;
5829         if (put_user(val, addr, abi_ulong))
5830             ret = -TARGET_EFAULT;
5831         break;
5832     default:
5833         ret = -TARGET_EINVAL;
5834         break;
5835     }
5836     return ret;
5837 }
5838 #endif
5839 
5840 #endif /* defined(TARGET_I386) */
5841 
5842 #define NEW_STACK_SIZE 0x40000
5843 
5844 
5845 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
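/*
 * Everything the parent hands to clone_func(): the new CPU state, the
 * TID pointers to fill in, and a mutex/condvar pair the child uses to
 * report its tid back to the parent before it starts running guest code.
 */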
5846 typedef struct {
5847     CPUArchState *env;
5848     pthread_mutex_t mutex;
5849     pthread_cond_t cond;
5850     pthread_t thread;
5851     uint32_t tid;
5852     abi_ulong child_tidptr;
5853     abi_ulong parent_tidptr;
5854     sigset_t sigmask;
5855 } new_thread_info;
5856 
5857 static void *clone_func(void *arg)
5858 {
5859     new_thread_info *info = arg;
5860     CPUArchState *env;
5861     CPUState *cpu;
5862     TaskState *ts;
5863 
5864     rcu_register_thread();
5865     tcg_register_thread();
5866     env = info->env;
5867     cpu = env_cpu(env);
5868     thread_cpu = cpu;
5869     ts = (TaskState *)cpu->opaque;
5870     info->tid = sys_gettid();
5871     task_settid(ts);
5872     if (info->child_tidptr)
5873         put_user_u32(info->tid, info->child_tidptr);
5874     if (info->parent_tidptr)
5875         put_user_u32(info->tid, info->parent_tidptr);
5876     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5877     /* Enable signals.  */
5878     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5879     /* Signal to the parent that we're ready.  */
5880     pthread_mutex_lock(&info->mutex);
5881     pthread_cond_broadcast(&info->cond);
5882     pthread_mutex_unlock(&info->mutex);
5883     /* Wait until the parent has finished initializing the tls state.  */
5884     pthread_mutex_lock(&clone_lock);
5885     pthread_mutex_unlock(&clone_lock);
5886     cpu_loop(env);
5887     /* never exits */
5888     return NULL;
5889 }
5890 
5891 /* do_fork() must return host values and target errnos (unlike most
5892    do_*() functions). */
5893 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5894                    abi_ulong parent_tidptr, target_ulong newtls,
5895                    abi_ulong child_tidptr)
5896 {
5897     CPUState *cpu = env_cpu(env);
5898     int ret;
5899     TaskState *ts;
5900     CPUState *new_cpu;
5901     CPUArchState *new_env;
5902     sigset_t sigmask;
5903 
5904     flags &= ~CLONE_IGNORED_FLAGS;
5905 
5906     /* Emulate vfork() with fork() */
5907     if (flags & CLONE_VFORK)
5908         flags &= ~(CLONE_VFORK | CLONE_VM);
5909 
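    /* CLONE_VM means the child shares our address space: implement it as
       a new host thread running a copy of the CPU state.  Without
       CLONE_VM this is treated as a fork, handled in the else branch
       below. */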
5910     if (flags & CLONE_VM) {
5911         TaskState *parent_ts = (TaskState *)cpu->opaque;
5912         new_thread_info info;
5913         pthread_attr_t attr;
5914 
5915         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5916             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5917             return -TARGET_EINVAL;
5918         }
5919 
5920         ts = g_new0(TaskState, 1);
5921         init_task_state(ts);
5922 
5923         /* Grab a mutex so that thread setup appears atomic.  */
5924         pthread_mutex_lock(&clone_lock);
5925 
5926         /* we create a new CPU instance. */
5927         new_env = cpu_copy(env);
5928         /* Init regs that differ from the parent.  */
5929         cpu_clone_regs_child(new_env, newsp, flags);
5930         cpu_clone_regs_parent(env, flags);
5931         new_cpu = env_cpu(new_env);
5932         new_cpu->opaque = ts;
5933         ts->bprm = parent_ts->bprm;
5934         ts->info = parent_ts->info;
5935         ts->signal_mask = parent_ts->signal_mask;
5936 
5937         if (flags & CLONE_CHILD_CLEARTID) {
5938             ts->child_tidptr = child_tidptr;
5939         }
5940 
5941         if (flags & CLONE_SETTLS) {
5942             cpu_set_tls (new_env, newtls);
5943         }
5944 
5945         memset(&info, 0, sizeof(info));
5946         pthread_mutex_init(&info.mutex, NULL);
5947         pthread_mutex_lock(&info.mutex);
5948         pthread_cond_init(&info.cond, NULL);
5949         info.env = new_env;
5950         if (flags & CLONE_CHILD_SETTID) {
5951             info.child_tidptr = child_tidptr;
5952         }
5953         if (flags & CLONE_PARENT_SETTID) {
5954             info.parent_tidptr = parent_tidptr;
5955         }
5956 
5957         ret = pthread_attr_init(&attr);
5958         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5959         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5960         /* It is not safe to deliver signals until the child has finished
5961            initializing, so temporarily block all signals.  */
5962         sigfillset(&sigmask);
5963         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5964         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5965 
5966         /* If this is our first additional thread, we need to ensure we
5967          * generate code for parallel execution and flush old translations.
5968          */
5969         if (!parallel_cpus) {
5970             parallel_cpus = true;
5971             tb_flush(cpu);
5972         }
5973 
5974         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5975         /* TODO: Free new CPU state if thread creation failed.  */
5976 
5977         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5978         pthread_attr_destroy(&attr);
5979         if (ret == 0) {
5980             /* Wait for the child to initialize.  */
5981             pthread_cond_wait(&info.cond, &info.mutex);
5982             ret = info.tid;
5983         } else {
5984             ret = -1;
5985         }
5986         pthread_mutex_unlock(&info.mutex);
5987         pthread_cond_destroy(&info.cond);
5988         pthread_mutex_destroy(&info.mutex);
5989         pthread_mutex_unlock(&clone_lock);
5990     } else {
5991         /* if no CLONE_VM, we consider it a fork */
5992         if (flags & CLONE_INVALID_FORK_FLAGS) {
5993             return -TARGET_EINVAL;
5994         }
5995 
5996         /* We can't support custom termination signals */
5997         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5998             return -TARGET_EINVAL;
5999         }
6000 
6001         if (block_signals()) {
6002             return -TARGET_ERESTARTSYS;
6003         }
6004 
6005         fork_start();
6006         ret = fork();
6007         if (ret == 0) {
6008             /* Child Process.  */
6009             cpu_clone_regs_child(env, newsp, flags);
6010             fork_end(1);
6011             /* There is a race condition here.  The parent process could
6012                theoretically read the TID in the child process before the child
6013                tid is set.  This would require using either ptrace
6014                (not implemented) or having *_tidptr point at a shared memory
6015                mapping.  We can't repeat the spinlock hack used above because
6016                the child process gets its own copy of the lock.  */
6017             if (flags & CLONE_CHILD_SETTID)
6018                 put_user_u32(sys_gettid(), child_tidptr);
6019             if (flags & CLONE_PARENT_SETTID)
6020                 put_user_u32(sys_gettid(), parent_tidptr);
6021             ts = (TaskState *)cpu->opaque;
6022             if (flags & CLONE_SETTLS)
6023                 cpu_set_tls (env, newtls);
6024             if (flags & CLONE_CHILD_CLEARTID)
6025                 ts->child_tidptr = child_tidptr;
6026         } else {
6027             cpu_clone_regs_parent(env, flags);
6028             fork_end(0);
6029         }
6030     }
6031     return ret;
6032 }
6033 
6034 /* warning: doesn't handle Linux-specific flags... */
6035 static int target_to_host_fcntl_cmd(int cmd)
6036 {
6037     int ret;
6038 
6039     switch(cmd) {
6040     case TARGET_F_DUPFD:
6041     case TARGET_F_GETFD:
6042     case TARGET_F_SETFD:
6043     case TARGET_F_GETFL:
6044     case TARGET_F_SETFL:
6045         ret = cmd;
6046         break;
6047     case TARGET_F_GETLK:
6048         ret = F_GETLK64;
6049         break;
6050     case TARGET_F_SETLK:
6051         ret = F_SETLK64;
6052         break;
6053     case TARGET_F_SETLKW:
6054         ret = F_SETLKW64;
6055         break;
6056     case TARGET_F_GETOWN:
6057         ret = F_GETOWN;
6058         break;
6059     case TARGET_F_SETOWN:
6060         ret = F_SETOWN;
6061         break;
6062     case TARGET_F_GETSIG:
6063         ret = F_GETSIG;
6064         break;
6065     case TARGET_F_SETSIG:
6066         ret = F_SETSIG;
6067         break;
6068 #if TARGET_ABI_BITS == 32
6069     case TARGET_F_GETLK64:
6070         ret = F_GETLK64;
6071         break;
6072     case TARGET_F_SETLK64:
6073         ret = F_SETLK64;
6074         break;
6075     case TARGET_F_SETLKW64:
6076         ret = F_SETLKW64;
6077         break;
6078 #endif
6079     case TARGET_F_SETLEASE:
6080         ret = F_SETLEASE;
6081         break;
6082     case TARGET_F_GETLEASE:
6083         ret = F_GETLEASE;
6084         break;
6085 #ifdef F_DUPFD_CLOEXEC
6086     case TARGET_F_DUPFD_CLOEXEC:
6087         ret = F_DUPFD_CLOEXEC;
6088         break;
6089 #endif
6090     case TARGET_F_NOTIFY:
6091         ret = F_NOTIFY;
6092         break;
6093 #ifdef F_GETOWN_EX
6094     case TARGET_F_GETOWN_EX:
6095         ret = F_GETOWN_EX;
6096         break;
6097 #endif
6098 #ifdef F_SETOWN_EX
6099     case TARGET_F_SETOWN_EX:
6100         ret = F_SETOWN_EX;
6101         break;
6102 #endif
6103 #ifdef F_SETPIPE_SZ
6104     case TARGET_F_SETPIPE_SZ:
6105         ret = F_SETPIPE_SZ;
6106         break;
6107     case TARGET_F_GETPIPE_SZ:
6108         ret = F_GETPIPE_SZ;
6109         break;
6110 #endif
6111     default:
6112         ret = -TARGET_EINVAL;
6113         break;
6114     }
6115 
6116 #if defined(__powerpc64__)
6117     /* On PPC64, the glibc headers define F_*LK64 as 12, 13 and 14, which
6118      * are not supported by the kernel. The glibc fcntl call actually
6119      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6120      * the syscall directly, adjust to what is supported by the kernel.
6121      */
6122     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6123         ret -= F_GETLK64 - 5;
6124     }
6125 #endif
6126 
6127     return ret;
6128 }
6129 
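/*
 * FLOCK_TRANSTBL expands to a list of case labels; it is instantiated
 * twice below with different definitions of TRANSTBL_CONVERT to get the
 * target-to-host and host-to-target l_type conversions.
 */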
6130 #define FLOCK_TRANSTBL \
6131     switch (type) { \
6132     TRANSTBL_CONVERT(F_RDLCK); \
6133     TRANSTBL_CONVERT(F_WRLCK); \
6134     TRANSTBL_CONVERT(F_UNLCK); \
6135     TRANSTBL_CONVERT(F_EXLCK); \
6136     TRANSTBL_CONVERT(F_SHLCK); \
6137     }
6138 
6139 static int target_to_host_flock(int type)
6140 {
6141 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6142     FLOCK_TRANSTBL
6143 #undef  TRANSTBL_CONVERT
6144     return -TARGET_EINVAL;
6145 }
6146 
6147 static int host_to_target_flock(int type)
6148 {
6149 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6150     FLOCK_TRANSTBL
6151 #undef  TRANSTBL_CONVERT
6152     /* If we don't know how to convert the value coming from
6153      * the host, we copy it to the target field as-is.
6154      */
6155     return type;
6156 }
6157 
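/*
 * The guest's struct flock is converted to a host struct flock64 so that
 * the 64-bit F_*LK64 commands can be used on the host regardless of the
 * guest ABI.
 */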
6158 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6159                                             abi_ulong target_flock_addr)
6160 {
6161     struct target_flock *target_fl;
6162     int l_type;
6163 
6164     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6165         return -TARGET_EFAULT;
6166     }
6167 
6168     __get_user(l_type, &target_fl->l_type);
6169     l_type = target_to_host_flock(l_type);
6170     if (l_type < 0) {
6171         return l_type;
6172     }
6173     fl->l_type = l_type;
6174     __get_user(fl->l_whence, &target_fl->l_whence);
6175     __get_user(fl->l_start, &target_fl->l_start);
6176     __get_user(fl->l_len, &target_fl->l_len);
6177     __get_user(fl->l_pid, &target_fl->l_pid);
6178     unlock_user_struct(target_fl, target_flock_addr, 0);
6179     return 0;
6180 }
6181 
6182 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6183                                           const struct flock64 *fl)
6184 {
6185     struct target_flock *target_fl;
6186     short l_type;
6187 
6188     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6189         return -TARGET_EFAULT;
6190     }
6191 
6192     l_type = host_to_target_flock(fl->l_type);
6193     __put_user(l_type, &target_fl->l_type);
6194     __put_user(fl->l_whence, &target_fl->l_whence);
6195     __put_user(fl->l_start, &target_fl->l_start);
6196     __put_user(fl->l_len, &target_fl->l_len);
6197     __put_user(fl->l_pid, &target_fl->l_pid);
6198     unlock_user_struct(target_fl, target_flock_addr, 1);
6199     return 0;
6200 }
6201 
6202 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6203 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6204 
6205 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6206 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6207                                                    abi_ulong target_flock_addr)
6208 {
6209     struct target_oabi_flock64 *target_fl;
6210     int l_type;
6211 
6212     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6213         return -TARGET_EFAULT;
6214     }
6215 
6216     __get_user(l_type, &target_fl->l_type);
6217     l_type = target_to_host_flock(l_type);
6218     if (l_type < 0) {
6219         return l_type;
6220     }
6221     fl->l_type = l_type;
6222     __get_user(fl->l_whence, &target_fl->l_whence);
6223     __get_user(fl->l_start, &target_fl->l_start);
6224     __get_user(fl->l_len, &target_fl->l_len);
6225     __get_user(fl->l_pid, &target_fl->l_pid);
6226     unlock_user_struct(target_fl, target_flock_addr, 0);
6227     return 0;
6228 }
6229 
6230 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6231                                                  const struct flock64 *fl)
6232 {
6233     struct target_oabi_flock64 *target_fl;
6234     short l_type;
6235 
6236     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6237         return -TARGET_EFAULT;
6238     }
6239 
6240     l_type = host_to_target_flock(fl->l_type);
6241     __put_user(l_type, &target_fl->l_type);
6242     __put_user(fl->l_whence, &target_fl->l_whence);
6243     __put_user(fl->l_start, &target_fl->l_start);
6244     __put_user(fl->l_len, &target_fl->l_len);
6245     __put_user(fl->l_pid, &target_fl->l_pid);
6246     unlock_user_struct(target_fl, target_flock_addr, 1);
6247     return 0;
6248 }
6249 #endif
6250 
6251 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6252                                               abi_ulong target_flock_addr)
6253 {
6254     struct target_flock64 *target_fl;
6255     int l_type;
6256 
6257     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6258         return -TARGET_EFAULT;
6259     }
6260 
6261     __get_user(l_type, &target_fl->l_type);
6262     l_type = target_to_host_flock(l_type);
6263     if (l_type < 0) {
6264         return l_type;
6265     }
6266     fl->l_type = l_type;
6267     __get_user(fl->l_whence, &target_fl->l_whence);
6268     __get_user(fl->l_start, &target_fl->l_start);
6269     __get_user(fl->l_len, &target_fl->l_len);
6270     __get_user(fl->l_pid, &target_fl->l_pid);
6271     unlock_user_struct(target_fl, target_flock_addr, 0);
6272     return 0;
6273 }
6274 
6275 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6276                                             const struct flock64 *fl)
6277 {
6278     struct target_flock64 *target_fl;
6279     short l_type;
6280 
6281     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6282         return -TARGET_EFAULT;
6283     }
6284 
6285     l_type = host_to_target_flock(fl->l_type);
6286     __put_user(l_type, &target_fl->l_type);
6287     __put_user(fl->l_whence, &target_fl->l_whence);
6288     __put_user(fl->l_start, &target_fl->l_start);
6289     __put_user(fl->l_len, &target_fl->l_len);
6290     __put_user(fl->l_pid, &target_fl->l_pid);
6291     unlock_user_struct(target_fl, target_flock_addr, 1);
6292     return 0;
6293 }
6294 
6295 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6296 {
6297     struct flock64 fl64;
6298 #ifdef F_GETOWN_EX
6299     struct f_owner_ex fox;
6300     struct target_f_owner_ex *target_fox;
6301 #endif
6302     abi_long ret;
6303     int host_cmd = target_to_host_fcntl_cmd(cmd);
6304 
6305     if (host_cmd == -TARGET_EINVAL)
6306         return host_cmd;
6307 
6308     switch(cmd) {
6309     case TARGET_F_GETLK:
6310         ret = copy_from_user_flock(&fl64, arg);
6311         if (ret) {
6312             return ret;
6313         }
6314         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6315         if (ret == 0) {
6316             ret = copy_to_user_flock(arg, &fl64);
6317         }
6318         break;
6319 
6320     case TARGET_F_SETLK:
6321     case TARGET_F_SETLKW:
6322         ret = copy_from_user_flock(&fl64, arg);
6323         if (ret) {
6324             return ret;
6325         }
6326         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6327         break;
6328 
6329     case TARGET_F_GETLK64:
6330         ret = copy_from_user_flock64(&fl64, arg);
6331         if (ret) {
6332             return ret;
6333         }
6334         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6335         if (ret == 0) {
6336             ret = copy_to_user_flock64(arg, &fl64);
6337         }
6338         break;
6339     case TARGET_F_SETLK64:
6340     case TARGET_F_SETLKW64:
6341         ret = copy_from_user_flock64(&fl64, arg);
6342         if (ret) {
6343             return ret;
6344         }
6345         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6346         break;
6347 
6348     case TARGET_F_GETFL:
6349         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6350         if (ret >= 0) {
6351             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6352         }
6353         break;
6354 
6355     case TARGET_F_SETFL:
6356         ret = get_errno(safe_fcntl(fd, host_cmd,
6357                                    target_to_host_bitmask(arg,
6358                                                           fcntl_flags_tbl)));
6359         break;
6360 
6361 #ifdef F_GETOWN_EX
6362     case TARGET_F_GETOWN_EX:
6363         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6364         if (ret >= 0) {
6365             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6366                 return -TARGET_EFAULT;
6367             target_fox->type = tswap32(fox.type);
6368             target_fox->pid = tswap32(fox.pid);
6369             unlock_user_struct(target_fox, arg, 1);
6370         }
6371         break;
6372 #endif
6373 
6374 #ifdef F_SETOWN_EX
6375     case TARGET_F_SETOWN_EX:
6376         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6377             return -TARGET_EFAULT;
6378         fox.type = tswap32(target_fox->type);
6379         fox.pid = tswap32(target_fox->pid);
6380         unlock_user_struct(target_fox, arg, 0);
6381         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6382         break;
6383 #endif
6384 
6385     case TARGET_F_SETOWN:
6386     case TARGET_F_GETOWN:
6387     case TARGET_F_SETSIG:
6388     case TARGET_F_GETSIG:
6389     case TARGET_F_SETLEASE:
6390     case TARGET_F_GETLEASE:
6391     case TARGET_F_SETPIPE_SZ:
6392     case TARGET_F_GETPIPE_SZ:
6393         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6394         break;
6395 
6396     default:
6397         ret = get_errno(safe_fcntl(fd, cmd, arg));
6398         break;
6399     }
6400     return ret;
6401 }
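
/*
 * Usage sketch (assumption, for illustration only): the TARGET_NR_fcntl
 * handler in the big do_syscall1() switch further below is expected to
 * forward its raw guest arguments straight to this helper, roughly
 *
 *   case TARGET_NR_fcntl:
 *       return do_fcntl(arg1, arg2, arg3);
 *
 * so that all command translation and flock copying stays in one place.
 */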
6402 
6403 #ifdef USE_UID16
6404 
6405 static inline int high2lowuid(int uid)
6406 {
6407     if (uid > 65535)
6408         return 65534;
6409     else
6410         return uid;
6411 }
6412 
6413 static inline int high2lowgid(int gid)
6414 {
6415     if (gid > 65535)
6416         return 65534;
6417     else
6418         return gid;
6419 }
6420 
6421 static inline int low2highuid(int uid)
6422 {
6423     if ((int16_t)uid == -1)
6424         return -1;
6425     else
6426         return uid;
6427 }
6428 
6429 static inline int low2highgid(int gid)
6430 {
6431     if ((int16_t)gid == -1)
6432         return -1;
6433     else
6434         return gid;
6435 }
6436 static inline int tswapid(int id)
6437 {
6438     return tswap16(id);
6439 }
6440 
6441 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6442 
6443 #else /* !USE_UID16 */
6444 static inline int high2lowuid(int uid)
6445 {
6446     return uid;
6447 }
6448 static inline int high2lowgid(int gid)
6449 {
6450     return gid;
6451 }
6452 static inline int low2highuid(int uid)
6453 {
6454     return uid;
6455 }
6456 static inline int low2highgid(int gid)
6457 {
6458     return gid;
6459 }
6460 static inline int tswapid(int id)
6461 {
6462     return tswap32(id);
6463 }
6464 
6465 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6466 
6467 #endif /* USE_UID16 */
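
/*
 * Worked examples for the 16-bit UID case (illustrative; not part of the
 * original source):
 *
 *   high2lowuid(70000) -> 65534   (IDs that do not fit in 16 bits are
 *                                  reported to the guest as the overflow
 *                                  UID, matching kernel behaviour)
 *   low2highuid(65535) -> -1      (the 16-bit "no change" sentinel is
 *                                  widened so the host syscall still sees -1)
 */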
6468 
6469 /* We must do direct syscalls for setting UID/GID, because we want to
6470  * implement the Linux system call semantics of "change only for this thread",
6471  * not the libc/POSIX semantics of "change for all threads in process".
6472  * (See http://ewontfix.com/17/ for more details.)
6473  * We use the 32-bit version of the syscalls if present; if it is not
6474  * then either the host architecture supports 32-bit UIDs natively with
6475  * the standard syscall, or the 16-bit UID is the best we can do.
6476  */
6477 #ifdef __NR_setuid32
6478 #define __NR_sys_setuid __NR_setuid32
6479 #else
6480 #define __NR_sys_setuid __NR_setuid
6481 #endif
6482 #ifdef __NR_setgid32
6483 #define __NR_sys_setgid __NR_setgid32
6484 #else
6485 #define __NR_sys_setgid __NR_setgid
6486 #endif
6487 #ifdef __NR_setresuid32
6488 #define __NR_sys_setresuid __NR_setresuid32
6489 #else
6490 #define __NR_sys_setresuid __NR_setresuid
6491 #endif
6492 #ifdef __NR_setresgid32
6493 #define __NR_sys_setresgid __NR_setresgid32
6494 #else
6495 #define __NR_sys_setresgid __NR_setresgid
6496 #endif
6497 
6498 _syscall1(int, sys_setuid, uid_t, uid)
6499 _syscall1(int, sys_setgid, gid_t, gid)
6500 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6501 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
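
/*
 * Minimal usage sketch (illustrative; not part of the original file).  It
 * shows the intended shape of a caller: map a possibly 16-bit guest uid and
 * then issue the raw per-thread syscall wrapper declared above, so that only
 * the calling thread is affected.  The helper name is invented for
 * illustration; the real TARGET_NR_setuid handling is in do_syscall1() below.
 */
static inline abi_long example_sketch_setuid(abi_ulong target_uid)
{
    /* low2highuid() turns the 16-bit "-1" sentinel back into a real -1. */
    return get_errno(sys_setuid(low2highuid(target_uid)));
}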
6502 
6503 void syscall_init(void)
6504 {
6505     IOCTLEntry *ie;
6506     const argtype *arg_type;
6507     int size;
6508     int i;
6509 
6510     thunk_init(STRUCT_MAX);
6511 
6512 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6513 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6514 #include "syscall_types.h"
6515 #undef STRUCT
6516 #undef STRUCT_SPECIAL
6517 
6518     /* Build the target_to_host_errno_table[] from
6519      * host_to_target_errno_table[]. */
6520     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6521         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6522     }
6523 
6524     /* We patch the ioctl size if necessary. We rely on the fact that
6525        no ioctl has all the bits set to '1' in its size field. */
6526     ie = ioctl_entries;
6527     while (ie->target_cmd != 0) {
6528         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6529             TARGET_IOC_SIZEMASK) {
6530             arg_type = ie->arg_type;
6531             if (arg_type[0] != TYPE_PTR) {
6532                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6533                         ie->target_cmd);
6534                 exit(1);
6535             }
6536             arg_type++;
6537             size = thunk_type_size(arg_type, 0);
6538             ie->target_cmd = (ie->target_cmd &
6539                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6540                 (size << TARGET_IOC_SIZESHIFT);
6541         }
6542 
6543         /* automatic consistency check if same arch */
6544 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6545     (defined(__x86_64__) && defined(TARGET_X86_64))
6546         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6547             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6548                     ie->name, ie->target_cmd, ie->host_cmd);
6549         }
6550 #endif
6551         ie++;
6552     }
6553 }
6554 
6555 #if TARGET_ABI_BITS == 32
6556 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6557 {
6558 #ifdef TARGET_WORDS_BIGENDIAN
6559     return ((uint64_t)word0 << 32) | word1;
6560 #else
6561     return ((uint64_t)word1 << 32) | word0;
6562 #endif
6563 }
6564 #else /* TARGET_ABI_BITS == 32 */
6565 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6566 {
6567     return word0;
6568 }
6569 #endif /* TARGET_ABI_BITS != 32 */
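
/*
 * Worked example (illustrative; not in the original source): a 32-bit
 * little-endian guest passing the 64-bit offset 0x0000000180000000 splits it
 * into word0 = 0x80000000 (low part) and word1 = 0x00000001 (high part);
 * target_offset64() then reassembles
 *
 *   ((uint64_t)word1 << 32) | word0 == 0x0000000180000000
 *
 * For 64-bit ABIs the offset already arrives in one register, so word0 is
 * simply returned unchanged.
 */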
6570 
6571 #ifdef TARGET_NR_truncate64
6572 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6573                                          abi_long arg2,
6574                                          abi_long arg3,
6575                                          abi_long arg4)
6576 {
6577     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6578         arg2 = arg3;
6579         arg3 = arg4;
6580     }
6581     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6582 }
6583 #endif
6584 
6585 #ifdef TARGET_NR_ftruncate64
6586 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6587                                           abi_long arg2,
6588                                           abi_long arg3,
6589                                           abi_long arg4)
6590 {
6591     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6592         arg2 = arg3;
6593         arg3 = arg4;
6594     }
6595     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6596 }
6597 #endif
6598 
6599 #if defined(TARGET_NR_timer_settime) || \
6600     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6601 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6602                                                  abi_ulong target_addr)
6603 {
6604     struct target_itimerspec *target_itspec;
6605 
6606     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6607         return -TARGET_EFAULT;
6608     }
6609 
6610     host_itspec->it_interval.tv_sec =
6611                             tswapal(target_itspec->it_interval.tv_sec);
6612     host_itspec->it_interval.tv_nsec =
6613                             tswapal(target_itspec->it_interval.tv_nsec);
6614     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6615     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6616 
6617     unlock_user_struct(target_itspec, target_addr, 1);
6618     return 0;
6619 }
6620 #endif
6621 
6622 #if ((defined(TARGET_NR_timerfd_gettime) || \
6623       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6624     defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6625 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6626                                                struct itimerspec *host_its)
6627 {
6628     struct target_itimerspec *target_itspec;
6629 
6630     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6631         return -TARGET_EFAULT;
6632     }
6633 
6634     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6635     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6636 
6637     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6638     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6639 
6640     unlock_user_struct(target_itspec, target_addr, 0);
6641     return 0;
6642 }
6643 #endif
6644 
6645 #if defined(TARGET_NR_adjtimex) || \
6646     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6647 static inline abi_long target_to_host_timex(struct timex *host_tx,
6648                                             abi_long target_addr)
6649 {
6650     struct target_timex *target_tx;
6651 
6652     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6653         return -TARGET_EFAULT;
6654     }
6655 
6656     __get_user(host_tx->modes, &target_tx->modes);
6657     __get_user(host_tx->offset, &target_tx->offset);
6658     __get_user(host_tx->freq, &target_tx->freq);
6659     __get_user(host_tx->maxerror, &target_tx->maxerror);
6660     __get_user(host_tx->esterror, &target_tx->esterror);
6661     __get_user(host_tx->status, &target_tx->status);
6662     __get_user(host_tx->constant, &target_tx->constant);
6663     __get_user(host_tx->precision, &target_tx->precision);
6664     __get_user(host_tx->tolerance, &target_tx->tolerance);
6665     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6666     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6667     __get_user(host_tx->tick, &target_tx->tick);
6668     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6669     __get_user(host_tx->jitter, &target_tx->jitter);
6670     __get_user(host_tx->shift, &target_tx->shift);
6671     __get_user(host_tx->stabil, &target_tx->stabil);
6672     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6673     __get_user(host_tx->calcnt, &target_tx->calcnt);
6674     __get_user(host_tx->errcnt, &target_tx->errcnt);
6675     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6676     __get_user(host_tx->tai, &target_tx->tai);
6677 
6678     unlock_user_struct(target_tx, target_addr, 0);
6679     return 0;
6680 }
6681 
6682 static inline abi_long host_to_target_timex(abi_long target_addr,
6683                                             struct timex *host_tx)
6684 {
6685     struct target_timex *target_tx;
6686 
6687     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6688         return -TARGET_EFAULT;
6689     }
6690 
6691     __put_user(host_tx->modes, &target_tx->modes);
6692     __put_user(host_tx->offset, &target_tx->offset);
6693     __put_user(host_tx->freq, &target_tx->freq);
6694     __put_user(host_tx->maxerror, &target_tx->maxerror);
6695     __put_user(host_tx->esterror, &target_tx->esterror);
6696     __put_user(host_tx->status, &target_tx->status);
6697     __put_user(host_tx->constant, &target_tx->constant);
6698     __put_user(host_tx->precision, &target_tx->precision);
6699     __put_user(host_tx->tolerance, &target_tx->tolerance);
6700     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6701     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6702     __put_user(host_tx->tick, &target_tx->tick);
6703     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6704     __put_user(host_tx->jitter, &target_tx->jitter);
6705     __put_user(host_tx->shift, &target_tx->shift);
6706     __put_user(host_tx->stabil, &target_tx->stabil);
6707     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6708     __put_user(host_tx->calcnt, &target_tx->calcnt);
6709     __put_user(host_tx->errcnt, &target_tx->errcnt);
6710     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6711     __put_user(host_tx->tai, &target_tx->tai);
6712 
6713     unlock_user_struct(target_tx, target_addr, 1);
6714     return 0;
6715 }
6716 #endif
6717 
6718 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6719                                                abi_ulong target_addr)
6720 {
6721     struct target_sigevent *target_sevp;
6722 
6723     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6724         return -TARGET_EFAULT;
6725     }
6726 
6727     /* This union is awkward on 64 bit systems because it has a 32 bit
6728      * integer and a pointer in it; we follow the conversion approach
6729      * used for handling sigval types in signal.c so the guest should get
6730      * the correct value back even if we did a 64 bit byteswap and it's
6731      * using the 32 bit integer.
6732      */
6733     host_sevp->sigev_value.sival_ptr =
6734         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6735     host_sevp->sigev_signo =
6736         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6737     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6738     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6739 
6740     unlock_user_struct(target_sevp, target_addr, 1);
6741     return 0;
6742 }
6743 
6744 #if defined(TARGET_NR_mlockall)
6745 static inline int target_to_host_mlockall_arg(int arg)
6746 {
6747     int result = 0;
6748 
6749     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6750         result |= MCL_CURRENT;
6751     }
6752     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6753         result |= MCL_FUTURE;
6754     }
6755     return result;
6756 }
6757 #endif
6758 
6759 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6760      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6761      defined(TARGET_NR_newfstatat))
6762 static inline abi_long host_to_target_stat64(void *cpu_env,
6763                                              abi_ulong target_addr,
6764                                              struct stat *host_st)
6765 {
6766 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6767     if (((CPUARMState *)cpu_env)->eabi) {
6768         struct target_eabi_stat64 *target_st;
6769 
6770         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6771             return -TARGET_EFAULT;
6772         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6773         __put_user(host_st->st_dev, &target_st->st_dev);
6774         __put_user(host_st->st_ino, &target_st->st_ino);
6775 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6776         __put_user(host_st->st_ino, &target_st->__st_ino);
6777 #endif
6778         __put_user(host_st->st_mode, &target_st->st_mode);
6779         __put_user(host_st->st_nlink, &target_st->st_nlink);
6780         __put_user(host_st->st_uid, &target_st->st_uid);
6781         __put_user(host_st->st_gid, &target_st->st_gid);
6782         __put_user(host_st->st_rdev, &target_st->st_rdev);
6783         __put_user(host_st->st_size, &target_st->st_size);
6784         __put_user(host_st->st_blksize, &target_st->st_blksize);
6785         __put_user(host_st->st_blocks, &target_st->st_blocks);
6786         __put_user(host_st->st_atime, &target_st->target_st_atime);
6787         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6788         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6789 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6790         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6791         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6792         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6793 #endif
6794         unlock_user_struct(target_st, target_addr, 1);
6795     } else
6796 #endif
6797     {
6798 #if defined(TARGET_HAS_STRUCT_STAT64)
6799         struct target_stat64 *target_st;
6800 #else
6801         struct target_stat *target_st;
6802 #endif
6803 
6804         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6805             return -TARGET_EFAULT;
6806         memset(target_st, 0, sizeof(*target_st));
6807         __put_user(host_st->st_dev, &target_st->st_dev);
6808         __put_user(host_st->st_ino, &target_st->st_ino);
6809 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6810         __put_user(host_st->st_ino, &target_st->__st_ino);
6811 #endif
6812         __put_user(host_st->st_mode, &target_st->st_mode);
6813         __put_user(host_st->st_nlink, &target_st->st_nlink);
6814         __put_user(host_st->st_uid, &target_st->st_uid);
6815         __put_user(host_st->st_gid, &target_st->st_gid);
6816         __put_user(host_st->st_rdev, &target_st->st_rdev);
6817         /* XXX: better use of kernel struct */
6818         __put_user(host_st->st_size, &target_st->st_size);
6819         __put_user(host_st->st_blksize, &target_st->st_blksize);
6820         __put_user(host_st->st_blocks, &target_st->st_blocks);
6821         __put_user(host_st->st_atime, &target_st->target_st_atime);
6822         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6823         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6824 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6825         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6826         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6827         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6828 #endif
6829         unlock_user_struct(target_st, target_addr, 1);
6830     }
6831 
6832     return 0;
6833 }
6834 #endif
6835 
6836 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6837 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6838                                             abi_ulong target_addr)
6839 {
6840     struct target_statx *target_stx;
6841 
6842     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6843         return -TARGET_EFAULT;
6844     }
6845     memset(target_stx, 0, sizeof(*target_stx));
6846 
6847     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6848     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6849     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6850     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6851     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6852     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6853     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6854     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6855     __put_user(host_stx->stx_size, &target_stx->stx_size);
6856     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6857     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6858     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6859     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6860     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6861     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6862     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6863     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6864     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6865     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6866     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6867     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6868     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6869     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6870 
6871     unlock_user_struct(target_stx, target_addr, 1);
6872 
6873     return 0;
6874 }
6875 #endif
6876 
6877 
6878 /* ??? Using host futex calls even when target atomic operations
6879    are not really atomic probably breaks things.  However, implementing
6880    futexes locally would make futexes shared between multiple processes
6881    tricky.  In any case they're probably useless, because guest atomic
6882    operations won't work either.  */
6883 #if defined(TARGET_NR_futex)
6884 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6885                     target_ulong uaddr2, int val3)
6886 {
6887     struct timespec ts, *pts;
6888     int base_op;
6889 
6890     /* ??? We assume FUTEX_* constants are the same on both host
6891        and target.  */
6892 #ifdef FUTEX_CMD_MASK
6893     base_op = op & FUTEX_CMD_MASK;
6894 #else
6895     base_op = op;
6896 #endif
6897     switch (base_op) {
6898     case FUTEX_WAIT:
6899     case FUTEX_WAIT_BITSET:
6900         if (timeout) {
6901             pts = &ts;
6902             target_to_host_timespec(pts, timeout);
6903         } else {
6904             pts = NULL;
6905         }
6906         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6907                          pts, NULL, val3));
6908     case FUTEX_WAKE:
6909         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6910     case FUTEX_FD:
6911         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6912     case FUTEX_REQUEUE:
6913     case FUTEX_CMP_REQUEUE:
6914     case FUTEX_WAKE_OP:
6915         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6916            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6917            But the prototype takes a `struct timespec *'; insert casts
6918            to satisfy the compiler.  We do not need to tswap TIMEOUT
6919            since it's not compared to guest memory.  */
6920         pts = (struct timespec *)(uintptr_t) timeout;
6921         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6922                                     g2h(uaddr2),
6923                                     (base_op == FUTEX_CMP_REQUEUE
6924                                      ? tswap32(val3)
6925                                      : val3)));
6926     default:
6927         return -TARGET_ENOSYS;
6928     }
6929 }
6930 #endif
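
/*
 * Illustrative cross-reference (not part of the original source): the
 * guest-visible futex path above is mirrored by QEMU's own direct use of
 * futexes in the thread-exit path later in this file, where the
 * CLONE_CHILD_CLEARTID address is cleared and woken:
 *
 *   put_user_u32(0, ts->child_tidptr);
 *   sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
 *
 * which is exactly what a guest pthread_join() is waiting on.
 */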
6931 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6932 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6933                                      abi_long handle, abi_long mount_id,
6934                                      abi_long flags)
6935 {
6936     struct file_handle *target_fh;
6937     struct file_handle *fh;
6938     int mid = 0;
6939     abi_long ret;
6940     char *name;
6941     unsigned int size, total_size;
6942 
6943     if (get_user_s32(size, handle)) {
6944         return -TARGET_EFAULT;
6945     }
6946 
6947     name = lock_user_string(pathname);
6948     if (!name) {
6949         return -TARGET_EFAULT;
6950     }
6951 
6952     total_size = sizeof(struct file_handle) + size;
6953     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6954     if (!target_fh) {
6955         unlock_user(name, pathname, 0);
6956         return -TARGET_EFAULT;
6957     }
6958 
6959     fh = g_malloc0(total_size);
6960     fh->handle_bytes = size;
6961 
6962     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6963     unlock_user(name, pathname, 0);
6964 
6965     /* man name_to_handle_at(2):
6966      * Other than the use of the handle_bytes field, the caller should treat
6967      * the file_handle structure as an opaque data type
6968      */
6969 
6970     memcpy(target_fh, fh, total_size);
6971     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6972     target_fh->handle_type = tswap32(fh->handle_type);
6973     g_free(fh);
6974     unlock_user(target_fh, handle, total_size);
6975 
6976     if (put_user_s32(mid, mount_id)) {
6977         return -TARGET_EFAULT;
6978     }
6979 
6980     return ret;
6981 
6982 }
6983 #endif
6984 
6985 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6986 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6987                                      abi_long flags)
6988 {
6989     struct file_handle *target_fh;
6990     struct file_handle *fh;
6991     unsigned int size, total_size;
6992     abi_long ret;
6993 
6994     if (get_user_s32(size, handle)) {
6995         return -TARGET_EFAULT;
6996     }
6997 
6998     total_size = sizeof(struct file_handle) + size;
6999     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7000     if (!target_fh) {
7001         return -TARGET_EFAULT;
7002     }
7003 
7004     fh = g_memdup(target_fh, total_size);
7005     fh->handle_bytes = size;
7006     fh->handle_type = tswap32(target_fh->handle_type);
7007 
7008     ret = get_errno(open_by_handle_at(mount_fd, fh,
7009                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7010 
7011     g_free(fh);
7012 
7013     unlock_user(target_fh, handle, total_size);
7014 
7015     return ret;
7016 }
7017 #endif
7018 
7019 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7020 
7021 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7022 {
7023     int host_flags;
7024     target_sigset_t *target_mask;
7025     sigset_t host_mask;
7026     abi_long ret;
7027 
7028     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7029         return -TARGET_EINVAL;
7030     }
7031     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7032         return -TARGET_EFAULT;
7033     }
7034 
7035     target_to_host_sigset(&host_mask, target_mask);
7036 
7037     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7038 
7039     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7040     if (ret >= 0) {
7041         fd_trans_register(ret, &target_signalfd_trans);
7042     }
7043 
7044     unlock_user_struct(target_mask, mask, 0);
7045 
7046     return ret;
7047 }
7048 #endif
7049 
7050 /* Map host to target signal numbers for the wait family of syscalls.
7051    Assume all other status bits are the same.  */
7052 int host_to_target_waitstatus(int status)
7053 {
7054     if (WIFSIGNALED(status)) {
7055         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7056     }
7057     if (WIFSTOPPED(status)) {
7058         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7059                | (status & 0xff);
7060     }
7061     return status;
7062 }
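
/*
 * Usage sketch (illustrative; not part of the original source): the wait
 * family of handlers below funnels the host status through this helper
 * before writing it back to guest memory, e.g. for TARGET_NR_waitpid:
 *
 *   ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
 *   ...
 *   put_user_s32(host_to_target_waitstatus(status), arg2);
 *
 * A normal WIFEXITED status passes through unchanged; only the signal
 * number embedded in a killed or stopped status is renumbered.
 */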
7063 
7064 static int open_self_cmdline(void *cpu_env, int fd)
7065 {
7066     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7067     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7068     int i;
7069 
7070     for (i = 0; i < bprm->argc; i++) {
7071         size_t len = strlen(bprm->argv[i]) + 1;
7072 
7073         if (write(fd, bprm->argv[i], len) != len) {
7074             return -1;
7075         }
7076     }
7077 
7078     return 0;
7079 }
7080 
7081 static int open_self_maps(void *cpu_env, int fd)
7082 {
7083     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7084     TaskState *ts = cpu->opaque;
7085     FILE *fp;
7086     char *line = NULL;
7087     size_t len = 0;
7088     ssize_t read;
7089 
7090     fp = fopen("/proc/self/maps", "r");
7091     if (fp == NULL) {
7092         return -1;
7093     }
7094 
7095     while ((read = getline(&line, &len, fp)) != -1) {
7096         int fields, dev_maj, dev_min, inode;
7097         uint64_t min, max, offset;
7098         char flag_r, flag_w, flag_x, flag_p;
7099         char path[512] = "";
7100         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7101                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7102                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7103 
7104         if ((fields < 10) || (fields > 11)) {
7105             continue;
7106         }
7107         if (h2g_valid(min)) {
7108             int flags = page_get_flags(h2g(min));
7109             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7110             if (page_check_range(h2g(min), max - min, flags) == -1) {
7111                 continue;
7112             }
7113             if (h2g(min) == ts->info->stack_limit) {
7114                 pstrcpy(path, sizeof(path), "      [stack]");
7115             }
7116             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7117                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7118                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7119                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7120                     path[0] ? "         " : "", path);
7121         }
7122     }
7123 
7124     free(line);
7125     fclose(fp);
7126 
7127     return 0;
7128 }
7129 
7130 static int open_self_stat(void *cpu_env, int fd)
7131 {
7132     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7133     TaskState *ts = cpu->opaque;
7134     abi_ulong start_stack = ts->info->start_stack;
7135     int i;
7136 
7137     for (i = 0; i < 44; i++) {
7138       char buf[128];
7139       int len;
7140       uint64_t val = 0;
7141 
7142       if (i == 0) {
7143         /* pid */
7144         val = getpid();
7145         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7146       } else if (i == 1) {
7147         /* app name */
7148         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7149       } else if (i == 27) {
7150         /* stack bottom */
7151         val = start_stack;
7152         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7153       } else {
7154         /* all remaining fields are simply reported as 0 */
7155         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7156       }
7157 
7158       len = strlen(buf);
7159       if (write(fd, buf, len) != len) {
7160           return -1;
7161       }
7162     }
7163 
7164     return 0;
7165 }
7166 
7167 static int open_self_auxv(void *cpu_env, int fd)
7168 {
7169     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7170     TaskState *ts = cpu->opaque;
7171     abi_ulong auxv = ts->info->saved_auxv;
7172     abi_ulong len = ts->info->auxv_len;
7173     char *ptr;
7174 
7175     /*
7176      * The auxiliary vector is stored on the target process stack.
7177      * Read in the whole auxv vector and copy it to the file.
7178      */
7179     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7180     if (ptr != NULL) {
7181         while (len > 0) {
7182             ssize_t r;
7183             r = write(fd, ptr, len);
7184             if (r <= 0) {
7185                 break;
7186             }
7187             len -= r;
7188             ptr += r;
7189         }
7190         lseek(fd, 0, SEEK_SET);
7191         unlock_user(ptr, auxv, len);
7192     }
7193 
7194     return 0;
7195 }
7196 
7197 static int is_proc_myself(const char *filename, const char *entry)
7198 {
7199     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7200         filename += strlen("/proc/");
7201         if (!strncmp(filename, "self/", strlen("self/"))) {
7202             filename += strlen("self/");
7203         } else if (*filename >= '1' && *filename <= '9') {
7204             char myself[80];
7205             snprintf(myself, sizeof(myself), "%d/", getpid());
7206             if (!strncmp(filename, myself, strlen(myself))) {
7207                 filename += strlen(myself);
7208             } else {
7209                 return 0;
7210             }
7211         } else {
7212             return 0;
7213         }
7214         if (!strcmp(filename, entry)) {
7215             return 1;
7216         }
7217     }
7218     return 0;
7219 }
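
/*
 * Illustrative examples (not in the original source), assuming getpid()
 * returns 1234:
 *
 *   is_proc_myself("/proc/self/maps", "maps")  -> 1
 *   is_proc_myself("/proc/1234/maps", "maps")  -> 1   (our own pid)
 *   is_proc_myself("/proc/4321/maps", "maps")  -> 0   (some other process)
 *   is_proc_myself("/proc/meminfo",   "maps")  -> 0
 */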
7220 
7221 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7222     defined(TARGET_SPARC) || defined(TARGET_M68K)
7223 static int is_proc(const char *filename, const char *entry)
7224 {
7225     return strcmp(filename, entry) == 0;
7226 }
7227 #endif
7228 
7229 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7230 static int open_net_route(void *cpu_env, int fd)
7231 {
7232     FILE *fp;
7233     char *line = NULL;
7234     size_t len = 0;
7235     ssize_t read;
7236 
7237     fp = fopen("/proc/net/route", "r");
7238     if (fp == NULL) {
7239         return -1;
7240     }
7241 
7242     /* read header */
7243 
7244     read = getline(&line, &len, fp);
7245     dprintf(fd, "%s", line);
7246 
7247     /* read routes */
7248 
7249     while ((read = getline(&line, &len, fp)) != -1) {
7250         char iface[16];
7251         uint32_t dest, gw, mask;
7252         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7253         int fields;
7254 
7255         fields = sscanf(line,
7256                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7257                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7258                         &mask, &mtu, &window, &irtt);
7259         if (fields != 11) {
7260             continue;
7261         }
7262         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7263                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7264                 metric, tswap32(mask), mtu, window, irtt);
7265     }
7266 
7267     free(line);
7268     fclose(fp);
7269 
7270     return 0;
7271 }
7272 #endif
7273 
7274 #if defined(TARGET_SPARC)
7275 static int open_cpuinfo(void *cpu_env, int fd)
7276 {
7277     dprintf(fd, "type\t\t: sun4u\n");
7278     return 0;
7279 }
7280 #endif
7281 
7282 #if defined(TARGET_M68K)
7283 static int open_hardware(void *cpu_env, int fd)
7284 {
7285     dprintf(fd, "Model:\t\tqemu-m68k\n");
7286     return 0;
7287 }
7288 #endif
7289 
7290 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7291 {
7292     struct fake_open {
7293         const char *filename;
7294         int (*fill)(void *cpu_env, int fd);
7295         int (*cmp)(const char *s1, const char *s2);
7296     };
7297     const struct fake_open *fake_open;
7298     static const struct fake_open fakes[] = {
7299         { "maps", open_self_maps, is_proc_myself },
7300         { "stat", open_self_stat, is_proc_myself },
7301         { "auxv", open_self_auxv, is_proc_myself },
7302         { "cmdline", open_self_cmdline, is_proc_myself },
7303 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7304         { "/proc/net/route", open_net_route, is_proc },
7305 #endif
7306 #if defined(TARGET_SPARC)
7307         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7308 #endif
7309 #if defined(TARGET_M68K)
7310         { "/proc/hardware", open_hardware, is_proc },
7311 #endif
7312         { NULL, NULL, NULL }
7313     };
7314 
7315     if (is_proc_myself(pathname, "exe")) {
7316         int execfd = qemu_getauxval(AT_EXECFD);
7317         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7318     }
7319 
7320     for (fake_open = fakes; fake_open->filename; fake_open++) {
7321         if (fake_open->cmp(pathname, fake_open->filename)) {
7322             break;
7323         }
7324     }
7325 
7326     if (fake_open->filename) {
7327         const char *tmpdir;
7328         char filename[PATH_MAX];
7329         int fd, r;
7330 
7331         /* create a temporary file to hold the faked /proc entry contents */
7332         tmpdir = getenv("TMPDIR");
7333         if (!tmpdir)
7334             tmpdir = "/tmp";
7335         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7336         fd = mkstemp(filename);
7337         if (fd < 0) {
7338             return fd;
7339         }
7340         unlink(filename);
7341 
7342         if ((r = fake_open->fill(cpu_env, fd))) {
7343             int e = errno;
7344             close(fd);
7345             errno = e;
7346             return r;
7347         }
7348         lseek(fd, 0, SEEK_SET);
7349 
7350         return fd;
7351     }
7352 
7353     return safe_openat(dirfd, path(pathname), flags, mode);
7354 }
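
/*
 * Illustrative flow (not part of the original source): when the guest opens
 * "/proc/self/maps", the fakes[] lookup above matches the entry
 * { "maps", open_self_maps, is_proc_myself }, so the guest is handed a
 * descriptor to an unlinked mkstemp() temp file filled in by
 * open_self_maps() with guest-view addresses, instead of the host's own
 * /proc/self/maps.
 */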
7355 
7356 #define TIMER_MAGIC 0x0caf0000
7357 #define TIMER_MAGIC_MASK 0xffff0000
7358 
7359 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7360 static target_timer_t get_timer_id(abi_long arg)
7361 {
7362     target_timer_t timerid = arg;
7363 
7364     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7365         return -TARGET_EINVAL;
7366     }
7367 
7368     timerid &= 0xffff;
7369 
7370     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7371         return -TARGET_EINVAL;
7372     }
7373 
7374     return timerid;
7375 }
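
/*
 * Worked example (illustrative; not in the original source): timer IDs
 * handed out to the guest are the slot index ORed with TIMER_MAGIC, so
 *
 *   get_timer_id(0x0caf0003) -> 3                (if slot 3 is within range)
 *   get_timer_id(0x00000003) -> -TARGET_EINVAL   (magic bits missing)
 */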
7376 
7377 static int target_to_host_cpu_mask(unsigned long *host_mask,
7378                                    size_t host_size,
7379                                    abi_ulong target_addr,
7380                                    size_t target_size)
7381 {
7382     unsigned target_bits = sizeof(abi_ulong) * 8;
7383     unsigned host_bits = sizeof(*host_mask) * 8;
7384     abi_ulong *target_mask;
7385     unsigned i, j;
7386 
7387     assert(host_size >= target_size);
7388 
7389     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7390     if (!target_mask) {
7391         return -TARGET_EFAULT;
7392     }
7393     memset(host_mask, 0, host_size);
7394 
7395     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7396         unsigned bit = i * target_bits;
7397         abi_ulong val;
7398 
7399         __get_user(val, &target_mask[i]);
7400         for (j = 0; j < target_bits; j++, bit++) {
7401             if (val & (1UL << j)) {
7402                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7403             }
7404         }
7405     }
7406 
7407     unlock_user(target_mask, target_addr, 0);
7408     return 0;
7409 }
7410 
7411 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7412                                    size_t host_size,
7413                                    abi_ulong target_addr,
7414                                    size_t target_size)
7415 {
7416     unsigned target_bits = sizeof(abi_ulong) * 8;
7417     unsigned host_bits = sizeof(*host_mask) * 8;
7418     abi_ulong *target_mask;
7419     unsigned i, j;
7420 
7421     assert(host_size >= target_size);
7422 
7423     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7424     if (!target_mask) {
7425         return -TARGET_EFAULT;
7426     }
7427 
7428     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7429         unsigned bit = i * target_bits;
7430         abi_ulong val = 0;
7431 
7432         for (j = 0; j < target_bits; j++, bit++) {
7433             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7434                 val |= 1UL << j;
7435             }
7436         }
7437         __put_user(val, &target_mask[i]);
7438     }
7439 
7440     unlock_user(target_mask, target_addr, target_size);
7441     return 0;
7442 }
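
/*
 * Worked example (illustrative; not in the original source): for a 32-bit
 * guest on a 64-bit host, a target affinity mask of two abi_ulong words
 * { 0x00000001, 0x80000000 } sets CPU bits 0 and 63;
 * target_to_host_cpu_mask() folds those into the single host word
 * 0x8000000000000001, and host_to_target_cpu_mask() performs the inverse
 * split.
 */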
7443 
7444 /* This is an internal helper for do_syscall so that it is easier
7445  * to have a single return point, where actions such as logging
7446  * of syscall results can be performed.
7447  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7448  */
7449 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7450                             abi_long arg2, abi_long arg3, abi_long arg4,
7451                             abi_long arg5, abi_long arg6, abi_long arg7,
7452                             abi_long arg8)
7453 {
7454     CPUState *cpu = env_cpu(cpu_env);
7455     abi_long ret;
7456 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7457     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7458     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7459     || defined(TARGET_NR_statx)
7460     struct stat st;
7461 #endif
7462 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7463     || defined(TARGET_NR_fstatfs)
7464     struct statfs stfs;
7465 #endif
7466     void *p;
7467 
7468     switch(num) {
7469     case TARGET_NR_exit:
7470         /* In old applications this may be used to implement _exit(2).
7471            However, in threaded applications it is used for thread termination,
7472            and _exit_group is used for application termination.
7473            Do thread termination if we have more than one thread.  */
7474 
7475         if (block_signals()) {
7476             return -TARGET_ERESTARTSYS;
7477         }
7478 
7479         cpu_list_lock();
7480 
7481         if (CPU_NEXT(first_cpu)) {
7482             TaskState *ts;
7483 
7484             /* Remove the CPU from the list.  */
7485             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7486 
7487             cpu_list_unlock();
7488 
7489             ts = cpu->opaque;
7490             if (ts->child_tidptr) {
7491                 put_user_u32(0, ts->child_tidptr);
7492                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7493                           NULL, NULL, 0);
7494             }
7495             thread_cpu = NULL;
7496             object_unref(OBJECT(cpu));
7497             g_free(ts);
7498             rcu_unregister_thread();
7499             pthread_exit(NULL);
7500         }
7501 
7502         cpu_list_unlock();
7503         preexit_cleanup(cpu_env, arg1);
7504         _exit(arg1);
7505         return 0; /* avoid warning */
7506     case TARGET_NR_read:
7507         if (arg2 == 0 && arg3 == 0) {
7508             return get_errno(safe_read(arg1, 0, 0));
7509         } else {
7510             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7511                 return -TARGET_EFAULT;
7512             ret = get_errno(safe_read(arg1, p, arg3));
7513             if (ret >= 0 &&
7514                 fd_trans_host_to_target_data(arg1)) {
7515                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7516             }
7517             unlock_user(p, arg2, ret);
7518         }
7519         return ret;
7520     case TARGET_NR_write:
7521         if (arg2 == 0 && arg3 == 0) {
7522             return get_errno(safe_write(arg1, 0, 0));
7523         }
7524         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7525             return -TARGET_EFAULT;
7526         if (fd_trans_target_to_host_data(arg1)) {
7527             void *copy = g_malloc(arg3);
7528             memcpy(copy, p, arg3);
7529             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7530             if (ret >= 0) {
7531                 ret = get_errno(safe_write(arg1, copy, ret));
7532             }
7533             g_free(copy);
7534         } else {
7535             ret = get_errno(safe_write(arg1, p, arg3));
7536         }
7537         unlock_user(p, arg2, 0);
7538         return ret;
7539 
7540 #ifdef TARGET_NR_open
7541     case TARGET_NR_open:
7542         if (!(p = lock_user_string(arg1)))
7543             return -TARGET_EFAULT;
7544         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7545                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7546                                   arg3));
7547         fd_trans_unregister(ret);
7548         unlock_user(p, arg1, 0);
7549         return ret;
7550 #endif
7551     case TARGET_NR_openat:
7552         if (!(p = lock_user_string(arg2)))
7553             return -TARGET_EFAULT;
7554         ret = get_errno(do_openat(cpu_env, arg1, p,
7555                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7556                                   arg4));
7557         fd_trans_unregister(ret);
7558         unlock_user(p, arg2, 0);
7559         return ret;
7560 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7561     case TARGET_NR_name_to_handle_at:
7562         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7563         return ret;
7564 #endif
7565 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7566     case TARGET_NR_open_by_handle_at:
7567         ret = do_open_by_handle_at(arg1, arg2, arg3);
7568         fd_trans_unregister(ret);
7569         return ret;
7570 #endif
7571     case TARGET_NR_close:
7572         fd_trans_unregister(arg1);
7573         return get_errno(close(arg1));
7574 
7575     case TARGET_NR_brk:
7576         return do_brk(arg1);
7577 #ifdef TARGET_NR_fork
7578     case TARGET_NR_fork:
7579         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7580 #endif
7581 #ifdef TARGET_NR_waitpid
7582     case TARGET_NR_waitpid:
7583         {
7584             int status;
7585             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7586             if (!is_error(ret) && arg2 && ret
7587                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7588                 return -TARGET_EFAULT;
7589         }
7590         return ret;
7591 #endif
7592 #ifdef TARGET_NR_waitid
7593     case TARGET_NR_waitid:
7594         {
7595             siginfo_t info;
7596             info.si_pid = 0;
7597             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7598             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7599                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7600                     return -TARGET_EFAULT;
7601                 host_to_target_siginfo(p, &info);
7602                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7603             }
7604         }
7605         return ret;
7606 #endif
7607 #ifdef TARGET_NR_creat /* not on alpha */
7608     case TARGET_NR_creat:
7609         if (!(p = lock_user_string(arg1)))
7610             return -TARGET_EFAULT;
7611         ret = get_errno(creat(p, arg2));
7612         fd_trans_unregister(ret);
7613         unlock_user(p, arg1, 0);
7614         return ret;
7615 #endif
7616 #ifdef TARGET_NR_link
7617     case TARGET_NR_link:
7618         {
7619             void * p2;
7620             p = lock_user_string(arg1);
7621             p2 = lock_user_string(arg2);
7622             if (!p || !p2)
7623                 ret = -TARGET_EFAULT;
7624             else
7625                 ret = get_errno(link(p, p2));
7626             unlock_user(p2, arg2, 0);
7627             unlock_user(p, arg1, 0);
7628         }
7629         return ret;
7630 #endif
7631 #if defined(TARGET_NR_linkat)
7632     case TARGET_NR_linkat:
7633         {
7634             void * p2 = NULL;
7635             if (!arg2 || !arg4)
7636                 return -TARGET_EFAULT;
7637             p  = lock_user_string(arg2);
7638             p2 = lock_user_string(arg4);
7639             if (!p || !p2)
7640                 ret = -TARGET_EFAULT;
7641             else
7642                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7643             unlock_user(p, arg2, 0);
7644             unlock_user(p2, arg4, 0);
7645         }
7646         return ret;
7647 #endif
7648 #ifdef TARGET_NR_unlink
7649     case TARGET_NR_unlink:
7650         if (!(p = lock_user_string(arg1)))
7651             return -TARGET_EFAULT;
7652         ret = get_errno(unlink(p));
7653         unlock_user(p, arg1, 0);
7654         return ret;
7655 #endif
7656 #if defined(TARGET_NR_unlinkat)
7657     case TARGET_NR_unlinkat:
7658         if (!(p = lock_user_string(arg2)))
7659             return -TARGET_EFAULT;
7660         ret = get_errno(unlinkat(arg1, p, arg3));
7661         unlock_user(p, arg2, 0);
7662         return ret;
7663 #endif
7664     case TARGET_NR_execve:
7665         {
7666             char **argp, **envp;
7667             int argc, envc;
7668             abi_ulong gp;
7669             abi_ulong guest_argp;
7670             abi_ulong guest_envp;
7671             abi_ulong addr;
7672             char **q;
7673             int total_size = 0;
7674 
7675             argc = 0;
7676             guest_argp = arg2;
7677             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7678                 if (get_user_ual(addr, gp))
7679                     return -TARGET_EFAULT;
7680                 if (!addr)
7681                     break;
7682                 argc++;
7683             }
7684             envc = 0;
7685             guest_envp = arg3;
7686             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7687                 if (get_user_ual(addr, gp))
7688                     return -TARGET_EFAULT;
7689                 if (!addr)
7690                     break;
7691                 envc++;
7692             }
7693 
7694             argp = g_new0(char *, argc + 1);
7695             envp = g_new0(char *, envc + 1);
7696 
7697             for (gp = guest_argp, q = argp; gp;
7698                   gp += sizeof(abi_ulong), q++) {
7699                 if (get_user_ual(addr, gp))
7700                     goto execve_efault;
7701                 if (!addr)
7702                     break;
7703                 if (!(*q = lock_user_string(addr)))
7704                     goto execve_efault;
7705                 total_size += strlen(*q) + 1;
7706             }
7707             *q = NULL;
7708 
7709             for (gp = guest_envp, q = envp; gp;
7710                   gp += sizeof(abi_ulong), q++) {
7711                 if (get_user_ual(addr, gp))
7712                     goto execve_efault;
7713                 if (!addr)
7714                     break;
7715                 if (!(*q = lock_user_string(addr)))
7716                     goto execve_efault;
7717                 total_size += strlen(*q) + 1;
7718             }
7719             *q = NULL;
7720 
7721             if (!(p = lock_user_string(arg1)))
7722                 goto execve_efault;
7723             /* Although execve() is not an interruptible syscall it is
7724              * a special case where we must use the safe_syscall wrapper:
7725              * if we allow a signal to happen before we make the host
7726              * syscall then we will 'lose' it, because at the point of
7727              * execve the process leaves QEMU's control. So we use the
7728              * safe syscall wrapper to ensure that we either take the
7729              * signal as a guest signal, or else it does not happen
7730              * before the execve completes and makes it the other
7731              * program's problem.
7732              */
7733             ret = get_errno(safe_execve(p, argp, envp));
7734             unlock_user(p, arg1, 0);
7735 
7736             goto execve_end;
7737 
7738         execve_efault:
7739             ret = -TARGET_EFAULT;
7740 
7741         execve_end:
7742             for (gp = guest_argp, q = argp; *q;
7743                   gp += sizeof(abi_ulong), q++) {
7744                 if (get_user_ual(addr, gp)
7745                     || !addr)
7746                     break;
7747                 unlock_user(*q, addr, 0);
7748             }
7749             for (gp = guest_envp, q = envp; *q;
7750                   gp += sizeof(abi_ulong), q++) {
7751                 if (get_user_ual(addr, gp)
7752                     || !addr)
7753                     break;
7754                 unlock_user(*q, addr, 0);
7755             }
7756 
7757             g_free(argp);
7758             g_free(envp);
7759         }
7760         return ret;
7761     case TARGET_NR_chdir:
7762         if (!(p = lock_user_string(arg1)))
7763             return -TARGET_EFAULT;
7764         ret = get_errno(chdir(p));
7765         unlock_user(p, arg1, 0);
7766         return ret;
7767 #ifdef TARGET_NR_time
7768     case TARGET_NR_time:
7769         {
7770             time_t host_time;
7771             ret = get_errno(time(&host_time));
7772             if (!is_error(ret)
7773                 && arg1
7774                 && put_user_sal(host_time, arg1))
7775                 return -TARGET_EFAULT;
7776         }
7777         return ret;
7778 #endif
7779 #ifdef TARGET_NR_mknod
7780     case TARGET_NR_mknod:
7781         if (!(p = lock_user_string(arg1)))
7782             return -TARGET_EFAULT;
7783         ret = get_errno(mknod(p, arg2, arg3));
7784         unlock_user(p, arg1, 0);
7785         return ret;
7786 #endif
7787 #if defined(TARGET_NR_mknodat)
7788     case TARGET_NR_mknodat:
7789         if (!(p = lock_user_string(arg2)))
7790             return -TARGET_EFAULT;
7791         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7792         unlock_user(p, arg2, 0);
7793         return ret;
7794 #endif
7795 #ifdef TARGET_NR_chmod
7796     case TARGET_NR_chmod:
7797         if (!(p = lock_user_string(arg1)))
7798             return -TARGET_EFAULT;
7799         ret = get_errno(chmod(p, arg2));
7800         unlock_user(p, arg1, 0);
7801         return ret;
7802 #endif
7803 #ifdef TARGET_NR_lseek
7804     case TARGET_NR_lseek:
7805         return get_errno(lseek(arg1, arg2, arg3));
7806 #endif
7807 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7808     /* Alpha specific */
7809     case TARGET_NR_getxpid:
7810         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7811         return get_errno(getpid());
7812 #endif
7813 #ifdef TARGET_NR_getpid
7814     case TARGET_NR_getpid:
7815         return get_errno(getpid());
7816 #endif
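         /*
          * mount: arg1 (source), arg2 (target) and arg3 (filesystem type)
          * are guest strings; source and fstype may legitimately be NULL,
          * but the target string is always required.  arg5 is
          * filesystem-specific data of unknown size, so it is passed
          * through with g2h() (see the FIXME below).
          */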
7817     case TARGET_NR_mount:
7818         {
7819             /* need to look at the data field */
7820             void *p2, *p3;
7821 
7822             if (arg1) {
7823                 p = lock_user_string(arg1);
7824                 if (!p) {
7825                     return -TARGET_EFAULT;
7826                 }
7827             } else {
7828                 p = NULL;
7829             }
7830 
7831             p2 = lock_user_string(arg2);
7832             if (!p2) {
7833                 if (arg1) {
7834                     unlock_user(p, arg1, 0);
7835                 }
7836                 return -TARGET_EFAULT;
7837             }
7838 
7839             if (arg3) {
7840                 p3 = lock_user_string(arg3);
7841                 if (!p3) {
7842                     if (arg1) {
7843                         unlock_user(p, arg1, 0);
7844                     }
7845                     unlock_user(p2, arg2, 0);
7846                     return -TARGET_EFAULT;
7847                 }
7848             } else {
7849                 p3 = NULL;
7850             }
7851 
7852             /* FIXME - arg5 should be locked, but it isn't clear how to
7853              * do that since it's not guaranteed to be a NULL-terminated
7854              * string.
7855              */
7856             if (!arg5) {
7857                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7858             } else {
7859                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7860             }
7861             ret = get_errno(ret);
7862 
7863             if (arg1) {
7864                 unlock_user(p, arg1, 0);
7865             }
7866             unlock_user(p2, arg2, 0);
7867             if (arg3) {
7868                 unlock_user(p3, arg3, 0);
7869             }
7870         }
7871         return ret;
7872 #ifdef TARGET_NR_umount
7873     case TARGET_NR_umount:
7874         if (!(p = lock_user_string(arg1)))
7875             return -TARGET_EFAULT;
7876         ret = get_errno(umount(p));
7877         unlock_user(p, arg1, 0);
7878         return ret;
7879 #endif
7880 #ifdef TARGET_NR_stime /* not on alpha */
7881     case TARGET_NR_stime:
7882         {
7883             struct timespec ts;
7884             ts.tv_nsec = 0;
7885             if (get_user_sal(ts.tv_sec, arg1)) {
7886                 return -TARGET_EFAULT;
7887             }
7888             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
7889         }
7890 #endif
7891 #ifdef TARGET_NR_alarm /* not on alpha */
7892     case TARGET_NR_alarm:
7893         return alarm(arg1);
7894 #endif
7895 #ifdef TARGET_NR_pause /* not on alpha */
7896     case TARGET_NR_pause:
7897         if (!block_signals()) {
7898             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7899         }
7900         return -TARGET_EINTR;
7901 #endif
7902 #ifdef TARGET_NR_utime
7903     case TARGET_NR_utime:
7904         {
7905             struct utimbuf tbuf, *host_tbuf;
7906             struct target_utimbuf *target_tbuf;
7907             if (arg2) {
7908                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7909                     return -TARGET_EFAULT;
7910                 tbuf.actime = tswapal(target_tbuf->actime);
7911                 tbuf.modtime = tswapal(target_tbuf->modtime);
7912                 unlock_user_struct(target_tbuf, arg2, 0);
7913                 host_tbuf = &tbuf;
7914             } else {
7915                 host_tbuf = NULL;
7916             }
7917             if (!(p = lock_user_string(arg1)))
7918                 return -TARGET_EFAULT;
7919             ret = get_errno(utime(p, host_tbuf));
7920             unlock_user(p, arg1, 0);
7921         }
7922         return ret;
7923 #endif
7924 #ifdef TARGET_NR_utimes
7925     case TARGET_NR_utimes:
7926         {
7927             struct timeval *tvp, tv[2];
7928             if (arg2) {
7929                 if (copy_from_user_timeval(&tv[0], arg2)
7930                     || copy_from_user_timeval(&tv[1],
7931                                               arg2 + sizeof(struct target_timeval)))
7932                     return -TARGET_EFAULT;
7933                 tvp = tv;
7934             } else {
7935                 tvp = NULL;
7936             }
7937             if (!(p = lock_user_string(arg1)))
7938                 return -TARGET_EFAULT;
7939             ret = get_errno(utimes(p, tvp));
7940             unlock_user(p, arg1, 0);
7941         }
7942         return ret;
7943 #endif
7944 #if defined(TARGET_NR_futimesat)
7945     case TARGET_NR_futimesat:
7946         {
7947             struct timeval *tvp, tv[2];
7948             if (arg3) {
7949                 if (copy_from_user_timeval(&tv[0], arg3)
7950                     || copy_from_user_timeval(&tv[1],
7951                                               arg3 + sizeof(struct target_timeval)))
7952                     return -TARGET_EFAULT;
7953                 tvp = tv;
7954             } else {
7955                 tvp = NULL;
7956             }
7957             if (!(p = lock_user_string(arg2))) {
7958                 return -TARGET_EFAULT;
7959             }
7960             ret = get_errno(futimesat(arg1, path(p), tvp));
7961             unlock_user(p, arg2, 0);
7962         }
7963         return ret;
7964 #endif
7965 #ifdef TARGET_NR_access
7966     case TARGET_NR_access:
7967         if (!(p = lock_user_string(arg1))) {
7968             return -TARGET_EFAULT;
7969         }
7970         ret = get_errno(access(path(p), arg2));
7971         unlock_user(p, arg1, 0);
7972         return ret;
7973 #endif
7974 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7975     case TARGET_NR_faccessat:
7976         if (!(p = lock_user_string(arg2))) {
7977             return -TARGET_EFAULT;
7978         }
7979         ret = get_errno(faccessat(arg1, p, arg3, 0));
7980         unlock_user(p, arg2, 0);
7981         return ret;
7982 #endif
7983 #ifdef TARGET_NR_nice /* not on alpha */
7984     case TARGET_NR_nice:
7985         return get_errno(nice(arg1));
7986 #endif
7987     case TARGET_NR_sync:
7988         sync();
7989         return 0;
7990 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7991     case TARGET_NR_syncfs:
7992         return get_errno(syncfs(arg1));
7993 #endif
7994     case TARGET_NR_kill:
7995         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7996 #ifdef TARGET_NR_rename
7997     case TARGET_NR_rename:
7998         {
7999             void *p2;
8000             p = lock_user_string(arg1);
8001             p2 = lock_user_string(arg2);
8002             if (!p || !p2)
8003                 ret = -TARGET_EFAULT;
8004             else
8005                 ret = get_errno(rename(p, p2));
8006             unlock_user(p2, arg2, 0);
8007             unlock_user(p, arg1, 0);
8008         }
8009         return ret;
8010 #endif
8011 #if defined(TARGET_NR_renameat)
8012     case TARGET_NR_renameat:
8013         {
8014             void *p2;
8015             p  = lock_user_string(arg2);
8016             p2 = lock_user_string(arg4);
8017             if (!p || !p2)
8018                 ret = -TARGET_EFAULT;
8019             else
8020                 ret = get_errno(renameat(arg1, p, arg3, p2));
8021             unlock_user(p2, arg4, 0);
8022             unlock_user(p, arg2, 0);
8023         }
8024         return ret;
8025 #endif
8026 #if defined(TARGET_NR_renameat2)
8027     case TARGET_NR_renameat2:
8028         {
8029             void *p2;
8030             p  = lock_user_string(arg2);
8031             p2 = lock_user_string(arg4);
8032             if (!p || !p2) {
8033                 ret = -TARGET_EFAULT;
8034             } else {
8035                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8036             }
8037             unlock_user(p2, arg4, 0);
8038             unlock_user(p, arg2, 0);
8039         }
8040         return ret;
8041 #endif
8042 #ifdef TARGET_NR_mkdir
8043     case TARGET_NR_mkdir:
8044         if (!(p = lock_user_string(arg1)))
8045             return -TARGET_EFAULT;
8046         ret = get_errno(mkdir(p, arg2));
8047         unlock_user(p, arg1, 0);
8048         return ret;
8049 #endif
8050 #if defined(TARGET_NR_mkdirat)
8051     case TARGET_NR_mkdirat:
8052         if (!(p = lock_user_string(arg2)))
8053             return -TARGET_EFAULT;
8054         ret = get_errno(mkdirat(arg1, p, arg3));
8055         unlock_user(p, arg2, 0);
8056         return ret;
8057 #endif
8058 #ifdef TARGET_NR_rmdir
8059     case TARGET_NR_rmdir:
8060         if (!(p = lock_user_string(arg1)))
8061             return -TARGET_EFAULT;
8062         ret = get_errno(rmdir(p));
8063         unlock_user(p, arg1, 0);
8064         return ret;
8065 #endif
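         /*
          * dup/dup2/dup3: fd_trans_dup() copies any fd translator that was
          * registered for the old descriptor to the new one, so translated
          * file descriptors keep working after duplication.
          */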
8066     case TARGET_NR_dup:
8067         ret = get_errno(dup(arg1));
8068         if (ret >= 0) {
8069             fd_trans_dup(arg1, ret);
8070         }
8071         return ret;
8072 #ifdef TARGET_NR_pipe
8073     case TARGET_NR_pipe:
8074         return do_pipe(cpu_env, arg1, 0, 0);
8075 #endif
8076 #ifdef TARGET_NR_pipe2
8077     case TARGET_NR_pipe2:
8078         return do_pipe(cpu_env, arg1,
8079                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8080 #endif
8081     case TARGET_NR_times:
8082         {
8083             struct target_tms *tmsp;
8084             struct tms tms;
8085             ret = get_errno(times(&tms));
8086             if (arg1) {
8087                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8088                 if (!tmsp)
8089                     return -TARGET_EFAULT;
8090                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8091                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8092                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8093                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8094             }
8095             if (!is_error(ret))
8096                 ret = host_to_target_clock_t(ret);
8097         }
8098         return ret;
8099     case TARGET_NR_acct:
8100         if (arg1 == 0) {
8101             ret = get_errno(acct(NULL));
8102         } else {
8103             if (!(p = lock_user_string(arg1))) {
8104                 return -TARGET_EFAULT;
8105             }
8106             ret = get_errno(acct(path(p)));
8107             unlock_user(p, arg1, 0);
8108         }
8109         return ret;
8110 #ifdef TARGET_NR_umount2
8111     case TARGET_NR_umount2:
8112         if (!(p = lock_user_string(arg1)))
8113             return -TARGET_EFAULT;
8114         ret = get_errno(umount2(p, arg2));
8115         unlock_user(p, arg1, 0);
8116         return ret;
8117 #endif
8118     case TARGET_NR_ioctl:
8119         return do_ioctl(arg1, arg2, arg3);
8120 #ifdef TARGET_NR_fcntl
8121     case TARGET_NR_fcntl:
8122         return do_fcntl(arg1, arg2, arg3);
8123 #endif
8124     case TARGET_NR_setpgid:
8125         return get_errno(setpgid(arg1, arg2));
8126     case TARGET_NR_umask:
8127         return get_errno(umask(arg1));
8128     case TARGET_NR_chroot:
8129         if (!(p = lock_user_string(arg1)))
8130             return -TARGET_EFAULT;
8131         ret = get_errno(chroot(p));
8132         unlock_user(p, arg1, 0);
8133         return ret;
8134 #ifdef TARGET_NR_dup2
8135     case TARGET_NR_dup2:
8136         ret = get_errno(dup2(arg1, arg2));
8137         if (ret >= 0) {
8138             fd_trans_dup(arg1, arg2);
8139         }
8140         return ret;
8141 #endif
8142 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8143     case TARGET_NR_dup3:
8144     {
8145         int host_flags;
8146 
8147         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8148             return -TARGET_EINVAL;
8149         }
8150         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8151         ret = get_errno(dup3(arg1, arg2, host_flags));
8152         if (ret >= 0) {
8153             fd_trans_dup(arg1, arg2);
8154         }
8155         return ret;
8156     }
8157 #endif
8158 #ifdef TARGET_NR_getppid /* not on alpha */
8159     case TARGET_NR_getppid:
8160         return get_errno(getppid());
8161 #endif
8162 #ifdef TARGET_NR_getpgrp
8163     case TARGET_NR_getpgrp:
8164         return get_errno(getpgrp());
8165 #endif
8166     case TARGET_NR_setsid:
8167         return get_errno(setsid());
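         /*
          * sigaction: the legacy sigaction layout differs per target (no
          * sa_restorer on Alpha, a four-word mask and no old_sigaction
          * struct on MIPS, old_sigaction with sa_restorer elsewhere), so
          * each branch converts the fields by hand before calling
          * do_sigaction().
          */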
8168 #ifdef TARGET_NR_sigaction
8169     case TARGET_NR_sigaction:
8170         {
8171 #if defined(TARGET_ALPHA)
8172             struct target_sigaction act, oact, *pact = 0;
8173             struct target_old_sigaction *old_act;
8174             if (arg2) {
8175                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8176                     return -TARGET_EFAULT;
8177                 act._sa_handler = old_act->_sa_handler;
8178                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8179                 act.sa_flags = old_act->sa_flags;
8180                 act.sa_restorer = 0;
8181                 unlock_user_struct(old_act, arg2, 0);
8182                 pact = &act;
8183             }
8184             ret = get_errno(do_sigaction(arg1, pact, &oact));
8185             if (!is_error(ret) && arg3) {
8186                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8187                     return -TARGET_EFAULT;
8188                 old_act->_sa_handler = oact._sa_handler;
8189                 old_act->sa_mask = oact.sa_mask.sig[0];
8190                 old_act->sa_flags = oact.sa_flags;
8191                 unlock_user_struct(old_act, arg3, 1);
8192             }
8193 #elif defined(TARGET_MIPS)
8194             struct target_sigaction act, oact, *pact, *old_act;
8195 
8196             if (arg2) {
8197                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8198                     return -TARGET_EFAULT;
8199                 act._sa_handler = old_act->_sa_handler;
8200                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8201                 act.sa_flags = old_act->sa_flags;
8202                 unlock_user_struct(old_act, arg2, 0);
8203                 pact = &act;
8204             } else {
8205                 pact = NULL;
8206             }
8207 
8208             ret = get_errno(do_sigaction(arg1, pact, &oact));
8209 
8210             if (!is_error(ret) && arg3) {
8211                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8212                     return -TARGET_EFAULT;
8213                 old_act->_sa_handler = oact._sa_handler;
8214                 old_act->sa_flags = oact.sa_flags;
8215                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8216                 old_act->sa_mask.sig[1] = 0;
8217                 old_act->sa_mask.sig[2] = 0;
8218                 old_act->sa_mask.sig[3] = 0;
8219                 unlock_user_struct(old_act, arg3, 1);
8220             }
8221 #else
8222             struct target_old_sigaction *old_act;
8223             struct target_sigaction act, oact, *pact;
8224             if (arg2) {
8225                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8226                     return -TARGET_EFAULT;
8227                 act._sa_handler = old_act->_sa_handler;
8228                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8229                 act.sa_flags = old_act->sa_flags;
8230                 act.sa_restorer = old_act->sa_restorer;
8231 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8232                 act.ka_restorer = 0;
8233 #endif
8234                 unlock_user_struct(old_act, arg2, 0);
8235                 pact = &act;
8236             } else {
8237                 pact = NULL;
8238             }
8239             ret = get_errno(do_sigaction(arg1, pact, &oact));
8240             if (!is_error(ret) && arg3) {
8241                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8242                     return -TARGET_EFAULT;
8243                 old_act->_sa_handler = oact._sa_handler;
8244                 old_act->sa_mask = oact.sa_mask.sig[0];
8245                 old_act->sa_flags = oact.sa_flags;
8246                 old_act->sa_restorer = oact.sa_restorer;
8247                 unlock_user_struct(old_act, arg3, 1);
8248             }
8249 #endif
8250         }
8251         return ret;
8252 #endif
8253     case TARGET_NR_rt_sigaction:
8254         {
8255 #if defined(TARGET_ALPHA)
8256             /* For Alpha and SPARC this is a 5 argument syscall, with
8257              * a 'restorer' parameter which must be copied into the
8258              * sa_restorer field of the sigaction struct.
8259              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8260              * and arg5 is the sigsetsize.
8261              * Alpha also has a separate rt_sigaction struct that it uses
8262              * here; SPARC uses the usual sigaction struct.
8263              */
8264             struct target_rt_sigaction *rt_act;
8265             struct target_sigaction act, oact, *pact = 0;
8266 
8267             if (arg4 != sizeof(target_sigset_t)) {
8268                 return -TARGET_EINVAL;
8269             }
8270             if (arg2) {
8271                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8272                     return -TARGET_EFAULT;
8273                 act._sa_handler = rt_act->_sa_handler;
8274                 act.sa_mask = rt_act->sa_mask;
8275                 act.sa_flags = rt_act->sa_flags;
8276                 act.sa_restorer = arg5;
8277                 unlock_user_struct(rt_act, arg2, 0);
8278                 pact = &act;
8279             }
8280             ret = get_errno(do_sigaction(arg1, pact, &oact));
8281             if (!is_error(ret) && arg3) {
8282                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8283                     return -TARGET_EFAULT;
8284                 rt_act->_sa_handler = oact._sa_handler;
8285                 rt_act->sa_mask = oact.sa_mask;
8286                 rt_act->sa_flags = oact.sa_flags;
8287                 unlock_user_struct(rt_act, arg3, 1);
8288             }
8289 #else
8290 #ifdef TARGET_SPARC
8291             target_ulong restorer = arg4;
8292             target_ulong sigsetsize = arg5;
8293 #else
8294             target_ulong sigsetsize = arg4;
8295 #endif
8296             struct target_sigaction *act;
8297             struct target_sigaction *oact;
8298 
8299             if (sigsetsize != sizeof(target_sigset_t)) {
8300                 return -TARGET_EINVAL;
8301             }
8302             if (arg2) {
8303                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8304                     return -TARGET_EFAULT;
8305                 }
8306 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8307                 act->ka_restorer = restorer;
8308 #endif
8309             } else {
8310                 act = NULL;
8311             }
8312             if (arg3) {
8313                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8314                     ret = -TARGET_EFAULT;
8315                     goto rt_sigaction_fail;
8316                 }
8317             } else
8318                 oact = NULL;
8319             ret = get_errno(do_sigaction(arg1, act, oact));
8320         rt_sigaction_fail:
8321             if (act)
8322                 unlock_user_struct(act, arg2, 0);
8323             if (oact)
8324                 unlock_user_struct(oact, arg3, 1);
8325 #endif
8326         }
8327         return ret;
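         /*
          * sgetmask/ssetmask operate on the old single-word signal mask,
          * hence the *_old_sigset conversion helpers instead of the full
          * sigset translation used by the rt_* variants.
          */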
8328 #ifdef TARGET_NR_sgetmask /* not on alpha */
8329     case TARGET_NR_sgetmask:
8330         {
8331             sigset_t cur_set;
8332             abi_ulong target_set;
8333             ret = do_sigprocmask(0, NULL, &cur_set);
8334             if (!ret) {
8335                 host_to_target_old_sigset(&target_set, &cur_set);
8336                 ret = target_set;
8337             }
8338         }
8339         return ret;
8340 #endif
8341 #ifdef TARGET_NR_ssetmask /* not on alpha */
8342     case TARGET_NR_ssetmask:
8343         {
8344             sigset_t set, oset;
8345             abi_ulong target_set = arg1;
8346             target_to_host_old_sigset(&set, &target_set);
8347             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8348             if (!ret) {
8349                 host_to_target_old_sigset(&target_set, &oset);
8350                 ret = target_set;
8351             }
8352         }
8353         return ret;
8354 #endif
8355 #ifdef TARGET_NR_sigprocmask
8356     case TARGET_NR_sigprocmask:
8357         {
8358 #if defined(TARGET_ALPHA)
8359             sigset_t set, oldset;
8360             abi_ulong mask;
8361             int how;
8362 
8363             switch (arg1) {
8364             case TARGET_SIG_BLOCK:
8365                 how = SIG_BLOCK;
8366                 break;
8367             case TARGET_SIG_UNBLOCK:
8368                 how = SIG_UNBLOCK;
8369                 break;
8370             case TARGET_SIG_SETMASK:
8371                 how = SIG_SETMASK;
8372                 break;
8373             default:
8374                 return -TARGET_EINVAL;
8375             }
8376             mask = arg2;
8377             target_to_host_old_sigset(&set, &mask);
8378 
8379             ret = do_sigprocmask(how, &set, &oldset);
8380             if (!is_error(ret)) {
8381                 host_to_target_old_sigset(&mask, &oldset);
8382                 ret = mask;
8383                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8384             }
8385 #else
8386             sigset_t set, oldset, *set_ptr;
8387             int how;
8388 
8389             if (arg2) {
8390                 switch (arg1) {
8391                 case TARGET_SIG_BLOCK:
8392                     how = SIG_BLOCK;
8393                     break;
8394                 case TARGET_SIG_UNBLOCK:
8395                     how = SIG_UNBLOCK;
8396                     break;
8397                 case TARGET_SIG_SETMASK:
8398                     how = SIG_SETMASK;
8399                     break;
8400                 default:
8401                     return -TARGET_EINVAL;
8402                 }
8403                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8404                     return -TARGET_EFAULT;
8405                 target_to_host_old_sigset(&set, p);
8406                 unlock_user(p, arg2, 0);
8407                 set_ptr = &set;
8408             } else {
8409                 how = 0;
8410                 set_ptr = NULL;
8411             }
8412             ret = do_sigprocmask(how, set_ptr, &oldset);
8413             if (!is_error(ret) && arg3) {
8414                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8415                     return -TARGET_EFAULT;
8416                 host_to_target_old_sigset(p, &oldset);
8417                 unlock_user(p, arg3, sizeof(target_sigset_t));
8418             }
8419 #endif
8420         }
8421         return ret;
8422 #endif
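         /*
          * rt_sigprocmask: as in the kernel, any sigsetsize other than
          * sizeof(target_sigset_t) is rejected, and the new mask is only
          * read when the guest supplied one (arg2 may be NULL to merely
          * query the current mask via arg3).
          */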
8423     case TARGET_NR_rt_sigprocmask:
8424         {
8425             int how = arg1;
8426             sigset_t set, oldset, *set_ptr;
8427 
8428             if (arg4 != sizeof(target_sigset_t)) {
8429                 return -TARGET_EINVAL;
8430             }
8431 
8432             if (arg2) {
8433                 switch(how) {
8434                 case TARGET_SIG_BLOCK:
8435                     how = SIG_BLOCK;
8436                     break;
8437                 case TARGET_SIG_UNBLOCK:
8438                     how = SIG_UNBLOCK;
8439                     break;
8440                 case TARGET_SIG_SETMASK:
8441                     how = SIG_SETMASK;
8442                     break;
8443                 default:
8444                     return -TARGET_EINVAL;
8445                 }
8446                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8447                     return -TARGET_EFAULT;
8448                 target_to_host_sigset(&set, p);
8449                 unlock_user(p, arg2, 0);
8450                 set_ptr = &set;
8451             } else {
8452                 how = 0;
8453                 set_ptr = NULL;
8454             }
8455             ret = do_sigprocmask(how, set_ptr, &oldset);
8456             if (!is_error(ret) && arg3) {
8457                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8458                     return -TARGET_EFAULT;
8459                 host_to_target_sigset(p, &oldset);
8460                 unlock_user(p, arg3, sizeof(target_sigset_t));
8461             }
8462         }
8463         return ret;
8464 #ifdef TARGET_NR_sigpending
8465     case TARGET_NR_sigpending:
8466         {
8467             sigset_t set;
8468             ret = get_errno(sigpending(&set));
8469             if (!is_error(ret)) {
8470                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8471                     return -TARGET_EFAULT;
8472                 host_to_target_old_sigset(p, &set);
8473                 unlock_user(p, arg1, sizeof(target_sigset_t));
8474             }
8475         }
8476         return ret;
8477 #endif
8478     case TARGET_NR_rt_sigpending:
8479         {
8480             sigset_t set;
8481 
8482             /* Yes, this check is >, not != like most.  We follow the
8483              * kernel's logic here: it implements NR_sigpending through
8484              * the same code path, and in that case the old_sigset_t is
8485              * smaller in size.
8486              */
8487             if (arg2 > sizeof(target_sigset_t)) {
8488                 return -TARGET_EINVAL;
8489             }
8490 
8491             ret = get_errno(sigpending(&set));
8492             if (!is_error(ret)) {
8493                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8494                     return -TARGET_EFAULT;
8495                 host_to_target_sigset(p, &set);
8496                 unlock_user(p, arg1, sizeof(target_sigset_t));
8497             }
8498         }
8499         return ret;
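         /*
          * sigsuspend/rt_sigsuspend: the requested mask is stored in the
          * TaskState (sigsuspend_mask, in_sigsuspend) so that the signal
          * delivery code knows which mask to use when the pending signal
          * is actually delivered.
          */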
8500 #ifdef TARGET_NR_sigsuspend
8501     case TARGET_NR_sigsuspend:
8502         {
8503             TaskState *ts = cpu->opaque;
8504 #if defined(TARGET_ALPHA)
8505             abi_ulong mask = arg1;
8506             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8507 #else
8508             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8509                 return -TARGET_EFAULT;
8510             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8511             unlock_user(p, arg1, 0);
8512 #endif
8513             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8514                                                SIGSET_T_SIZE));
8515             if (ret != -TARGET_ERESTARTSYS) {
8516                 ts->in_sigsuspend = 1;
8517             }
8518         }
8519         return ret;
8520 #endif
8521     case TARGET_NR_rt_sigsuspend:
8522         {
8523             TaskState *ts = cpu->opaque;
8524 
8525             if (arg2 != sizeof(target_sigset_t)) {
8526                 return -TARGET_EINVAL;
8527             }
8528             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8529                 return -TARGET_EFAULT;
8530             target_to_host_sigset(&ts->sigsuspend_mask, p);
8531             unlock_user(p, arg1, 0);
8532             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8533                                                SIGSET_T_SIZE));
8534             if (ret != -TARGET_ERESTARTSYS) {
8535                 ts->in_sigsuspend = 1;
8536             }
8537         }
8538         return ret;
8539 #ifdef TARGET_NR_rt_sigtimedwait
8540     case TARGET_NR_rt_sigtimedwait:
8541         {
8542             sigset_t set;
8543             struct timespec uts, *puts;
8544             siginfo_t uinfo;
8545 
8546             if (arg4 != sizeof(target_sigset_t)) {
8547                 return -TARGET_EINVAL;
8548             }
8549 
8550             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8551                 return -TARGET_EFAULT;
8552             target_to_host_sigset(&set, p);
8553             unlock_user(p, arg1, 0);
8554             if (arg3) {
8555                 puts = &uts;
8556                 if (target_to_host_timespec(puts, arg3)) {
                         return -TARGET_EFAULT;
                     }
8557             } else {
8558                 puts = NULL;
8559             }
8560             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8561                                                  SIGSET_T_SIZE));
8562             if (!is_error(ret)) {
8563                 if (arg2) {
8564                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8565                                   0);
8566                     if (!p) {
8567                         return -TARGET_EFAULT;
8568                     }
8569                     host_to_target_siginfo(p, &uinfo);
8570                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8571                 }
8572                 ret = host_to_target_signal(ret);
8573             }
8574         }
8575         return ret;
8576 #endif
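         /*
          * rt_sigqueueinfo/rt_tgsigqueueinfo: the guest siginfo is converted
          * to the host layout first and then queued through the
          * sys_rt_sigqueueinfo()/sys_rt_tgsigqueueinfo() wrappers.
          */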
8577     case TARGET_NR_rt_sigqueueinfo:
8578         {
8579             siginfo_t uinfo;
8580 
8581             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8582             if (!p) {
8583                 return -TARGET_EFAULT;
8584             }
8585             target_to_host_siginfo(&uinfo, p);
8586             unlock_user(p, arg3, 0);
8587             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8588         }
8589         return ret;
8590     case TARGET_NR_rt_tgsigqueueinfo:
8591         {
8592             siginfo_t uinfo;
8593 
8594             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8595             if (!p) {
8596                 return -TARGET_EFAULT;
8597             }
8598             target_to_host_siginfo(&uinfo, p);
8599             unlock_user(p, arg4, 0);
8600             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8601         }
8602         return ret;
8603 #ifdef TARGET_NR_sigreturn
8604     case TARGET_NR_sigreturn:
8605         if (block_signals()) {
8606             return -TARGET_ERESTARTSYS;
8607         }
8608         return do_sigreturn(cpu_env);
8609 #endif
8610     case TARGET_NR_rt_sigreturn:
8611         if (block_signals()) {
8612             return -TARGET_ERESTARTSYS;
8613         }
8614         return do_rt_sigreturn(cpu_env);
8615     case TARGET_NR_sethostname:
8616         if (!(p = lock_user_string(arg1)))
8617             return -TARGET_EFAULT;
8618         ret = get_errno(sethostname(p, arg2));
8619         unlock_user(p, arg1, 0);
8620         return ret;
8621 #ifdef TARGET_NR_setrlimit
8622     case TARGET_NR_setrlimit:
8623         {
8624             int resource = target_to_host_resource(arg1);
8625             struct target_rlimit *target_rlim;
8626             struct rlimit rlim;
8627             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8628                 return -TARGET_EFAULT;
8629             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8630             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8631             unlock_user_struct(target_rlim, arg2, 0);
8632             /*
8633              * If we just passed through resource limit settings for memory then
8634              * they would also apply to QEMU's own allocations, and QEMU will
8635              * crash or hang or die if its allocations fail. Ideally we would
8636              * track the guest allocations in QEMU and apply the limits ourselves.
8637              * For now, just tell the guest the call succeeded but don't actually
8638              * limit anything.
8639              */
8640             if (resource != RLIMIT_AS &&
8641                 resource != RLIMIT_DATA &&
8642                 resource != RLIMIT_STACK) {
8643                 return get_errno(setrlimit(resource, &rlim));
8644             } else {
8645                 return 0;
8646             }
8647         }
8648 #endif
8649 #ifdef TARGET_NR_getrlimit
8650     case TARGET_NR_getrlimit:
8651         {
8652             int resource = target_to_host_resource(arg1);
8653             struct target_rlimit *target_rlim;
8654             struct rlimit rlim;
8655 
8656             ret = get_errno(getrlimit(resource, &rlim));
8657             if (!is_error(ret)) {
8658                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8659                     return -TARGET_EFAULT;
8660                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8661                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8662                 unlock_user_struct(target_rlim, arg2, 1);
8663             }
8664         }
8665         return ret;
8666 #endif
8667     case TARGET_NR_getrusage:
8668         {
8669             struct rusage rusage;
8670             ret = get_errno(getrusage(arg1, &rusage));
8671             if (!is_error(ret)) {
8672                 ret = host_to_target_rusage(arg2, &rusage);
8673             }
8674         }
8675         return ret;
8676 #if defined(TARGET_NR_gettimeofday)
8677     case TARGET_NR_gettimeofday:
8678         {
8679             struct timeval tv;
8680             ret = get_errno(gettimeofday(&tv, NULL));
8681             if (!is_error(ret)) {
8682                 if (copy_to_user_timeval(arg1, &tv))
8683                     return -TARGET_EFAULT;
8684             }
8685         }
8686         return ret;
8687 #endif
8688 #if defined(TARGET_NR_settimeofday)
8689     case TARGET_NR_settimeofday:
8690         {
8691             struct timeval tv, *ptv = NULL;
8692             struct timezone tz, *ptz = NULL;
8693 
8694             if (arg1) {
8695                 if (copy_from_user_timeval(&tv, arg1)) {
8696                     return -TARGET_EFAULT;
8697                 }
8698                 ptv = &tv;
8699             }
8700 
8701             if (arg2) {
8702                 if (copy_from_user_timezone(&tz, arg2)) {
8703                     return -TARGET_EFAULT;
8704                 }
8705                 ptz = &tz;
8706             }
8707 
8708             return get_errno(settimeofday(ptv, ptz));
8709         }
8710 #endif
8711 #if defined(TARGET_NR_select)
8712     case TARGET_NR_select:
8713 #if defined(TARGET_WANT_NI_OLD_SELECT)
8714         /* Some architectures used to provide old_select here,
8715          * but they now return ENOSYS for it.
8716          */
8717         ret = -TARGET_ENOSYS;
8718 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8719         ret = do_old_select(arg1);
8720 #else
8721         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8722 #endif
8723         return ret;
8724 #endif
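         /*
          * pselect6: the sixth argument points at a two-word block holding
          * the guest sigset pointer and its size; both words are fetched
          * with tswapal() before the sigset itself is translated.
          */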
8725 #ifdef TARGET_NR_pselect6
8726     case TARGET_NR_pselect6:
8727         {
8728             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8729             fd_set rfds, wfds, efds;
8730             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8731             struct timespec ts, *ts_ptr;
8732 
8733             /*
8734              * The 6th arg is actually two args smashed together,
8735              * so we cannot use the C library.
8736              */
8737             sigset_t set;
8738             struct {
8739                 sigset_t *set;
8740                 size_t size;
8741             } sig, *sig_ptr;
8742 
8743             abi_ulong arg_sigset, arg_sigsize, *arg7;
8744             target_sigset_t *target_sigset;
8745 
8746             n = arg1;
8747             rfd_addr = arg2;
8748             wfd_addr = arg3;
8749             efd_addr = arg4;
8750             ts_addr = arg5;
8751 
8752             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8753             if (ret) {
8754                 return ret;
8755             }
8756             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8757             if (ret) {
8758                 return ret;
8759             }
8760             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8761             if (ret) {
8762                 return ret;
8763             }
8764 
8765             /*
8766              * This takes a timespec, and not a timeval, so we cannot
8767              * use the do_select() helper ...
8768              */
8769             if (ts_addr) {
8770                 if (target_to_host_timespec(&ts, ts_addr)) {
8771                     return -TARGET_EFAULT;
8772                 }
8773                 ts_ptr = &ts;
8774             } else {
8775                 ts_ptr = NULL;
8776             }
8777 
8778             /* Extract the two packed args for the sigset */
8779             if (arg6) {
8780                 sig_ptr = &sig;
8781                 sig.size = SIGSET_T_SIZE;
8782 
8783                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8784                 if (!arg7) {
8785                     return -TARGET_EFAULT;
8786                 }
8787                 arg_sigset = tswapal(arg7[0]);
8788                 arg_sigsize = tswapal(arg7[1]);
8789                 unlock_user(arg7, arg6, 0);
8790 
8791                 if (arg_sigset) {
8792                     sig.set = &set;
8793                     if (arg_sigsize != sizeof(*target_sigset)) {
8794                         /* Like the kernel, we enforce correct size sigsets */
8795                         return -TARGET_EINVAL;
8796                     }
8797                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8798                                               sizeof(*target_sigset), 1);
8799                     if (!target_sigset) {
8800                         return -TARGET_EFAULT;
8801                     }
8802                     target_to_host_sigset(&set, target_sigset);
8803                     unlock_user(target_sigset, arg_sigset, 0);
8804                 } else {
8805                     sig.set = NULL;
8806                 }
8807             } else {
8808                 sig_ptr = NULL;
8809             }
8810 
8811             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8812                                           ts_ptr, sig_ptr));
8813 
8814             if (!is_error(ret)) {
8815                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8816                     return -TARGET_EFAULT;
8817                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8818                     return -TARGET_EFAULT;
8819                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8820                     return -TARGET_EFAULT;
8821 
8822                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8823                     return -TARGET_EFAULT;
8824             }
8825         }
8826         return ret;
8827 #endif
8828 #ifdef TARGET_NR_symlink
8829     case TARGET_NR_symlink:
8830         {
8831             void *p2;
8832             p = lock_user_string(arg1);
8833             p2 = lock_user_string(arg2);
8834             if (!p || !p2)
8835                 ret = -TARGET_EFAULT;
8836             else
8837                 ret = get_errno(symlink(p, p2));
8838             unlock_user(p2, arg2, 0);
8839             unlock_user(p, arg1, 0);
8840         }
8841         return ret;
8842 #endif
8843 #if defined(TARGET_NR_symlinkat)
8844     case TARGET_NR_symlinkat:
8845         {
8846             void *p2;
8847             p  = lock_user_string(arg1);
8848             p2 = lock_user_string(arg3);
8849             if (!p || !p2)
8850                 ret = -TARGET_EFAULT;
8851             else
8852                 ret = get_errno(symlinkat(p, arg2, p2));
8853             unlock_user(p2, arg3, 0);
8854             unlock_user(p, arg1, 0);
8855         }
8856         return ret;
8857 #endif
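         /*
          * readlink/readlinkat: paths matched by is_proc_myself(..., "exe")
          * are special-cased so the guest sees the path of the binary being
          * emulated (exec_path) rather than the QEMU executable that the
          * host's /proc would otherwise report.
          */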
8858 #ifdef TARGET_NR_readlink
8859     case TARGET_NR_readlink:
8860         {
8861             void *p2;
8862             p = lock_user_string(arg1);
8863             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8864             if (!p || !p2) {
8865                 ret = -TARGET_EFAULT;
8866             } else if (!arg3) {
8867                 /* Short circuit this for the magic exe check. */
8868                 ret = -TARGET_EINVAL;
8869             } else if (is_proc_myself((const char *)p, "exe")) {
8870                 char real[PATH_MAX], *temp;
8871                 temp = realpath(exec_path, real);
8872                 /* Return value is # of bytes that we wrote to the buffer. */
8873                 if (temp == NULL) {
8874                     ret = get_errno(-1);
8875                 } else {
8876                     /* Don't worry about sign mismatch as earlier mapping
8877                      * logic would have thrown a bad address error. */
8878                     ret = MIN(strlen(real), arg3);
8879                     /* We cannot NUL terminate the string. */
8880                     memcpy(p2, real, ret);
8881                 }
8882             } else {
8883                 ret = get_errno(readlink(path(p), p2, arg3));
8884             }
8885             unlock_user(p2, arg2, ret);
8886             unlock_user(p, arg1, 0);
8887         }
8888         return ret;
8889 #endif
8890 #if defined(TARGET_NR_readlinkat)
8891     case TARGET_NR_readlinkat:
8892         {
8893             void *p2;
8894             p  = lock_user_string(arg2);
8895             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8896             if (!p || !p2) {
8897                 ret = -TARGET_EFAULT;
8898             } else if (is_proc_myself((const char *)p, "exe")) {
8899                 char real[PATH_MAX], *temp;
8900                 temp = realpath(exec_path, real);
8901                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8902                 snprintf((char *)p2, arg4, "%s", real);
8903             } else {
8904                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8905             }
8906             unlock_user(p2, arg3, ret);
8907             unlock_user(p, arg2, 0);
8908         }
8909         return ret;
8910 #endif
8911 #ifdef TARGET_NR_swapon
8912     case TARGET_NR_swapon:
8913         if (!(p = lock_user_string(arg1)))
8914             return -TARGET_EFAULT;
8915         ret = get_errno(swapon(p, arg2));
8916         unlock_user(p, arg1, 0);
8917         return ret;
8918 #endif
8919     case TARGET_NR_reboot:
8920         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8921             /* arg4 is only used for RESTART2 and must be ignored otherwise */
8922             p = lock_user_string(arg4);
8923             if (!p) {
8924                 return -TARGET_EFAULT;
8925             }
8926             ret = get_errno(reboot(arg1, arg2, arg3, p));
8927             unlock_user(p, arg4, 0);
8928         } else {
8929             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8930         }
8931         return ret;
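         /*
          * Some targets use the old mmap calling convention, in which a
          * single guest pointer refers to a block of six arguments, so that
          * block is unpacked before calling target_mmap().
          */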
8932 #ifdef TARGET_NR_mmap
8933     case TARGET_NR_mmap:
8934 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8935     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8936     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8937     || defined(TARGET_S390X)
8938         {
8939             abi_ulong *v;
8940             abi_ulong v1, v2, v3, v4, v5, v6;
8941             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8942                 return -TARGET_EFAULT;
8943             v1 = tswapal(v[0]);
8944             v2 = tswapal(v[1]);
8945             v3 = tswapal(v[2]);
8946             v4 = tswapal(v[3]);
8947             v5 = tswapal(v[4]);
8948             v6 = tswapal(v[5]);
8949             unlock_user(v, arg1, 0);
8950             ret = get_errno(target_mmap(v1, v2, v3,
8951                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8952                                         v5, v6));
8953         }
8954 #else
8955         ret = get_errno(target_mmap(arg1, arg2, arg3,
8956                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8957                                     arg5,
8958                                     arg6));
8959 #endif
8960         return ret;
8961 #endif
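         /*
          * mmap2 passes the file offset in units of 1 << MMAP_SHIFT bytes
          * (4096 unless the target overrides it), so scale it back to a
          * byte offset before calling target_mmap().
          */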
8962 #ifdef TARGET_NR_mmap2
8963     case TARGET_NR_mmap2:
8964 #ifndef MMAP_SHIFT
8965 #define MMAP_SHIFT 12
8966 #endif
8967         ret = target_mmap(arg1, arg2, arg3,
8968                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8969                           arg5, arg6 << MMAP_SHIFT);
8970         return get_errno(ret);
8971 #endif
8972     case TARGET_NR_munmap:
8973         return get_errno(target_munmap(arg1, arg2));
8974     case TARGET_NR_mprotect:
8975         {
8976             TaskState *ts = cpu->opaque;
8977             /* Special hack to detect libc making the stack executable.  */
8978             if ((arg3 & PROT_GROWSDOWN)
8979                 && arg1 >= ts->info->stack_limit
8980                 && arg1 <= ts->info->start_stack) {
8981                 arg3 &= ~PROT_GROWSDOWN;
8982                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8983                 arg1 = ts->info->stack_limit;
8984             }
8985         }
8986         return get_errno(target_mprotect(arg1, arg2, arg3));
8987 #ifdef TARGET_NR_mremap
8988     case TARGET_NR_mremap:
8989         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8990 #endif
8991         /* ??? msync/mlock/munlock are broken for softmmu.  */
8992 #ifdef TARGET_NR_msync
8993     case TARGET_NR_msync:
8994         return get_errno(msync(g2h(arg1), arg2, arg3));
8995 #endif
8996 #ifdef TARGET_NR_mlock
8997     case TARGET_NR_mlock:
8998         return get_errno(mlock(g2h(arg1), arg2));
8999 #endif
9000 #ifdef TARGET_NR_munlock
9001     case TARGET_NR_munlock:
9002         return get_errno(munlock(g2h(arg1), arg2));
9003 #endif
9004 #ifdef TARGET_NR_mlockall
9005     case TARGET_NR_mlockall:
9006         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9007 #endif
9008 #ifdef TARGET_NR_munlockall
9009     case TARGET_NR_munlockall:
9010         return get_errno(munlockall());
9011 #endif
9012 #ifdef TARGET_NR_truncate
9013     case TARGET_NR_truncate:
9014         if (!(p = lock_user_string(arg1)))
9015             return -TARGET_EFAULT;
9016         ret = get_errno(truncate(p, arg2));
9017         unlock_user(p, arg1, 0);
9018         return ret;
9019 #endif
9020 #ifdef TARGET_NR_ftruncate
9021     case TARGET_NR_ftruncate:
9022         return get_errno(ftruncate(arg1, arg2));
9023 #endif
9024     case TARGET_NR_fchmod:
9025         return get_errno(fchmod(arg1, arg2));
9026 #if defined(TARGET_NR_fchmodat)
9027     case TARGET_NR_fchmodat:
9028         if (!(p = lock_user_string(arg2)))
9029             return -TARGET_EFAULT;
9030         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9031         unlock_user(p, arg2, 0);
9032         return ret;
9033 #endif
9034     case TARGET_NR_getpriority:
9035         /* Note that negative values are valid for getpriority, so we must
9036            differentiate based on errno settings.  */
9037         errno = 0;
9038         ret = getpriority(arg1, arg2);
9039         if (ret == -1 && errno != 0) {
9040             return -host_to_target_errno(errno);
9041         }
9042 #ifdef TARGET_ALPHA
9043         /* Return value is the unbiased priority.  Signal no error.  */
9044         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9045 #else
9046         /* Return value is a biased priority to avoid negative numbers.  */
9047         ret = 20 - ret;
9048 #endif
9049         return ret;
9050     case TARGET_NR_setpriority:
9051         return get_errno(setpriority(arg1, arg2, arg3));
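         /*
          * statfs/fstatfs and their 64-bit variants share the conversion
          * code below via the convert_statfs labels: the host struct statfs
          * is filled in first and then copied field by field into the
          * target layout.
          */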
9052 #ifdef TARGET_NR_statfs
9053     case TARGET_NR_statfs:
9054         if (!(p = lock_user_string(arg1))) {
9055             return -TARGET_EFAULT;
9056         }
9057         ret = get_errno(statfs(path(p), &stfs));
9058         unlock_user(p, arg1, 0);
9059     convert_statfs:
9060         if (!is_error(ret)) {
9061             struct target_statfs *target_stfs;
9062 
9063             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9064                 return -TARGET_EFAULT;
9065             __put_user(stfs.f_type, &target_stfs->f_type);
9066             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9067             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9068             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9069             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9070             __put_user(stfs.f_files, &target_stfs->f_files);
9071             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9072             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9073             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9074             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9075             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9076 #ifdef _STATFS_F_FLAGS
9077             __put_user(stfs.f_flags, &target_stfs->f_flags);
9078 #else
9079             __put_user(0, &target_stfs->f_flags);
9080 #endif
9081             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9082             unlock_user_struct(target_stfs, arg2, 1);
9083         }
9084         return ret;
9085 #endif
9086 #ifdef TARGET_NR_fstatfs
9087     case TARGET_NR_fstatfs:
9088         ret = get_errno(fstatfs(arg1, &stfs));
9089         goto convert_statfs;
9090 #endif
9091 #ifdef TARGET_NR_statfs64
9092     case TARGET_NR_statfs64:
9093         if (!(p = lock_user_string(arg1))) {
9094             return -TARGET_EFAULT;
9095         }
9096         ret = get_errno(statfs(path(p), &stfs));
9097         unlock_user(p, arg1, 0);
9098     convert_statfs64:
9099         if (!is_error(ret)) {
9100             struct target_statfs64 *target_stfs;
9101 
9102             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9103                 return -TARGET_EFAULT;
9104             __put_user(stfs.f_type, &target_stfs->f_type);
9105             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9106             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9107             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9108             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9109             __put_user(stfs.f_files, &target_stfs->f_files);
9110             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9111             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9112             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9113             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9114             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9115             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9116             unlock_user_struct(target_stfs, arg3, 1);
9117         }
9118         return ret;
9119     case TARGET_NR_fstatfs64:
9120         ret = get_errno(fstatfs(arg1, &stfs));
9121         goto convert_statfs64;
9122 #endif
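         /*
          * The socket-family syscalls are forwarded to do_*() helpers,
          * which take care of translating sockaddr, msghdr and option
          * buffers between guest and host layouts.
          */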
9123 #ifdef TARGET_NR_socketcall
9124     case TARGET_NR_socketcall:
9125         return do_socketcall(arg1, arg2);
9126 #endif
9127 #ifdef TARGET_NR_accept
9128     case TARGET_NR_accept:
9129         return do_accept4(arg1, arg2, arg3, 0);
9130 #endif
9131 #ifdef TARGET_NR_accept4
9132     case TARGET_NR_accept4:
9133         return do_accept4(arg1, arg2, arg3, arg4);
9134 #endif
9135 #ifdef TARGET_NR_bind
9136     case TARGET_NR_bind:
9137         return do_bind(arg1, arg2, arg3);
9138 #endif
9139 #ifdef TARGET_NR_connect
9140     case TARGET_NR_connect:
9141         return do_connect(arg1, arg2, arg3);
9142 #endif
9143 #ifdef TARGET_NR_getpeername
9144     case TARGET_NR_getpeername:
9145         return do_getpeername(arg1, arg2, arg3);
9146 #endif
9147 #ifdef TARGET_NR_getsockname
9148     case TARGET_NR_getsockname:
9149         return do_getsockname(arg1, arg2, arg3);
9150 #endif
9151 #ifdef TARGET_NR_getsockopt
9152     case TARGET_NR_getsockopt:
9153         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9154 #endif
9155 #ifdef TARGET_NR_listen
9156     case TARGET_NR_listen:
9157         return get_errno(listen(arg1, arg2));
9158 #endif
9159 #ifdef TARGET_NR_recv
9160     case TARGET_NR_recv:
9161         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9162 #endif
9163 #ifdef TARGET_NR_recvfrom
9164     case TARGET_NR_recvfrom:
9165         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9166 #endif
9167 #ifdef TARGET_NR_recvmsg
9168     case TARGET_NR_recvmsg:
9169         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9170 #endif
9171 #ifdef TARGET_NR_send
9172     case TARGET_NR_send:
9173         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9174 #endif
9175 #ifdef TARGET_NR_sendmsg
9176     case TARGET_NR_sendmsg:
9177         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9178 #endif
9179 #ifdef TARGET_NR_sendmmsg
9180     case TARGET_NR_sendmmsg:
9181         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9182 #endif
9183 #ifdef TARGET_NR_recvmmsg
9184     case TARGET_NR_recvmmsg:
9185         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9186 #endif
9187 #ifdef TARGET_NR_sendto
9188     case TARGET_NR_sendto:
9189         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9190 #endif
9191 #ifdef TARGET_NR_shutdown
9192     case TARGET_NR_shutdown:
9193         return get_errno(shutdown(arg1, arg2));
9194 #endif
9195 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9196     case TARGET_NR_getrandom:
9197         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9198         if (!p) {
9199             return -TARGET_EFAULT;
9200         }
9201         ret = get_errno(getrandom(p, arg2, arg3));
9202         unlock_user(p, arg1, ret);
9203         return ret;
9204 #endif
9205 #ifdef TARGET_NR_socket
9206     case TARGET_NR_socket:
9207         return do_socket(arg1, arg2, arg3);
9208 #endif
9209 #ifdef TARGET_NR_socketpair
9210     case TARGET_NR_socketpair:
9211         return do_socketpair(arg1, arg2, arg3, arg4);
9212 #endif
9213 #ifdef TARGET_NR_setsockopt
9214     case TARGET_NR_setsockopt:
9215         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9216 #endif
9217 #if defined(TARGET_NR_syslog)
9218     case TARGET_NR_syslog:
9219         {
9220             int len = arg3;  /* syslog(type, bufp, len): arg3 is the length */
9221 
9222             switch (arg1) {
9223             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9224             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9225             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9226             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9227             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9228             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9229             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9230             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9231                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9232             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9233             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9234             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9235                 {
9236                     if (len < 0) {
9237                         return -TARGET_EINVAL;
9238                     }
9239                     if (len == 0) {
9240                         return 0;
9241                     }
9242                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9243                     if (!p) {
9244                         return -TARGET_EFAULT;
9245                     }
9246                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9247                     unlock_user(p, arg2, arg3);
9248                 }
9249                 return ret;
9250             default:
9251                 return -TARGET_EINVAL;
9252             }
9253         }
9254         break;
9255 #endif
9256     case TARGET_NR_setitimer:
9257         {
9258             struct itimerval value, ovalue, *pvalue;
9259 
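                 /*
                  * The guest struct itimerval is two consecutive target_timevals:
                  * it_interval followed by it_value, hence the two copies below.
                  */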
9260             if (arg2) {
9261                 pvalue = &value;
9262                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9263                     || copy_from_user_timeval(&pvalue->it_value,
9264                                               arg2 + sizeof(struct target_timeval)))
9265                     return -TARGET_EFAULT;
9266             } else {
9267                 pvalue = NULL;
9268             }
9269             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9270             if (!is_error(ret) && arg3) {
9271                 if (copy_to_user_timeval(arg3,
9272                                          &ovalue.it_interval)
9273                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9274                                             &ovalue.it_value))
9275                     return -TARGET_EFAULT;
9276             }
9277         }
9278         return ret;
9279     case TARGET_NR_getitimer:
9280         {
9281             struct itimerval value;
9282 
9283             ret = get_errno(getitimer(arg1, &value));
9284             if (!is_error(ret) && arg2) {
9285                 if (copy_to_user_timeval(arg2,
9286                                          &value.it_interval)
9287                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9288                                             &value.it_value))
9289                     return -TARGET_EFAULT;
9290             }
9291         }
9292         return ret;
9293 #ifdef TARGET_NR_stat
9294     case TARGET_NR_stat:
9295         if (!(p = lock_user_string(arg1))) {
9296             return -TARGET_EFAULT;
9297         }
9298         ret = get_errno(stat(path(p), &st));
9299         unlock_user(p, arg1, 0);
9300         goto do_stat;
9301 #endif
9302 #ifdef TARGET_NR_lstat
9303     case TARGET_NR_lstat:
9304         if (!(p = lock_user_string(arg1))) {
9305             return -TARGET_EFAULT;
9306         }
9307         ret = get_errno(lstat(path(p), &st));
9308         unlock_user(p, arg1, 0);
9309         goto do_stat;
9310 #endif
9311 #ifdef TARGET_NR_fstat
9312     case TARGET_NR_fstat:
9313         {
9314             ret = get_errno(fstat(arg1, &st));
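                 /*
                  * TARGET_NR_stat and TARGET_NR_lstat jump to do_stat below so
                  * that all three variants share the conversion of the host
                  * struct stat into the guest struct target_stat.
                  */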
9315 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9316         do_stat:
9317 #endif
9318             if (!is_error(ret)) {
9319                 struct target_stat *target_st;
9320 
9321                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9322                     return -TARGET_EFAULT;
9323                 memset(target_st, 0, sizeof(*target_st));
9324                 __put_user(st.st_dev, &target_st->st_dev);
9325                 __put_user(st.st_ino, &target_st->st_ino);
9326                 __put_user(st.st_mode, &target_st->st_mode);
9327                 __put_user(st.st_uid, &target_st->st_uid);
9328                 __put_user(st.st_gid, &target_st->st_gid);
9329                 __put_user(st.st_nlink, &target_st->st_nlink);
9330                 __put_user(st.st_rdev, &target_st->st_rdev);
9331                 __put_user(st.st_size, &target_st->st_size);
9332                 __put_user(st.st_blksize, &target_st->st_blksize);
9333                 __put_user(st.st_blocks, &target_st->st_blocks);
9334                 __put_user(st.st_atime, &target_st->target_st_atime);
9335                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9336                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9337 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9338     defined(TARGET_STAT_HAVE_NSEC)
9339                 __put_user(st.st_atim.tv_nsec,
9340                            &target_st->target_st_atime_nsec);
9341                 __put_user(st.st_mtim.tv_nsec,
9342                            &target_st->target_st_mtime_nsec);
9343                 __put_user(st.st_ctim.tv_nsec,
9344                            &target_st->target_st_ctime_nsec);
9345 #endif
9346                 unlock_user_struct(target_st, arg2, 1);
9347             }
9348         }
9349         return ret;
9350 #endif
9351     case TARGET_NR_vhangup:
9352         return get_errno(vhangup());
9353 #ifdef TARGET_NR_syscall
9354     case TARGET_NR_syscall:
9355         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9356                           arg6, arg7, arg8, 0);
9357 #endif
9358 #if defined(TARGET_NR_wait4)
9359     case TARGET_NR_wait4:
9360         {
9361             int status;
9362             abi_long status_ptr = arg2;
9363             struct rusage rusage, *rusage_ptr;
9364             abi_ulong target_rusage = arg4;
9365             abi_long rusage_err;
9366             if (target_rusage)
9367                 rusage_ptr = &rusage;
9368             else
9369                 rusage_ptr = NULL;
9370             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9371             if (!is_error(ret)) {
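                     /*
                      * A return of 0 (e.g. WNOHANG with no state change) means
                      * the host did not write *status, so only copy it back
                      * when a child was actually reaped.
                      */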
9372                 if (status_ptr && ret) {
9373                     status = host_to_target_waitstatus(status);
9374                     if (put_user_s32(status, status_ptr))
9375                         return -TARGET_EFAULT;
9376                 }
9377                 if (target_rusage) {
9378                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9379                     if (rusage_err) {
9380                         ret = rusage_err;
9381                     }
9382                 }
9383             }
9384         }
9385         return ret;
9386 #endif
9387 #ifdef TARGET_NR_swapoff
9388     case TARGET_NR_swapoff:
9389         if (!(p = lock_user_string(arg1)))
9390             return -TARGET_EFAULT;
9391         ret = get_errno(swapoff(p));
9392         unlock_user(p, arg1, 0);
9393         return ret;
9394 #endif
9395     case TARGET_NR_sysinfo:
9396         {
9397             struct target_sysinfo *target_value;
9398             struct sysinfo value;
9399             ret = get_errno(sysinfo(&value));
9400             if (!is_error(ret) && arg1)
9401             {
9402                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9403                     return -TARGET_EFAULT;
9404                 __put_user(value.uptime, &target_value->uptime);
9405                 __put_user(value.loads[0], &target_value->loads[0]);
9406                 __put_user(value.loads[1], &target_value->loads[1]);
9407                 __put_user(value.loads[2], &target_value->loads[2]);
9408                 __put_user(value.totalram, &target_value->totalram);
9409                 __put_user(value.freeram, &target_value->freeram);
9410                 __put_user(value.sharedram, &target_value->sharedram);
9411                 __put_user(value.bufferram, &target_value->bufferram);
9412                 __put_user(value.totalswap, &target_value->totalswap);
9413                 __put_user(value.freeswap, &target_value->freeswap);
9414                 __put_user(value.procs, &target_value->procs);
9415                 __put_user(value.totalhigh, &target_value->totalhigh);
9416                 __put_user(value.freehigh, &target_value->freehigh);
9417                 __put_user(value.mem_unit, &target_value->mem_unit);
9418                 unlock_user_struct(target_value, arg1, 1);
9419             }
9420         }
9421         return ret;
9422 #ifdef TARGET_NR_ipc
9423     case TARGET_NR_ipc:
9424         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9425 #endif
9426 #ifdef TARGET_NR_semget
9427     case TARGET_NR_semget:
9428         return get_errno(semget(arg1, arg2, arg3));
9429 #endif
9430 #ifdef TARGET_NR_semop
9431     case TARGET_NR_semop:
9432         return do_semop(arg1, arg2, arg3);
9433 #endif
9434 #ifdef TARGET_NR_semctl
9435     case TARGET_NR_semctl:
9436         return do_semctl(arg1, arg2, arg3, arg4);
9437 #endif
9438 #ifdef TARGET_NR_msgctl
9439     case TARGET_NR_msgctl:
9440         return do_msgctl(arg1, arg2, arg3);
9441 #endif
9442 #ifdef TARGET_NR_msgget
9443     case TARGET_NR_msgget:
9444         return get_errno(msgget(arg1, arg2));
9445 #endif
9446 #ifdef TARGET_NR_msgrcv
9447     case TARGET_NR_msgrcv:
9448         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9449 #endif
9450 #ifdef TARGET_NR_msgsnd
9451     case TARGET_NR_msgsnd:
9452         return do_msgsnd(arg1, arg2, arg3, arg4);
9453 #endif
9454 #ifdef TARGET_NR_shmget
9455     case TARGET_NR_shmget:
9456         return get_errno(shmget(arg1, arg2, arg3));
9457 #endif
9458 #ifdef TARGET_NR_shmctl
9459     case TARGET_NR_shmctl:
9460         return do_shmctl(arg1, arg2, arg3);
9461 #endif
9462 #ifdef TARGET_NR_shmat
9463     case TARGET_NR_shmat:
9464         return do_shmat(cpu_env, arg1, arg2, arg3);
9465 #endif
9466 #ifdef TARGET_NR_shmdt
9467     case TARGET_NR_shmdt:
9468         return do_shmdt(arg1);
9469 #endif
9470     case TARGET_NR_fsync:
9471         return get_errno(fsync(arg1));
9472     case TARGET_NR_clone:
9473         /* Linux manages to have three different orderings for its
9474          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9475          * match the kernel's CONFIG_CLONE_* settings.
9476          * Microblaze is further special in that it uses a sixth
9477          * implicit argument to clone for the TLS pointer.
9478          */
9479 #if defined(TARGET_MICROBLAZE)
9480         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9481 #elif defined(TARGET_CLONE_BACKWARDS)
9482         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9483 #elif defined(TARGET_CLONE_BACKWARDS2)
9484         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9485 #else
9486         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9487 #endif
9488         return ret;
9489 #ifdef __NR_exit_group
9490         /* new thread calls */
9491     case TARGET_NR_exit_group:
9492         preexit_cleanup(cpu_env, arg1);
9493         return get_errno(exit_group(arg1));
9494 #endif
9495     case TARGET_NR_setdomainname:
9496         if (!(p = lock_user_string(arg1)))
9497             return -TARGET_EFAULT;
9498         ret = get_errno(setdomainname(p, arg2));
9499         unlock_user(p, arg1, 0);
9500         return ret;
9501     case TARGET_NR_uname:
9502         /* no need to transcode because we use the linux syscall */
9503         {
9504             struct new_utsname * buf;
9505 
9506             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9507                 return -TARGET_EFAULT;
9508             ret = get_errno(sys_uname(buf));
9509             if (!is_error(ret)) {
9510                 /* Overwrite the native machine name with whatever is being
9511                    emulated. */
9512                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9513                           sizeof(buf->machine));
9514                 /* Allow the user to override the reported release.  */
9515                 if (qemu_uname_release && *qemu_uname_release) {
9516                     g_strlcpy(buf->release, qemu_uname_release,
9517                               sizeof(buf->release));
9518                 }
9519             }
9520             unlock_user_struct(buf, arg1, 1);
9521         }
9522         return ret;
9523 #ifdef TARGET_I386
9524     case TARGET_NR_modify_ldt:
9525         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9526 #if !defined(TARGET_X86_64)
9527     case TARGET_NR_vm86:
9528         return do_vm86(cpu_env, arg1, arg2);
9529 #endif
9530 #endif
9531 #if defined(TARGET_NR_adjtimex)
9532     case TARGET_NR_adjtimex:
9533         {
9534             struct timex host_buf;
9535 
9536             if (target_to_host_timex(&host_buf, arg1) != 0) {
9537                 return -TARGET_EFAULT;
9538             }
9539             ret = get_errno(adjtimex(&host_buf));
9540             if (!is_error(ret)) {
9541                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9542                     return -TARGET_EFAULT;
9543                 }
9544             }
9545         }
9546         return ret;
9547 #endif
9548 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9549     case TARGET_NR_clock_adjtime:
9550         {
9551             struct timex htx, *phtx = &htx;
9552 
9553             if (target_to_host_timex(phtx, arg2) != 0) {
9554                 return -TARGET_EFAULT;
9555             }
9556             ret = get_errno(clock_adjtime(arg1, phtx));
9557             if (!is_error(ret) && phtx) {
9558                 if (host_to_target_timex(arg2, phtx) != 0) {
9559                     return -TARGET_EFAULT;
9560                 }
9561             }
9562         }
9563         return ret;
9564 #endif
9565     case TARGET_NR_getpgid:
9566         return get_errno(getpgid(arg1));
9567     case TARGET_NR_fchdir:
9568         return get_errno(fchdir(arg1));
9569     case TARGET_NR_personality:
9570         return get_errno(personality(arg1));
9571 #ifdef TARGET_NR__llseek /* Not on alpha */
9572     case TARGET_NR__llseek:
9573         {
9574             int64_t res;
9575 #if !defined(__NR_llseek)
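                 /*
                  * No host llseek: rebuild the 64-bit offset from the two
                  * 32-bit halves (arg2 = high, arg3 = low) and use lseek.
                  */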
9576             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9577             if (res == -1) {
9578                 ret = get_errno(res);
9579             } else {
9580                 ret = 0;
9581             }
9582 #else
9583             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9584 #endif
9585             if ((ret == 0) && put_user_s64(res, arg4)) {
9586                 return -TARGET_EFAULT;
9587             }
9588         }
9589         return ret;
9590 #endif
9591 #ifdef TARGET_NR_getdents
9592     case TARGET_NR_getdents:
9593 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9594 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
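             /*
              * The host linux_dirent carries 64-bit d_ino/d_off here while the
              * target expects 32-bit fields, so read into a host bounce buffer
              * and convert each record into the guest buffer.
              */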
9595         {
9596             struct target_dirent *target_dirp;
9597             struct linux_dirent *dirp;
9598             abi_long count = arg3;
9599 
9600             dirp = g_try_malloc(count);
9601             if (!dirp) {
9602                 return -TARGET_ENOMEM;
9603             }
9604 
9605             ret = get_errno(sys_getdents(arg1, dirp, count));
9606             if (!is_error(ret)) {
9607                 struct linux_dirent *de;
9608                 struct target_dirent *tde;
9609                 int len = ret;
9610                 int reclen, treclen;
9611                 int count1, tnamelen;
9612 
9613                 count1 = 0;
9614                 de = dirp;
9615                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) {
9616                     g_free(dirp);   /* don't leak the host bounce buffer */
                         return -TARGET_EFAULT;
                     }
9617                 tde = target_dirp;
9618                 while (len > 0) {
9619                     reclen = de->d_reclen;
9620                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9621                     assert(tnamelen >= 0);
9622                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9623                     assert(count1 + treclen <= count);
9624                     tde->d_reclen = tswap16(treclen);
9625                     tde->d_ino = tswapal(de->d_ino);
9626                     tde->d_off = tswapal(de->d_off);
9627                     memcpy(tde->d_name, de->d_name, tnamelen);
9628                     de = (struct linux_dirent *)((char *)de + reclen);
9629                     len -= reclen;
9630                     tde = (struct target_dirent *)((char *)tde + treclen);
9631                     count1 += treclen;
9632                 }
9633                 ret = count1;
9634                 unlock_user(target_dirp, arg2, ret);
9635             }
9636             g_free(dirp);
9637         }
9638 #else
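             /*
              * Host and target dirent layouts match: read straight into the
              * guest buffer and just byte-swap each record in place.
              */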
9639         {
9640             struct linux_dirent *dirp;
9641             abi_long count = arg3;
9642 
9643             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9644                 return -TARGET_EFAULT;
9645             ret = get_errno(sys_getdents(arg1, dirp, count));
9646             if (!is_error(ret)) {
9647                 struct linux_dirent *de;
9648                 int len = ret;
9649                 int reclen;
9650                 de = dirp;
9651                 while (len > 0) {
9652                     reclen = de->d_reclen;
9653                     if (reclen > len)
9654                         break;
9655                     de->d_reclen = tswap16(reclen);
9656                     tswapls(&de->d_ino);
9657                     tswapls(&de->d_off);
9658                     de = (struct linux_dirent *)((char *)de + reclen);
9659                     len -= reclen;
9660                 }
9661             }
9662             unlock_user(dirp, arg2, ret);
9663         }
9664 #endif
9665 #else
9666         /* Implement getdents in terms of getdents64 */
9667         {
9668             struct linux_dirent64 *dirp;
9669             abi_long count = arg3;
9670 
9671             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9672             if (!dirp) {
9673                 return -TARGET_EFAULT;
9674             }
9675             ret = get_errno(sys_getdents64(arg1, dirp, count));
9676             if (!is_error(ret)) {
9677                 /* Convert the dirent64 structs to target dirent.  We do this
9678                  * in-place, since we can guarantee that a target_dirent is no
9679                  * larger than a dirent64; however this means we have to be
9680                  * careful to read everything before writing in the new format.
9681                  */
9682                 struct linux_dirent64 *de;
9683                 struct target_dirent *tde;
9684                 int len = ret;
9685                 int tlen = 0;
9686 
9687                 de = dirp;
9688                 tde = (struct target_dirent *)dirp;
9689                 while (len > 0) {
9690                     int namelen, treclen;
9691                     int reclen = de->d_reclen;
9692                     uint64_t ino = de->d_ino;
9693                     int64_t off = de->d_off;
9694                     uint8_t type = de->d_type;
9695 
9696                     namelen = strlen(de->d_name);
9697                     treclen = offsetof(struct target_dirent, d_name)
9698                         + namelen + 2;
9699                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9700 
9701                     memmove(tde->d_name, de->d_name, namelen + 1);
9702                     tde->d_ino = tswapal(ino);
9703                     tde->d_off = tswapal(off);
9704                     tde->d_reclen = tswap16(treclen);
9705                     /* The target_dirent type is in what was formerly a padding
9706                      * byte at the end of the structure:
9707                      */
9708                     *(((char *)tde) + treclen - 1) = type;
9709 
9710                     de = (struct linux_dirent64 *)((char *)de + reclen);
9711                     tde = (struct target_dirent *)((char *)tde + treclen);
9712                     len -= reclen;
9713                     tlen += treclen;
9714                 }
9715                 ret = tlen;
9716             }
9717             unlock_user(dirp, arg2, ret);
9718         }
9719 #endif
9720         return ret;
9721 #endif /* TARGET_NR_getdents */
9722 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9723     case TARGET_NR_getdents64:
9724         {
9725             struct linux_dirent64 *dirp;
9726             abi_long count = arg3;
9727             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9728                 return -TARGET_EFAULT;
9729             ret = get_errno(sys_getdents64(arg1, dirp, count));
9730             if (!is_error(ret)) {
9731                 struct linux_dirent64 *de;
9732                 int len = ret;
9733                 int reclen;
9734                 de = dirp;
9735                 while (len > 0) {
9736                     reclen = de->d_reclen;
9737                     if (reclen > len)
9738                         break;
9739                     de->d_reclen = tswap16(reclen);
9740                     tswap64s((uint64_t *)&de->d_ino);
9741                     tswap64s((uint64_t *)&de->d_off);
9742                     de = (struct linux_dirent64 *)((char *)de + reclen);
9743                     len -= reclen;
9744                 }
9745             }
9746             unlock_user(dirp, arg2, ret);
9747         }
9748         return ret;
9749 #endif /* TARGET_NR_getdents64 */
9750 #if defined(TARGET_NR__newselect)
9751     case TARGET_NR__newselect:
9752         return do_select(arg1, arg2, arg3, arg4, arg5);
9753 #endif
9754 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9755 # ifdef TARGET_NR_poll
9756     case TARGET_NR_poll:
9757 # endif
9758 # ifdef TARGET_NR_ppoll
9759     case TARGET_NR_ppoll:
9760 # endif
9761         {
9762             struct target_pollfd *target_pfd;
9763             unsigned int nfds = arg2;
9764             struct pollfd *pfd;
9765             unsigned int i;
9766 
9767             pfd = NULL;
9768             target_pfd = NULL;
9769             if (nfds) {
9770                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9771                     return -TARGET_EINVAL;
9772                 }
9773 
9774                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9775                                        sizeof(struct target_pollfd) * nfds, 1);
9776                 if (!target_pfd) {
9777                     return -TARGET_EFAULT;
9778                 }
9779 
9780                 pfd = alloca(sizeof(struct pollfd) * nfds);
9781                 for (i = 0; i < nfds; i++) {
9782                     pfd[i].fd = tswap32(target_pfd[i].fd);
9783                     pfd[i].events = tswap16(target_pfd[i].events);
9784                 }
9785             }
9786 
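                 /*
                  * poll and ppoll share the pollfd conversion above; they
                  * differ only in the timeout and signal-mask handling below.
                  */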
9787             switch (num) {
9788 # ifdef TARGET_NR_ppoll
9789             case TARGET_NR_ppoll:
9790             {
9791                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9792                 target_sigset_t *target_set;
9793                 sigset_t _set, *set = &_set;
9794 
9795                 if (arg3) {
9796                     if (target_to_host_timespec(timeout_ts, arg3)) {
9797                         unlock_user(target_pfd, arg1, 0);
9798                         return -TARGET_EFAULT;
9799                     }
9800                 } else {
9801                     timeout_ts = NULL;
9802                 }
9803 
9804                 if (arg4) {
9805                     if (arg5 != sizeof(target_sigset_t)) {
9806                         unlock_user(target_pfd, arg1, 0);
9807                         return -TARGET_EINVAL;
9808                     }
9809 
9810                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9811                     if (!target_set) {
9812                         unlock_user(target_pfd, arg1, 0);
9813                         return -TARGET_EFAULT;
9814                     }
9815                     target_to_host_sigset(set, target_set);
9816                 } else {
9817                     set = NULL;
9818                 }
9819 
9820                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9821                                            set, SIGSET_T_SIZE));
9822 
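                     /*
                      * The host ppoll may update the timespec with the time
                      * remaining; propagate that back to the guest.
                      */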
9823                 if (!is_error(ret) && arg3) {
9824                     host_to_target_timespec(arg3, timeout_ts);
9825                 }
9826                 if (arg4) {
9827                     unlock_user(target_set, arg4, 0);
9828                 }
9829                 break;
9830             }
9831 # endif
9832 # ifdef TARGET_NR_poll
9833             case TARGET_NR_poll:
9834             {
9835                 struct timespec ts, *pts;
9836 
9837                 if (arg3 >= 0) {
9838                     /* Convert ms to secs, ns */
9839                     ts.tv_sec = arg3 / 1000;
9840                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9841                     pts = &ts;
9842                 } else {
9843                     /* -ve poll() timeout means "infinite" */
9844                     pts = NULL;
9845                 }
9846                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9847                 break;
9848             }
9849 # endif
9850             default:
9851                 g_assert_not_reached();
9852             }
9853 
9854             if (!is_error(ret)) {
9855                 for (i = 0; i < nfds; i++) {
9856                     target_pfd[i].revents = tswap16(pfd[i].revents);
9857                 }
9858             }
9859             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9860         }
9861         return ret;
9862 #endif
9863     case TARGET_NR_flock:
9864         /* NOTE: the flock constant seems to be the same for every
9865            Linux platform */
9866         return get_errno(safe_flock(arg1, arg2));
9867     case TARGET_NR_readv:
9868         {
9869             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9870             if (vec != NULL) {
9871                 ret = get_errno(safe_readv(arg1, vec, arg3));
9872                 unlock_iovec(vec, arg2, arg3, 1);
9873             } else {
9874                 ret = -host_to_target_errno(errno);
9875             }
9876         }
9877         return ret;
9878     case TARGET_NR_writev:
9879         {
9880             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9881             if (vec != NULL) {
9882                 ret = get_errno(safe_writev(arg1, vec, arg3));
9883                 unlock_iovec(vec, arg2, arg3, 0);
9884             } else {
9885                 ret = -host_to_target_errno(errno);
9886             }
9887         }
9888         return ret;
9889 #if defined(TARGET_NR_preadv)
9890     case TARGET_NR_preadv:
9891         {
9892             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9893             if (vec != NULL) {
9894                 unsigned long low, high;
9895 
9896                 target_to_host_low_high(arg4, arg5, &low, &high);
9897                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9898                 unlock_iovec(vec, arg2, arg3, 1);
9899             } else {
9900                 ret = -host_to_target_errno(errno);
9901             }
9902         }
9903         return ret;
9904 #endif
9905 #if defined(TARGET_NR_pwritev)
9906     case TARGET_NR_pwritev:
9907         {
9908             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9909             if (vec != NULL) {
9910                 unsigned long low, high;
9911 
9912                 target_to_host_low_high(arg4, arg5, &low, &high);
9913                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9914                 unlock_iovec(vec, arg2, arg3, 0);
9915             } else {
9916                 ret = -host_to_target_errno(errno);
9917             }
9918         }
9919         return ret;
9920 #endif
9921     case TARGET_NR_getsid:
9922         return get_errno(getsid(arg1));
9923 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9924     case TARGET_NR_fdatasync:
9925         return get_errno(fdatasync(arg1));
9926 #endif
9927 #ifdef TARGET_NR__sysctl
9928     case TARGET_NR__sysctl:
9929         /* We don't implement this, but ENOTDIR is always a safe
9930            return value. */
9931         return -TARGET_ENOTDIR;
9932 #endif
9933     case TARGET_NR_sched_getaffinity:
9934         {
9935             unsigned int mask_size;
9936             unsigned long *mask;
9937 
9938             /*
9939              * sched_getaffinity needs multiples of ulong, so need to take
9940              * care of mismatches between target ulong and host ulong sizes.
9941              */
9942             if (arg2 & (sizeof(abi_ulong) - 1)) {
9943                 return -TARGET_EINVAL;
9944             }
9945             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9946 
9947             mask = alloca(mask_size);
9948             memset(mask, 0, mask_size);
9949             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9950 
9951             if (!is_error(ret)) {
9952                 if (ret > arg2) {
9953                     /* More data returned than the caller's buffer will fit.
9954                      * This only happens if sizeof(abi_long) < sizeof(long)
9955                      * and the caller passed us a buffer holding an odd number
9956                      * of abi_longs. If the host kernel is actually using the
9957                      * extra 4 bytes then fail EINVAL; otherwise we can just
9958                      * ignore them and only copy the interesting part.
9959                      */
9960                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9961                     if (numcpus > arg2 * 8) {
9962                         return -TARGET_EINVAL;
9963                     }
9964                     ret = arg2;
9965                 }
9966 
9967                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9968                     return -TARGET_EFAULT;
9969                 }
9970             }
9971         }
9972         return ret;
9973     case TARGET_NR_sched_setaffinity:
9974         {
9975             unsigned int mask_size;
9976             unsigned long *mask;
9977 
9978             /*
9979              * sched_setaffinity needs multiples of ulong, so need to take
9980              * care of mismatches between target ulong and host ulong sizes.
9981              */
9982             if (arg2 & (sizeof(abi_ulong) - 1)) {
9983                 return -TARGET_EINVAL;
9984             }
9985             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9986             mask = alloca(mask_size);
9987 
9988             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9989             if (ret) {
9990                 return ret;
9991             }
9992 
9993             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9994         }
9995     case TARGET_NR_getcpu:
9996         {
9997             unsigned cpu, node;
9998             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9999                                        arg2 ? &node : NULL,
10000                                        NULL));
10001             if (is_error(ret)) {
10002                 return ret;
10003             }
10004             if (arg1 && put_user_u32(cpu, arg1)) {
10005                 return -TARGET_EFAULT;
10006             }
10007             if (arg2 && put_user_u32(node, arg2)) {
10008                 return -TARGET_EFAULT;
10009             }
10010         }
10011         return ret;
10012     case TARGET_NR_sched_setparam:
10013         {
10014             struct sched_param *target_schp;
10015             struct sched_param schp;
10016 
10017             if (arg2 == 0) {
10018                 return -TARGET_EINVAL;
10019             }
10020             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10021                 return -TARGET_EFAULT;
10022             schp.sched_priority = tswap32(target_schp->sched_priority);
10023             unlock_user_struct(target_schp, arg2, 0);
10024             return get_errno(sched_setparam(arg1, &schp));
10025         }
10026     case TARGET_NR_sched_getparam:
10027         {
10028             struct sched_param *target_schp;
10029             struct sched_param schp;
10030 
10031             if (arg2 == 0) {
10032                 return -TARGET_EINVAL;
10033             }
10034             ret = get_errno(sched_getparam(arg1, &schp));
10035             if (!is_error(ret)) {
10036                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10037                     return -TARGET_EFAULT;
10038                 target_schp->sched_priority = tswap32(schp.sched_priority);
10039                 unlock_user_struct(target_schp, arg2, 1);
10040             }
10041         }
10042         return ret;
10043     case TARGET_NR_sched_setscheduler:
10044         {
10045             struct sched_param *target_schp;
10046             struct sched_param schp;
10047             if (arg3 == 0) {
10048                 return -TARGET_EINVAL;
10049             }
10050             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10051                 return -TARGET_EFAULT;
10052             schp.sched_priority = tswap32(target_schp->sched_priority);
10053             unlock_user_struct(target_schp, arg3, 0);
10054             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10055         }
10056     case TARGET_NR_sched_getscheduler:
10057         return get_errno(sched_getscheduler(arg1));
10058     case TARGET_NR_sched_yield:
10059         return get_errno(sched_yield());
10060     case TARGET_NR_sched_get_priority_max:
10061         return get_errno(sched_get_priority_max(arg1));
10062     case TARGET_NR_sched_get_priority_min:
10063         return get_errno(sched_get_priority_min(arg1));
10064 #ifdef TARGET_NR_sched_rr_get_interval
10065     case TARGET_NR_sched_rr_get_interval:
10066         {
10067             struct timespec ts;
10068             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10069             if (!is_error(ret)) {
10070                 ret = host_to_target_timespec(arg2, &ts);
10071             }
10072         }
10073         return ret;
10074 #endif
10075 #if defined(TARGET_NR_nanosleep)
10076     case TARGET_NR_nanosleep:
10077         {
10078             struct timespec req, rem;
10079             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
10080             ret = get_errno(safe_nanosleep(&req, &rem));
10081             if (is_error(ret) && arg2 &&
10082                 host_to_target_timespec(arg2, &rem)) {
10083                 return -TARGET_EFAULT;
                  }
10084         }
10085         return ret;
10086 #endif
10087     case TARGET_NR_prctl:
10088         switch (arg1) {
10089         case PR_GET_PDEATHSIG:
10090         {
10091             int deathsig;
10092             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10093             if (!is_error(ret) && arg2
10094                 && put_user_ual(deathsig, arg2)) {
10095                 return -TARGET_EFAULT;
10096             }
10097             return ret;
10098         }
10099 #ifdef PR_GET_NAME
10100         case PR_GET_NAME:
10101         {
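              /*
               * The kernel task name (comm) is at most 16 bytes, including the
               * trailing NUL, for both PR_GET_NAME and PR_SET_NAME.
               */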
10102             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10103             if (!name) {
10104                 return -TARGET_EFAULT;
10105             }
10106             ret = get_errno(prctl(arg1, (unsigned long)name,
10107                                   arg3, arg4, arg5));
10108             unlock_user(name, arg2, 16);
10109             return ret;
10110         }
10111         case PR_SET_NAME:
10112         {
10113             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10114             if (!name) {
10115                 return -TARGET_EFAULT;
10116             }
10117             ret = get_errno(prctl(arg1, (unsigned long)name,
10118                                   arg3, arg4, arg5));
10119             unlock_user(name, arg2, 0);
10120             return ret;
10121         }
10122 #endif
10123 #ifdef TARGET_MIPS
10124         case TARGET_PR_GET_FP_MODE:
10125         {
10126             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10127             ret = 0;
10128             if (env->CP0_Status & (1 << CP0St_FR)) {
10129                 ret |= TARGET_PR_FP_MODE_FR;
10130             }
10131             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10132                 ret |= TARGET_PR_FP_MODE_FRE;
10133             }
10134             return ret;
10135         }
10136         case TARGET_PR_SET_FP_MODE:
10137         {
10138             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10139             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10140             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10141             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10142             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10143 
10144             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10145                                             TARGET_PR_FP_MODE_FRE;
10146 
10147             /* If nothing to change, return right away, successfully.  */
10148             if (old_fr == new_fr && old_fre == new_fre) {
10149                 return 0;
10150             }
10151             /* Check the value is valid */
10152             if (arg2 & ~known_bits) {
10153                 return -TARGET_EOPNOTSUPP;
10154             }
10155             /* Setting FRE without FR is not supported.  */
10156             if (new_fre && !new_fr) {
10157                 return -TARGET_EOPNOTSUPP;
10158             }
10159             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10160                 /* FR1 is not supported */
10161                 return -TARGET_EOPNOTSUPP;
10162             }
10163             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10164                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10165                 /* cannot set FR=0 */
10166                 return -TARGET_EOPNOTSUPP;
10167             }
10168             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10169                 /* Cannot set FRE=1 */
10170                 return -TARGET_EOPNOTSUPP;
10171             }
10172 
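              /*
               * With FR=0 an even/odd register pair holds one 64-bit value;
               * with FR=1 every register is a full 64 bits.  Move the odd
               * registers' words into (or out of) the upper halves of the
               * even registers so their contents survive the mode switch.
               */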
10173             int i;
10174             fpr_t *fpr = env->active_fpu.fpr;
10175             for (i = 0; i < 32 ; i += 2) {
10176                 if (!old_fr && new_fr) {
10177                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10178                 } else if (old_fr && !new_fr) {
10179                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10180                 }
10181             }
10182 
10183             if (new_fr) {
10184                 env->CP0_Status |= (1 << CP0St_FR);
10185                 env->hflags |= MIPS_HFLAG_F64;
10186             } else {
10187                 env->CP0_Status &= ~(1 << CP0St_FR);
10188                 env->hflags &= ~MIPS_HFLAG_F64;
10189             }
10190             if (new_fre) {
10191                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10192                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10193                     env->hflags |= MIPS_HFLAG_FRE;
10194                 }
10195             } else {
10196                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10197                 env->hflags &= ~MIPS_HFLAG_FRE;
10198             }
10199 
10200             return 0;
10201         }
10202 #endif /* MIPS */
10203 #ifdef TARGET_AARCH64
10204         case TARGET_PR_SVE_SET_VL:
10205             /*
10206              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10207              * PR_SVE_VL_INHERIT.  Note the kernel definition
10208              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10209              * even though the current architectural maximum is VQ=16.
10210              */
10211             ret = -TARGET_EINVAL;
10212             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10213                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10214                 CPUARMState *env = cpu_env;
10215                 ARMCPU *cpu = env_archcpu(env);
10216                 uint32_t vq, old_vq;
10217 
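                  /*
                   * arg2 is the requested vector length in bytes; ZCR_EL1.LEN
                   * stores VQ - 1, where VQ is the length in 128-bit quadwords,
                   * so convert and clamp to what this CPU supports.
                   */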
10218                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10219                 vq = MAX(arg2 / 16, 1);
10220                 vq = MIN(vq, cpu->sve_max_vq);
10221 
10222                 if (vq < old_vq) {
10223                     aarch64_sve_narrow_vq(env, vq);
10224                 }
10225                 env->vfp.zcr_el[1] = vq - 1;
10226                 arm_rebuild_hflags(env);
10227                 ret = vq * 16;
10228             }
10229             return ret;
10230         case TARGET_PR_SVE_GET_VL:
10231             ret = -TARGET_EINVAL;
10232             {
10233                 ARMCPU *cpu = env_archcpu(cpu_env);
10234                 if (cpu_isar_feature(aa64_sve, cpu)) {
10235                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10236                 }
10237             }
10238             return ret;
10239         case TARGET_PR_PAC_RESET_KEYS:
10240             {
10241                 CPUARMState *env = cpu_env;
10242                 ARMCPU *cpu = env_archcpu(env);
10243 
10244                 if (arg3 || arg4 || arg5) {
10245                     return -TARGET_EINVAL;
10246                 }
10247                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10248                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10249                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10250                                TARGET_PR_PAC_APGAKEY);
10251                     int ret = 0;
10252                     Error *err = NULL;
10253 
10254                     if (arg2 == 0) {
10255                         arg2 = all;
10256                     } else if (arg2 & ~all) {
10257                         return -TARGET_EINVAL;
10258                     }
10259                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10260                         ret |= qemu_guest_getrandom(&env->keys.apia,
10261                                                     sizeof(ARMPACKey), &err);
10262                     }
10263                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10264                         ret |= qemu_guest_getrandom(&env->keys.apib,
10265                                                     sizeof(ARMPACKey), &err);
10266                     }
10267                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10268                         ret |= qemu_guest_getrandom(&env->keys.apda,
10269                                                     sizeof(ARMPACKey), &err);
10270                     }
10271                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10272                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10273                                                     sizeof(ARMPACKey), &err);
10274                     }
10275                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10276                         ret |= qemu_guest_getrandom(&env->keys.apga,
10277                                                     sizeof(ARMPACKey), &err);
10278                     }
10279                     if (ret != 0) {
10280                         /*
10281                          * Some unknown failure in the crypto.  The best
10282                          * we can do is log it and fail the syscall.
10283                          * The real syscall cannot fail this way.
10284                          */
10285                         qemu_log_mask(LOG_UNIMP,
10286                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10287                                       error_get_pretty(err));
10288                         error_free(err);
10289                         return -TARGET_EIO;
10290                     }
10291                     return 0;
10292                 }
10293             }
10294             return -TARGET_EINVAL;
10295 #endif /* AARCH64 */
10296         case PR_GET_SECCOMP:
10297         case PR_SET_SECCOMP:
10298             /* Disable seccomp to prevent the target disabling syscalls we
10299              * need. */
10300             return -TARGET_EINVAL;
10301         default:
10302             /* Most prctl options have no pointer arguments */
10303             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10304         }
10305         break;
10306 #ifdef TARGET_NR_arch_prctl
10307     case TARGET_NR_arch_prctl:
10308 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10309         return do_arch_prctl(cpu_env, arg1, arg2);
10310 #else
10311 #error unreachable
10312 #endif
10313 #endif
10314 #ifdef TARGET_NR_pread64
10315     case TARGET_NR_pread64:
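          /*
           * Targets that pass 64-bit values in aligned register pairs insert a
           * padding argument, so the offset halves arrive one slot later
           * (arg5/arg6 instead of arg4/arg5).
           */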
10316         if (regpairs_aligned(cpu_env, num)) {
10317             arg4 = arg5;
10318             arg5 = arg6;
10319         }
10320         if (arg2 == 0 && arg3 == 0) {
10321             /* Special-case NULL buffer and zero length, which should succeed */
10322             p = 0;
10323         } else {
10324             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10325             if (!p) {
10326                 return -TARGET_EFAULT;
10327             }
10328         }
10329         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10330         unlock_user(p, arg2, ret);
10331         return ret;
10332     case TARGET_NR_pwrite64:
10333         if (regpairs_aligned(cpu_env, num)) {
10334             arg4 = arg5;
10335             arg5 = arg6;
10336         }
10337         if (arg2 == 0 && arg3 == 0) {
10338             /* Special-case NULL buffer and zero length, which should succeed */
10339             p = 0;
10340         } else {
10341             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10342             if (!p) {
10343                 return -TARGET_EFAULT;
10344             }
10345         }
10346         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10347         unlock_user(p, arg2, 0);
10348         return ret;
10349 #endif
10350     case TARGET_NR_getcwd:
10351         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10352             return -TARGET_EFAULT;
10353         ret = get_errno(sys_getcwd1(p, arg2));
10354         unlock_user(p, arg1, ret);
10355         return ret;
10356     case TARGET_NR_capget:
10357     case TARGET_NR_capset:
10358     {
10359         struct target_user_cap_header *target_header;
10360         struct target_user_cap_data *target_data = NULL;
10361         struct __user_cap_header_struct header;
10362         struct __user_cap_data_struct data[2];
10363         struct __user_cap_data_struct *dataptr = NULL;
10364         int i, target_datalen;
10365         int data_items = 1;
10366 
10367         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10368             return -TARGET_EFAULT;
10369         }
10370         header.version = tswap32(target_header->version);
10371         header.pid = tswap32(target_header->pid);
10372 
10373         if (header.version != _LINUX_CAPABILITY_VERSION) {
10374             /* Version 2 and up takes pointer to two user_data structs */
10375             data_items = 2;
10376         }
10377 
10378         target_datalen = sizeof(*target_data) * data_items;
10379 
10380         if (arg2) {
10381             if (num == TARGET_NR_capget) {
10382                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10383             } else {
10384                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10385             }
10386             if (!target_data) {
10387                 unlock_user_struct(target_header, arg1, 0);
10388                 return -TARGET_EFAULT;
10389             }
10390 
10391             if (num == TARGET_NR_capset) {
10392                 for (i = 0; i < data_items; i++) {
10393                     data[i].effective = tswap32(target_data[i].effective);
10394                     data[i].permitted = tswap32(target_data[i].permitted);
10395                     data[i].inheritable = tswap32(target_data[i].inheritable);
10396                 }
10397             }
10398 
10399             dataptr = data;
10400         }
10401 
10402         if (num == TARGET_NR_capget) {
10403             ret = get_errno(capget(&header, dataptr));
10404         } else {
10405             ret = get_errno(capset(&header, dataptr));
10406         }
10407 
10408         /* The kernel always updates version for both capget and capset */
10409         target_header->version = tswap32(header.version);
10410         unlock_user_struct(target_header, arg1, 1);
10411 
10412         if (arg2) {
10413             if (num == TARGET_NR_capget) {
10414                 for (i = 0; i < data_items; i++) {
10415                     target_data[i].effective = tswap32(data[i].effective);
10416                     target_data[i].permitted = tswap32(data[i].permitted);
10417                     target_data[i].inheritable = tswap32(data[i].inheritable);
10418                 }
10419                 unlock_user(target_data, arg2, target_datalen);
10420             } else {
10421                 unlock_user(target_data, arg2, 0);
10422             }
10423         }
10424         return ret;
10425     }
10426     case TARGET_NR_sigaltstack:
10427         return do_sigaltstack(arg1, arg2,
10428                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10429 
10430 #ifdef CONFIG_SENDFILE
10431 #ifdef TARGET_NR_sendfile
10432     case TARGET_NR_sendfile:
10433     {
10434         off_t *offp = NULL;
10435         off_t off;
10436         if (arg3) {
10437             ret = get_user_sal(off, arg3);
10438             if (is_error(ret)) {
10439                 return ret;
10440             }
10441             offp = &off;
10442         }
10443         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10444         if (!is_error(ret) && arg3) {
10445             abi_long ret2 = put_user_sal(off, arg3);
10446             if (is_error(ret2)) {
10447                 ret = ret2;
10448             }
10449         }
10450         return ret;
10451     }
10452 #endif
10453 #ifdef TARGET_NR_sendfile64
10454     case TARGET_NR_sendfile64:
10455     {
10456         off_t *offp = NULL;
10457         off_t off;
10458         if (arg3) {
10459             ret = get_user_s64(off, arg3);
10460             if (is_error(ret)) {
10461                 return ret;
10462             }
10463             offp = &off;
10464         }
10465         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10466         if (!is_error(ret) && arg3) {
10467             abi_long ret2 = put_user_s64(off, arg3);
10468             if (is_error(ret2)) {
10469                 ret = ret2;
10470             }
10471         }
10472         return ret;
10473     }
10474 #endif
10475 #endif
10476 #ifdef TARGET_NR_vfork
10477     case TARGET_NR_vfork:
10478         return get_errno(do_fork(cpu_env,
10479                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10480                          0, 0, 0, 0));
10481 #endif
10482 #ifdef TARGET_NR_ugetrlimit
10483     case TARGET_NR_ugetrlimit:
10484     {
10485         struct rlimit rlim;
10486         int resource = target_to_host_resource(arg1);
10487         ret = get_errno(getrlimit(resource, &rlim));
10488         if (!is_error(ret)) {
10489             struct target_rlimit *target_rlim;
10490             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10491                 return -TARGET_EFAULT;
10492             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10493             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10494             unlock_user_struct(target_rlim, arg2, 1);
10495         }
10496         return ret;
10497     }
10498 #endif
10499 #ifdef TARGET_NR_truncate64
10500     case TARGET_NR_truncate64:
10501         if (!(p = lock_user_string(arg1)))
10502             return -TARGET_EFAULT;
10503         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10504         unlock_user(p, arg1, 0);
10505         return ret;
10506 #endif
10507 #ifdef TARGET_NR_ftruncate64
10508     case TARGET_NR_ftruncate64:
10509         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10510 #endif
10511 #ifdef TARGET_NR_stat64
10512     case TARGET_NR_stat64:
10513         if (!(p = lock_user_string(arg1))) {
10514             return -TARGET_EFAULT;
10515         }
10516         ret = get_errno(stat(path(p), &st));
10517         unlock_user(p, arg1, 0);
10518         if (!is_error(ret))
10519             ret = host_to_target_stat64(cpu_env, arg2, &st);
10520         return ret;
10521 #endif
10522 #ifdef TARGET_NR_lstat64
10523     case TARGET_NR_lstat64:
10524         if (!(p = lock_user_string(arg1))) {
10525             return -TARGET_EFAULT;
10526         }
10527         ret = get_errno(lstat(path(p), &st));
10528         unlock_user(p, arg1, 0);
10529         if (!is_error(ret))
10530             ret = host_to_target_stat64(cpu_env, arg2, &st);
10531         return ret;
10532 #endif
10533 #ifdef TARGET_NR_fstat64
10534     case TARGET_NR_fstat64:
10535         ret = get_errno(fstat(arg1, &st));
10536         if (!is_error(ret))
10537             ret = host_to_target_stat64(cpu_env, arg2, &st);
10538         return ret;
10539 #endif
10540 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10541 #ifdef TARGET_NR_fstatat64
10542     case TARGET_NR_fstatat64:
10543 #endif
10544 #ifdef TARGET_NR_newfstatat
10545     case TARGET_NR_newfstatat:
10546 #endif
10547         if (!(p = lock_user_string(arg2))) {
10548             return -TARGET_EFAULT;
10549         }
10550         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10551         unlock_user(p, arg2, 0);
10552         if (!is_error(ret))
10553             ret = host_to_target_stat64(cpu_env, arg3, &st);
10554         return ret;
10555 #endif
10556 #if defined(TARGET_NR_statx)
10557     case TARGET_NR_statx:
10558         {
10559             struct target_statx *target_stx;
10560             int dirfd = arg1;
10561             int flags = arg3;
10562 
10563             p = lock_user_string(arg2);
10564             if (p == NULL) {
10565                 return -TARGET_EFAULT;
10566             }
10567 #if defined(__NR_statx)
10568             {
10569                 /*
10570                  * It is assumed that struct statx is architecture independent.
10571                  */
10572                 struct target_statx host_stx;
10573                 int mask = arg4;
10574 
10575                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10576                 if (!is_error(ret)) {
10577                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10578                         unlock_user(p, arg2, 0);
10579                         return -TARGET_EFAULT;
10580                     }
10581                 }
10582 
10583                 if (ret != -TARGET_ENOSYS) {
10584                     unlock_user(p, arg2, 0);
10585                     return ret;
10586                 }
10587             }
10588 #endif
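            /*
             * Fallback path: the host either has no statx(2) or returned
             * ENOSYS, so emulate the call with fstatat() below and fill in
             * the subset of struct target_statx fields that a plain
             * struct stat can provide.
             */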
10589             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10590             unlock_user(p, arg2, 0);
10591 
10592             if (!is_error(ret)) {
10593                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10594                     return -TARGET_EFAULT;
10595                 }
10596                 memset(target_stx, 0, sizeof(*target_stx));
10597                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10598                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10599                 __put_user(st.st_ino, &target_stx->stx_ino);
10600                 __put_user(st.st_mode, &target_stx->stx_mode);
10601                 __put_user(st.st_uid, &target_stx->stx_uid);
10602                 __put_user(st.st_gid, &target_stx->stx_gid);
10603                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10604                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10605                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10606                 __put_user(st.st_size, &target_stx->stx_size);
10607                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10608                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10609                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10610                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10611                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10612                 unlock_user_struct(target_stx, arg5, 1);
10613             }
10614         }
10615         return ret;
10616 #endif
10617 #ifdef TARGET_NR_lchown
10618     case TARGET_NR_lchown:
10619         if (!(p = lock_user_string(arg1)))
10620             return -TARGET_EFAULT;
10621         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10622         unlock_user(p, arg1, 0);
10623         return ret;
10624 #endif
10625 #ifdef TARGET_NR_getuid
10626     case TARGET_NR_getuid:
10627         return get_errno(high2lowuid(getuid()));
10628 #endif
10629 #ifdef TARGET_NR_getgid
10630     case TARGET_NR_getgid:
10631         return get_errno(high2lowgid(getgid()));
10632 #endif
10633 #ifdef TARGET_NR_geteuid
10634     case TARGET_NR_geteuid:
10635         return get_errno(high2lowuid(geteuid()));
10636 #endif
10637 #ifdef TARGET_NR_getegid
10638     case TARGET_NR_getegid:
10639         return get_errno(high2lowgid(getegid()));
10640 #endif
10641     case TARGET_NR_setreuid:
10642         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10643     case TARGET_NR_setregid:
10644         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10645     case TARGET_NR_getgroups:
10646         {
10647             int gidsetsize = arg1;
10648             target_id *target_grouplist;
10649             gid_t *grouplist;
10650             int i;
10651 
10652             grouplist = alloca(gidsetsize * sizeof(gid_t));
10653             ret = get_errno(getgroups(gidsetsize, grouplist));
10654             if (gidsetsize == 0)
10655                 return ret;
10656             if (!is_error(ret)) {
10657                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10658                 if (!target_grouplist)
10659                     return -TARGET_EFAULT;
10660                 for (i = 0; i < ret; i++)
10661                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10662                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10663             }
10664         }
10665         return ret;
10666     case TARGET_NR_setgroups:
10667         {
10668             int gidsetsize = arg1;
10669             target_id *target_grouplist;
10670             gid_t *grouplist = NULL;
10671             int i;
10672             if (gidsetsize) {
10673                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10674                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10675                 if (!target_grouplist) {
10676                     return -TARGET_EFAULT;
10677                 }
10678                 for (i = 0; i < gidsetsize; i++) {
10679                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10680                 }
10681                 unlock_user(target_grouplist, arg2, 0);
10682             }
10683             return get_errno(setgroups(gidsetsize, grouplist));
10684         }
10685     case TARGET_NR_fchown:
10686         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10687 #if defined(TARGET_NR_fchownat)
10688     case TARGET_NR_fchownat:
10689         if (!(p = lock_user_string(arg2)))
10690             return -TARGET_EFAULT;
10691         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10692                                  low2highgid(arg4), arg5));
10693         unlock_user(p, arg2, 0);
10694         return ret;
10695 #endif
10696 #ifdef TARGET_NR_setresuid
10697     case TARGET_NR_setresuid:
10698         return get_errno(sys_setresuid(low2highuid(arg1),
10699                                        low2highuid(arg2),
10700                                        low2highuid(arg3)));
10701 #endif
10702 #ifdef TARGET_NR_getresuid
10703     case TARGET_NR_getresuid:
10704         {
10705             uid_t ruid, euid, suid;
10706             ret = get_errno(getresuid(&ruid, &euid, &suid));
10707             if (!is_error(ret)) {
10708                 if (put_user_id(high2lowuid(ruid), arg1)
10709                     || put_user_id(high2lowuid(euid), arg2)
10710                     || put_user_id(high2lowuid(suid), arg3))
10711                     return -TARGET_EFAULT;
10712             }
10713         }
10714         return ret;
10715 #endif
10716 #ifdef TARGET_NR_getresgid
10717     case TARGET_NR_setresgid:
10718         return get_errno(sys_setresgid(low2highgid(arg1),
10719                                        low2highgid(arg2),
10720                                        low2highgid(arg3)));
10721 #endif
10722 #ifdef TARGET_NR_getresgid
10723     case TARGET_NR_getresgid:
10724         {
10725             gid_t rgid, egid, sgid;
10726             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10727             if (!is_error(ret)) {
10728                 if (put_user_id(high2lowgid(rgid), arg1)
10729                     || put_user_id(high2lowgid(egid), arg2)
10730                     || put_user_id(high2lowgid(sgid), arg3))
10731                     return -TARGET_EFAULT;
10732             }
10733         }
10734         return ret;
10735 #endif
10736 #ifdef TARGET_NR_chown
10737     case TARGET_NR_chown:
10738         if (!(p = lock_user_string(arg1)))
10739             return -TARGET_EFAULT;
10740         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10741         unlock_user(p, arg1, 0);
10742         return ret;
10743 #endif
10744     case TARGET_NR_setuid:
10745         return get_errno(sys_setuid(low2highuid(arg1)));
10746     case TARGET_NR_setgid:
10747         return get_errno(sys_setgid(low2highgid(arg1)));
10748     case TARGET_NR_setfsuid:
10749         return get_errno(setfsuid(arg1));
10750     case TARGET_NR_setfsgid:
10751         return get_errno(setfsgid(arg1));
10752 
10753 #ifdef TARGET_NR_lchown32
10754     case TARGET_NR_lchown32:
10755         if (!(p = lock_user_string(arg1)))
10756             return -TARGET_EFAULT;
10757         ret = get_errno(lchown(p, arg2, arg3));
10758         unlock_user(p, arg1, 0);
10759         return ret;
10760 #endif
10761 #ifdef TARGET_NR_getuid32
10762     case TARGET_NR_getuid32:
10763         return get_errno(getuid());
10764 #endif
10765 
10766 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10767     /* Alpha specific */
10768     case TARGET_NR_getxuid:
10769         {
10770             uid_t euid;
10771             euid = geteuid();
10772             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10773         }
10774         return get_errno(getuid());
10775 #endif
10776 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10777     /* Alpha specific */
10778     case TARGET_NR_getxgid:
10779         {
10780             gid_t egid;
10781             egid = getegid();
10782             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10783         }
10784         return get_errno(getgid());
10785 #endif
10786 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10787     /* Alpha specific */
10788     case TARGET_NR_osf_getsysinfo:
10789         ret = -TARGET_EOPNOTSUPP;
10790         switch (arg1) {
10791           case TARGET_GSI_IEEE_FP_CONTROL:
10792             {
10793                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10794                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10795 
10796                 swcr &= ~SWCR_STATUS_MASK;
10797                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10798 
10799                 if (put_user_u64(swcr, arg2))
10800                     return -TARGET_EFAULT;
10801                 ret = 0;
10802             }
10803             break;
10804 
10805           /* case GSI_IEEE_STATE_AT_SIGNAL:
10806              -- Not implemented in linux kernel.
10807              case GSI_UACPROC:
10808              -- Retrieves current unaligned access state; not much used.
10809              case GSI_PROC_TYPE:
10810              -- Retrieves implver information; surely not used.
10811              case GSI_GET_HWRPB:
10812              -- Grabs a copy of the HWRPB; surely not used.
10813           */
10814         }
10815         return ret;
10816 #endif
10817 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10818     /* Alpha specific */
10819     case TARGET_NR_osf_setsysinfo:
10820         ret = -TARGET_EOPNOTSUPP;
10821         switch (arg1) {
10822           case TARGET_SSI_IEEE_FP_CONTROL:
10823             {
10824                 uint64_t swcr, fpcr;
10825 
10826                 if (get_user_u64(swcr, arg2)) {
10827                     return -TARGET_EFAULT;
10828                 }
10829 
10830                 /*
10831                  * The kernel calls swcr_update_status to update the
10832                  * status bits from the fpcr at every point that it
10833                  * could be queried.  Therefore, we store the status
10834                  * bits only in FPCR.
10835                  */
10836                 ((CPUAlphaState *)cpu_env)->swcr
10837                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10838 
10839                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10840                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10841                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10842                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10843                 ret = 0;
10844             }
10845             break;
10846 
10847           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10848             {
10849                 uint64_t exc, fpcr, fex;
10850 
10851                 if (get_user_u64(exc, arg2)) {
10852                     return -TARGET_EFAULT;
10853                 }
10854                 exc &= SWCR_STATUS_MASK;
10855                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10856 
10857                 /* Old exceptions are not signaled.  */
10858                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10859                 fex = exc & ~fex;
10860                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10861                 fex &= ((CPUArchState *)cpu_env)->swcr;
10862 
10863                 /* Update the hardware fpcr.  */
10864                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10865                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10866 
10867                 if (fex) {
10868                     int si_code = TARGET_FPE_FLTUNK;
10869                     target_siginfo_t info;
10870 
10871                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10872                         si_code = TARGET_FPE_FLTUND;
10873                     }
10874                     if (fex & SWCR_TRAP_ENABLE_INE) {
10875                         si_code = TARGET_FPE_FLTRES;
10876                     }
10877                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10878                         si_code = TARGET_FPE_FLTUND;
10879                     }
10880                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10881                         si_code = TARGET_FPE_FLTOVF;
10882                     }
10883                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10884                         si_code = TARGET_FPE_FLTDIV;
10885                     }
10886                     if (fex & SWCR_TRAP_ENABLE_INV) {
10887                         si_code = TARGET_FPE_FLTINV;
10888                     }
10889 
10890                     info.si_signo = SIGFPE;
10891                     info.si_errno = 0;
10892                     info.si_code = si_code;
10893                     info._sifields._sigfault._addr
10894                         = ((CPUArchState *)cpu_env)->pc;
10895                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10896                                  QEMU_SI_FAULT, &info);
10897                 }
10898                 ret = 0;
10899             }
10900             break;
10901 
10902           /* case SSI_NVPAIRS:
10903              -- Used with SSIN_UACPROC to enable unaligned accesses.
10904              case SSI_IEEE_STATE_AT_SIGNAL:
10905              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10906              -- Not implemented in linux kernel
10907           */
10908         }
10909         return ret;
10910 #endif
10911 #ifdef TARGET_NR_osf_sigprocmask
10912     /* Alpha specific.  */
10913     case TARGET_NR_osf_sigprocmask:
10914         {
10915             abi_ulong mask;
10916             int how;
10917             sigset_t set, oldset;
10918 
10919             switch (arg1) {
10920             case TARGET_SIG_BLOCK:
10921                 how = SIG_BLOCK;
10922                 break;
10923             case TARGET_SIG_UNBLOCK:
10924                 how = SIG_UNBLOCK;
10925                 break;
10926             case TARGET_SIG_SETMASK:
10927                 how = SIG_SETMASK;
10928                 break;
10929             default:
10930                 return -TARGET_EINVAL;
10931             }
10932             mask = arg2;
10933             target_to_host_old_sigset(&set, &mask);
10934             ret = do_sigprocmask(how, &set, &oldset);
10935             if (!ret) {
10936                 host_to_target_old_sigset(&mask, &oldset);
10937                 ret = mask;
10938             }
10939         }
10940         return ret;
10941 #endif
10942 
10943 #ifdef TARGET_NR_getgid32
10944     case TARGET_NR_getgid32:
10945         return get_errno(getgid());
10946 #endif
10947 #ifdef TARGET_NR_geteuid32
10948     case TARGET_NR_geteuid32:
10949         return get_errno(geteuid());
10950 #endif
10951 #ifdef TARGET_NR_getegid32
10952     case TARGET_NR_getegid32:
10953         return get_errno(getegid());
10954 #endif
10955 #ifdef TARGET_NR_setreuid32
10956     case TARGET_NR_setreuid32:
10957         return get_errno(setreuid(arg1, arg2));
10958 #endif
10959 #ifdef TARGET_NR_setregid32
10960     case TARGET_NR_setregid32:
10961         return get_errno(setregid(arg1, arg2));
10962 #endif
10963 #ifdef TARGET_NR_getgroups32
10964     case TARGET_NR_getgroups32:
10965         {
10966             int gidsetsize = arg1;
10967             uint32_t *target_grouplist;
10968             gid_t *grouplist;
10969             int i;
10970 
10971             grouplist = alloca(gidsetsize * sizeof(gid_t));
10972             ret = get_errno(getgroups(gidsetsize, grouplist));
10973             if (gidsetsize == 0)
10974                 return ret;
10975             if (!is_error(ret)) {
10976                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10977                 if (!target_grouplist) {
10978                     return -TARGET_EFAULT;
10979                 }
10980                 for (i = 0; i < ret; i++)
10981                     target_grouplist[i] = tswap32(grouplist[i]);
10982                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10983             }
10984         }
10985         return ret;
10986 #endif
10987 #ifdef TARGET_NR_setgroups32
10988     case TARGET_NR_setgroups32:
10989         {
10990             int gidsetsize = arg1;
10991             uint32_t *target_grouplist;
10992             gid_t *grouplist;
10993             int i;
10994 
10995             grouplist = alloca(gidsetsize * sizeof(gid_t));
10996             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10997             if (!target_grouplist) {
10998                 return -TARGET_EFAULT;
10999             }
11000             for (i = 0; i < gidsetsize; i++)
11001                 grouplist[i] = tswap32(target_grouplist[i]);
11002             unlock_user(target_grouplist, arg2, 0);
11003             return get_errno(setgroups(gidsetsize, grouplist));
11004         }
11005 #endif
11006 #ifdef TARGET_NR_fchown32
11007     case TARGET_NR_fchown32:
11008         return get_errno(fchown(arg1, arg2, arg3));
11009 #endif
11010 #ifdef TARGET_NR_setresuid32
11011     case TARGET_NR_setresuid32:
11012         return get_errno(sys_setresuid(arg1, arg2, arg3));
11013 #endif
11014 #ifdef TARGET_NR_getresuid32
11015     case TARGET_NR_getresuid32:
11016         {
11017             uid_t ruid, euid, suid;
11018             ret = get_errno(getresuid(&ruid, &euid, &suid));
11019             if (!is_error(ret)) {
11020                 if (put_user_u32(ruid, arg1)
11021                     || put_user_u32(euid, arg2)
11022                     || put_user_u32(suid, arg3))
11023                     return -TARGET_EFAULT;
11024             }
11025         }
11026         return ret;
11027 #endif
11028 #ifdef TARGET_NR_setresgid32
11029     case TARGET_NR_setresgid32:
11030         return get_errno(sys_setresgid(arg1, arg2, arg3));
11031 #endif
11032 #ifdef TARGET_NR_getresgid32
11033     case TARGET_NR_getresgid32:
11034         {
11035             gid_t rgid, egid, sgid;
11036             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11037             if (!is_error(ret)) {
11038                 if (put_user_u32(rgid, arg1)
11039                     || put_user_u32(egid, arg2)
11040                     || put_user_u32(sgid, arg3))
11041                     return -TARGET_EFAULT;
11042             }
11043         }
11044         return ret;
11045 #endif
11046 #ifdef TARGET_NR_chown32
11047     case TARGET_NR_chown32:
11048         if (!(p = lock_user_string(arg1)))
11049             return -TARGET_EFAULT;
11050         ret = get_errno(chown(p, arg2, arg3));
11051         unlock_user(p, arg1, 0);
11052         return ret;
11053 #endif
11054 #ifdef TARGET_NR_setuid32
11055     case TARGET_NR_setuid32:
11056         return get_errno(sys_setuid(arg1));
11057 #endif
11058 #ifdef TARGET_NR_setgid32
11059     case TARGET_NR_setgid32:
11060         return get_errno(sys_setgid(arg1));
11061 #endif
11062 #ifdef TARGET_NR_setfsuid32
11063     case TARGET_NR_setfsuid32:
11064         return get_errno(setfsuid(arg1));
11065 #endif
11066 #ifdef TARGET_NR_setfsgid32
11067     case TARGET_NR_setfsgid32:
11068         return get_errno(setfsgid(arg1));
11069 #endif
11070 #ifdef TARGET_NR_mincore
11071     case TARGET_NR_mincore:
11072         {
11073             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11074             if (!a) {
11075                 return -TARGET_ENOMEM;
11076             }
11077             p = lock_user_string(arg3);
11078             if (!p) {
11079                 ret = -TARGET_EFAULT;
11080             } else {
11081                 ret = get_errno(mincore(a, arg2, p));
11082                 unlock_user(p, arg3, ret);
11083             }
11084             unlock_user(a, arg1, 0);
11085         }
11086         return ret;
11087 #endif
11088 #ifdef TARGET_NR_arm_fadvise64_64
11089     case TARGET_NR_arm_fadvise64_64:
11090         /* arm_fadvise64_64 looks like fadvise64_64 but
11091          * with different argument order: fd, advice, offset, len
11092          * rather than the usual fd, offset, len, advice.
11093          * Note that offset and len are both 64-bit so appear as
11094          * pairs of 32-bit registers.
11095          */
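        /*
         * Illustrative note (added, not in the original): the two 64-bit
         * quantities arrive as register pairs, so arg3/arg4 hold the offset
         * halves and arg5/arg6 the length halves; target_offset64() below
         * reassembles each pair into a single 64-bit value.
         */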
11096         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11097                             target_offset64(arg5, arg6), arg2);
11098         return -host_to_target_errno(ret);
11099 #endif
11100 
11101 #if TARGET_ABI_BITS == 32
11102 
11103 #ifdef TARGET_NR_fadvise64_64
11104     case TARGET_NR_fadvise64_64:
11105 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11106         /* 6 args: fd, advice, offset (high, low), len (high, low) */
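        /*
         * Rotate the arguments so that the common posix_fadvise() call at
         * the end of this case sees fd, offset (arg2, arg3), len (arg4,
         * arg5), advice (arg6); "ret" is only used as scratch here.
         */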
11107         ret = arg2;
11108         arg2 = arg3;
11109         arg3 = arg4;
11110         arg4 = arg5;
11111         arg5 = arg6;
11112         arg6 = ret;
11113 #else
11114         /* 6 args: fd, offset (high, low), len (high, low), advice */
11115         if (regpairs_aligned(cpu_env, num)) {
11116             /* offset is in (3,4), len in (5,6) and advice in 7 */
11117             arg2 = arg3;
11118             arg3 = arg4;
11119             arg4 = arg5;
11120             arg5 = arg6;
11121             arg6 = arg7;
11122         }
11123 #endif
11124         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11125                             target_offset64(arg4, arg5), arg6);
11126         return -host_to_target_errno(ret);
11127 #endif
11128 
11129 #ifdef TARGET_NR_fadvise64
11130     case TARGET_NR_fadvise64:
11131         /* 5 args: fd, offset (high, low), len, advice */
11132         if (regpairs_aligned(cpu_env, num)) {
11133             /* offset is in (3,4), len in 5 and advice in 6 */
11134             arg2 = arg3;
11135             arg3 = arg4;
11136             arg4 = arg5;
11137             arg5 = arg6;
11138         }
11139         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11140         return -host_to_target_errno(ret);
11141 #endif
11142 
11143 #else /* not a 32-bit ABI */
11144 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11145 #ifdef TARGET_NR_fadvise64_64
11146     case TARGET_NR_fadvise64_64:
11147 #endif
11148 #ifdef TARGET_NR_fadvise64
11149     case TARGET_NR_fadvise64:
11150 #endif
11151 #ifdef TARGET_S390X
11152         switch (arg4) {
11153         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11154         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11155         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11156         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11157         default: break;
11158         }
11159 #endif
11160         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11161 #endif
11162 #endif /* end of 64-bit ABI fadvise handling */
11163 
11164 #ifdef TARGET_NR_madvise
11165     case TARGET_NR_madvise:
11166         /* A straight passthrough may not be safe because qemu sometimes
11167            turns private file-backed mappings into anonymous mappings.
11168            This will break MADV_DONTNEED.
11169            This is a hint, so ignoring and returning success is ok.  */
11170         return 0;
11171 #endif
11172 #if TARGET_ABI_BITS == 32
11173     case TARGET_NR_fcntl64:
11174     {
11175         int cmd;
11176         struct flock64 fl;
11177         from_flock64_fn *copyfrom = copy_from_user_flock64;
11178         to_flock64_fn *copyto = copy_to_user_flock64;
11179 
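        /*
         * Added note: ARM OABI and EABI disagree on the alignment of 64-bit
         * members, so struct flock64 has a different layout for old-ABI
         * guests; those use the *_oabi_flock64 converters selected below.
         */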
11180 #ifdef TARGET_ARM
11181         if (!((CPUARMState *)cpu_env)->eabi) {
11182             copyfrom = copy_from_user_oabi_flock64;
11183             copyto = copy_to_user_oabi_flock64;
11184         }
11185 #endif
11186 
11187         cmd = target_to_host_fcntl_cmd(arg2);
11188         if (cmd == -TARGET_EINVAL) {
11189             return cmd;
11190         }
11191 
11192         switch (arg2) {
11193         case TARGET_F_GETLK64:
11194             ret = copyfrom(&fl, arg3);
11195             if (ret) {
11196                 break;
11197             }
11198             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11199             if (ret == 0) {
11200                 ret = copyto(arg3, &fl);
11201             }
11202             break;
11203 
11204         case TARGET_F_SETLK64:
11205         case TARGET_F_SETLKW64:
11206             ret = copyfrom(&fl, arg3);
11207             if (ret) {
11208                 break;
11209             }
11210             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11211             break;
11212         default:
11213             ret = do_fcntl(arg1, arg2, arg3);
11214             break;
11215         }
11216         return ret;
11217     }
11218 #endif
11219 #ifdef TARGET_NR_cacheflush
11220     case TARGET_NR_cacheflush:
11221         /* self-modifying code is handled automatically, so nothing needed */
11222         return 0;
11223 #endif
11224 #ifdef TARGET_NR_getpagesize
11225     case TARGET_NR_getpagesize:
11226         return TARGET_PAGE_SIZE;
11227 #endif
11228     case TARGET_NR_gettid:
11229         return get_errno(sys_gettid());
11230 #ifdef TARGET_NR_readahead
11231     case TARGET_NR_readahead:
11232 #if TARGET_ABI_BITS == 32
11233         if (regpairs_aligned(cpu_env, num)) {
11234             arg2 = arg3;
11235             arg3 = arg4;
11236             arg4 = arg5;
11237         }
11238         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11239 #else
11240         ret = get_errno(readahead(arg1, arg2, arg3));
11241 #endif
11242         return ret;
11243 #endif
11244 #ifdef CONFIG_ATTR
11245 #ifdef TARGET_NR_setxattr
11246     case TARGET_NR_listxattr:
11247     case TARGET_NR_llistxattr:
11248     {
11249         void *p, *b = 0;
11250         if (arg2) {
11251             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11252             if (!b) {
11253                 return -TARGET_EFAULT;
11254             }
11255         }
11256         p = lock_user_string(arg1);
11257         if (p) {
11258             if (num == TARGET_NR_listxattr) {
11259                 ret = get_errno(listxattr(p, b, arg3));
11260             } else {
11261                 ret = get_errno(llistxattr(p, b, arg3));
11262             }
11263         } else {
11264             ret = -TARGET_EFAULT;
11265         }
11266         unlock_user(p, arg1, 0);
11267         unlock_user(b, arg2, arg3);
11268         return ret;
11269     }
11270     case TARGET_NR_flistxattr:
11271     {
11272         void *b = 0;
11273         if (arg2) {
11274             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11275             if (!b) {
11276                 return -TARGET_EFAULT;
11277             }
11278         }
11279         ret = get_errno(flistxattr(arg1, b, arg3));
11280         unlock_user(b, arg2, arg3);
11281         return ret;
11282     }
11283     case TARGET_NR_setxattr:
11284     case TARGET_NR_lsetxattr:
11285         {
11286             void *p, *n, *v = 0;
11287             if (arg3) {
11288                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11289                 if (!v) {
11290                     return -TARGET_EFAULT;
11291                 }
11292             }
11293             p = lock_user_string(arg1);
11294             n = lock_user_string(arg2);
11295             if (p && n) {
11296                 if (num == TARGET_NR_setxattr) {
11297                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11298                 } else {
11299                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11300                 }
11301             } else {
11302                 ret = -TARGET_EFAULT;
11303             }
11304             unlock_user(p, arg1, 0);
11305             unlock_user(n, arg2, 0);
11306             unlock_user(v, arg3, 0);
11307         }
11308         return ret;
11309     case TARGET_NR_fsetxattr:
11310         {
11311             void *n, *v = 0;
11312             if (arg3) {
11313                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11314                 if (!v) {
11315                     return -TARGET_EFAULT;
11316                 }
11317             }
11318             n = lock_user_string(arg2);
11319             if (n) {
11320                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11321             } else {
11322                 ret = -TARGET_EFAULT;
11323             }
11324             unlock_user(n, arg2, 0);
11325             unlock_user(v, arg3, 0);
11326         }
11327         return ret;
11328     case TARGET_NR_getxattr:
11329     case TARGET_NR_lgetxattr:
11330         {
11331             void *p, *n, *v = 0;
11332             if (arg3) {
11333                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11334                 if (!v) {
11335                     return -TARGET_EFAULT;
11336                 }
11337             }
11338             p = lock_user_string(arg1);
11339             n = lock_user_string(arg2);
11340             if (p && n) {
11341                 if (num == TARGET_NR_getxattr) {
11342                     ret = get_errno(getxattr(p, n, v, arg4));
11343                 } else {
11344                     ret = get_errno(lgetxattr(p, n, v, arg4));
11345                 }
11346             } else {
11347                 ret = -TARGET_EFAULT;
11348             }
11349             unlock_user(p, arg1, 0);
11350             unlock_user(n, arg2, 0);
11351             unlock_user(v, arg3, arg4);
11352         }
11353         return ret;
11354     case TARGET_NR_fgetxattr:
11355         {
11356             void *n, *v = 0;
11357             if (arg3) {
11358                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11359                 if (!v) {
11360                     return -TARGET_EFAULT;
11361                 }
11362             }
11363             n = lock_user_string(arg2);
11364             if (n) {
11365                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11366             } else {
11367                 ret = -TARGET_EFAULT;
11368             }
11369             unlock_user(n, arg2, 0);
11370             unlock_user(v, arg3, arg4);
11371         }
11372         return ret;
11373     case TARGET_NR_removexattr:
11374     case TARGET_NR_lremovexattr:
11375         {
11376             void *p, *n;
11377             p = lock_user_string(arg1);
11378             n = lock_user_string(arg2);
11379             if (p && n) {
11380                 if (num == TARGET_NR_removexattr) {
11381                     ret = get_errno(removexattr(p, n));
11382                 } else {
11383                     ret = get_errno(lremovexattr(p, n));
11384                 }
11385             } else {
11386                 ret = -TARGET_EFAULT;
11387             }
11388             unlock_user(p, arg1, 0);
11389             unlock_user(n, arg2, 0);
11390         }
11391         return ret;
11392     case TARGET_NR_fremovexattr:
11393         {
11394             void *n;
11395             n = lock_user_string(arg2);
11396             if (n) {
11397                 ret = get_errno(fremovexattr(arg1, n));
11398             } else {
11399                 ret = -TARGET_EFAULT;
11400             }
11401             unlock_user(n, arg2, 0);
11402         }
11403         return ret;
11404 #endif
11405 #endif /* CONFIG_ATTR */
11406 #ifdef TARGET_NR_set_thread_area
11407     case TARGET_NR_set_thread_area:
11408 #if defined(TARGET_MIPS)
11409       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11410       return 0;
11411 #elif defined(TARGET_CRIS)
11412       if (arg1 & 0xff)
11413           ret = -TARGET_EINVAL;
11414       else {
11415           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11416           ret = 0;
11417       }
11418       return ret;
11419 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11420       return do_set_thread_area(cpu_env, arg1);
11421 #elif defined(TARGET_M68K)
11422       {
11423           TaskState *ts = cpu->opaque;
11424           ts->tp_value = arg1;
11425           return 0;
11426       }
11427 #else
11428       return -TARGET_ENOSYS;
11429 #endif
11430 #endif
11431 #ifdef TARGET_NR_get_thread_area
11432     case TARGET_NR_get_thread_area:
11433 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11434         return do_get_thread_area(cpu_env, arg1);
11435 #elif defined(TARGET_M68K)
11436         {
11437             TaskState *ts = cpu->opaque;
11438             return ts->tp_value;
11439         }
11440 #else
11441         return -TARGET_ENOSYS;
11442 #endif
11443 #endif
11444 #ifdef TARGET_NR_getdomainname
11445     case TARGET_NR_getdomainname:
11446         return -TARGET_ENOSYS;
11447 #endif
11448 
11449 #ifdef TARGET_NR_clock_settime
11450     case TARGET_NR_clock_settime:
11451     {
11452         struct timespec ts;
11453 
11454         ret = target_to_host_timespec(&ts, arg2);
11455         if (!is_error(ret)) {
11456             ret = get_errno(clock_settime(arg1, &ts));
11457         }
11458         return ret;
11459     }
11460 #endif
11461 #ifdef TARGET_NR_clock_gettime
11462     case TARGET_NR_clock_gettime:
11463     {
11464         struct timespec ts;
11465         ret = get_errno(clock_gettime(arg1, &ts));
11466         if (!is_error(ret)) {
11467             ret = host_to_target_timespec(arg2, &ts);
11468         }
11469         return ret;
11470     }
11471 #endif
11472 #ifdef TARGET_NR_clock_getres
11473     case TARGET_NR_clock_getres:
11474     {
11475         struct timespec ts;
11476         ret = get_errno(clock_getres(arg1, &ts));
11477         if (!is_error(ret)) {
11478             host_to_target_timespec(arg2, &ts);
11479         }
11480         return ret;
11481     }
11482 #endif
11483 #ifdef TARGET_NR_clock_nanosleep
11484     case TARGET_NR_clock_nanosleep:
11485     {
11486         struct timespec ts;
11487         target_to_host_timespec(&ts, arg3);
11488         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11489                                              &ts, arg4 ? &ts : NULL));
11490         if (arg4)
11491             host_to_target_timespec(arg4, &ts);
11492 
11493 #if defined(TARGET_PPC)
11494         /* clock_nanosleep is odd in that it returns positive errno values.
11495          * On PPC, CR0 bit 3 should be set in such a situation. */
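        /* (Added note) CR0 bit 3 is the summary-overflow bit that the PPC
         * kernel syscall ABI uses to flag an error return to userspace. */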
11496         if (ret && ret != -TARGET_ERESTARTSYS) {
11497             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11498         }
11499 #endif
11500         return ret;
11501     }
11502 #endif
11503 
11504 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11505     case TARGET_NR_set_tid_address:
11506         return get_errno(set_tid_address((int *)g2h(arg1)));
11507 #endif
11508 
11509     case TARGET_NR_tkill:
11510         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11511 
11512     case TARGET_NR_tgkill:
11513         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11514                          target_to_host_signal(arg3)));
11515 
11516 #ifdef TARGET_NR_set_robust_list
11517     case TARGET_NR_set_robust_list:
11518     case TARGET_NR_get_robust_list:
11519         /* The ABI for supporting robust futexes has userspace pass
11520          * the kernel a pointer to a linked list which is updated by
11521          * userspace after the syscall; the list is walked by the kernel
11522          * when the thread exits. Since the linked list in QEMU guest
11523          * memory isn't a valid linked list for the host and we have
11524          * no way to reliably intercept the thread-death event, we can't
11525          * support these. Silently return ENOSYS so that guest userspace
11526          * falls back to a non-robust futex implementation (which should
11527          * be OK except in the corner case of the guest crashing while
11528          * holding a mutex that is shared with another process via
11529          * shared memory).
11530          */
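        /*
         * For reference (added; layout from the kernel's futex ABI): the
         * list head the guest registers here is roughly
         *     struct robust_list_head {
         *         struct robust_list list;          // guest-side linked list
         *         long futex_offset;                // offset of futex word
         *         struct robust_list *list_op_pending;
         *     };
         * which the kernel walks at thread exit, something QEMU cannot
         * reproduce for guest pointers, hence the ENOSYS below.
         */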
11531         return -TARGET_ENOSYS;
11532 #endif
11533 
11534 #if defined(TARGET_NR_utimensat)
11535     case TARGET_NR_utimensat:
11536         {
11537             struct timespec *tsp, ts[2];
11538             if (!arg3) {
11539                 tsp = NULL;
11540             } else {
11541                 target_to_host_timespec(ts, arg3);
11542                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11543                 tsp = ts;
11544             }
11545             if (!arg2)
11546                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11547             else {
11548                 if (!(p = lock_user_string(arg2))) {
11549                     return -TARGET_EFAULT;
11550                 }
11551                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11552                 unlock_user(p, arg2, 0);
11553             }
11554         }
11555         return ret;
11556 #endif
11557 #ifdef TARGET_NR_futex
11558     case TARGET_NR_futex:
11559         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11560 #endif
11561 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11562     case TARGET_NR_inotify_init:
11563         ret = get_errno(sys_inotify_init());
11564         if (ret >= 0) {
11565             fd_trans_register(ret, &target_inotify_trans);
11566         }
11567         return ret;
11568 #endif
11569 #ifdef CONFIG_INOTIFY1
11570 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11571     case TARGET_NR_inotify_init1:
11572         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11573                                           fcntl_flags_tbl)));
11574         if (ret >= 0) {
11575             fd_trans_register(ret, &target_inotify_trans);
11576         }
11577         return ret;
11578 #endif
11579 #endif
11580 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11581     case TARGET_NR_inotify_add_watch:
11582         p = lock_user_string(arg2);
11583         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11584         unlock_user(p, arg2, 0);
11585         return ret;
11586 #endif
11587 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11588     case TARGET_NR_inotify_rm_watch:
11589         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11590 #endif
11591 
11592 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11593     case TARGET_NR_mq_open:
11594         {
11595             struct mq_attr posix_mq_attr;
11596             struct mq_attr *pposix_mq_attr;
11597             int host_flags;
11598 
11599             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11600             pposix_mq_attr = NULL;
11601             if (arg4) {
11602                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11603                     return -TARGET_EFAULT;
11604                 }
11605                 pposix_mq_attr = &posix_mq_attr;
11606             }
11607             p = lock_user_string(arg1 - 1);
11608             if (!p) {
11609                 return -TARGET_EFAULT;
11610             }
11611             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11612             unlock_user(p, arg1, 0);
11613         }
11614         return ret;
11615 
11616     case TARGET_NR_mq_unlink:
11617         p = lock_user_string(arg1 - 1);
11618         if (!p) {
11619             return -TARGET_EFAULT;
11620         }
11621         ret = get_errno(mq_unlink(p));
11622         unlock_user(p, arg1, 0);
11623         return ret;
11624 
11625 #ifdef TARGET_NR_mq_timedsend
11626     case TARGET_NR_mq_timedsend:
11627         {
11628             struct timespec ts;
11629 
11630             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11631             if (arg5 != 0) {
11632                 target_to_host_timespec(&ts, arg5);
11633                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11634                 host_to_target_timespec(arg5, &ts);
11635             } else {
11636                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11637             }
11638             unlock_user(p, arg2, arg3);
11639         }
11640         return ret;
11641 #endif
11642 
11643 #ifdef TARGET_NR_mq_timedreceive
11644     case TARGET_NR_mq_timedreceive:
11645         {
11646             struct timespec ts;
11647             unsigned int prio;
11648 
11649             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11650             if (arg5 != 0) {
11651                 target_to_host_timespec(&ts, arg5);
11652                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11653                                                      &prio, &ts));
11654                 host_to_target_timespec(arg5, &ts);
11655             } else {
11656                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11657                                                      &prio, NULL));
11658             }
11659             unlock_user(p, arg2, arg3);
11660             if (arg4 != 0)
11661                 put_user_u32(prio, arg4);
11662         }
11663         return ret;
11664 #endif
11665 
11666     /* Not implemented for now... */
11667 /*     case TARGET_NR_mq_notify: */
11668 /*         break; */
11669 
11670     case TARGET_NR_mq_getsetattr:
11671         {
11672             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11673             ret = 0;
11674             if (arg2 != 0) {
11675                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11676                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11677                                            &posix_mq_attr_out));
11678             } else if (arg3 != 0) {
11679                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11680             }
11681             if (ret == 0 && arg3 != 0) {
11682                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11683             }
11684         }
11685         return ret;
11686 #endif
11687 
11688 #ifdef CONFIG_SPLICE
11689 #ifdef TARGET_NR_tee
11690     case TARGET_NR_tee:
11691         {
11692             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11693         }
11694         return ret;
11695 #endif
11696 #ifdef TARGET_NR_splice
11697     case TARGET_NR_splice:
11698         {
11699             loff_t loff_in, loff_out;
11700             loff_t *ploff_in = NULL, *ploff_out = NULL;
11701             if (arg2) {
11702                 if (get_user_u64(loff_in, arg2)) {
11703                     return -TARGET_EFAULT;
11704                 }
11705                 ploff_in = &loff_in;
11706             }
11707             if (arg4) {
11708                 if (get_user_u64(loff_out, arg4)) {
11709                     return -TARGET_EFAULT;
11710                 }
11711                 ploff_out = &loff_out;
11712             }
11713             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11714             if (arg2) {
11715                 if (put_user_u64(loff_in, arg2)) {
11716                     return -TARGET_EFAULT;
11717                 }
11718             }
11719             if (arg4) {
11720                 if (put_user_u64(loff_out, arg4)) {
11721                     return -TARGET_EFAULT;
11722                 }
11723             }
11724         }
11725         return ret;
11726 #endif
11727 #ifdef TARGET_NR_vmsplice
11728     case TARGET_NR_vmsplice:
11729         {
11730             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11731             if (vec != NULL) {
11732                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11733                 unlock_iovec(vec, arg2, arg3, 0);
11734             } else {
11735                 ret = -host_to_target_errno(errno);
11736             }
11737         }
11738         return ret;
11739 #endif
11740 #endif /* CONFIG_SPLICE */
11741 #ifdef CONFIG_EVENTFD
11742 #if defined(TARGET_NR_eventfd)
11743     case TARGET_NR_eventfd:
11744         ret = get_errno(eventfd(arg1, 0));
11745         if (ret >= 0) {
11746             fd_trans_register(ret, &target_eventfd_trans);
11747         }
11748         return ret;
11749 #endif
11750 #if defined(TARGET_NR_eventfd2)
11751     case TARGET_NR_eventfd2:
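        /*
         * Added note: the guest's O_NONBLOCK/O_CLOEXEC bit values do not
         * necessarily match the host's, so only those two flags are
         * translated explicitly below.
         */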
11752     {
11753         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11754         if (arg2 & TARGET_O_NONBLOCK) {
11755             host_flags |= O_NONBLOCK;
11756         }
11757         if (arg2 & TARGET_O_CLOEXEC) {
11758             host_flags |= O_CLOEXEC;
11759         }
11760         ret = get_errno(eventfd(arg1, host_flags));
11761         if (ret >= 0) {
11762             fd_trans_register(ret, &target_eventfd_trans);
11763         }
11764         return ret;
11765     }
11766 #endif
11767 #endif /* CONFIG_EVENTFD  */
11768 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11769     case TARGET_NR_fallocate:
11770 #if TARGET_ABI_BITS == 32
11771         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11772                                   target_offset64(arg5, arg6)));
11773 #else
11774         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11775 #endif
11776         return ret;
11777 #endif
11778 #if defined(CONFIG_SYNC_FILE_RANGE)
11779 #if defined(TARGET_NR_sync_file_range)
11780     case TARGET_NR_sync_file_range:
11781 #if TARGET_ABI_BITS == 32
11782 #if defined(TARGET_MIPS)
11783         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11784                                         target_offset64(arg5, arg6), arg7));
11785 #else
11786         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11787                                         target_offset64(arg4, arg5), arg6));
11788 #endif /* !TARGET_MIPS */
11789 #else
11790         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11791 #endif
11792         return ret;
11793 #endif
11794 #if defined(TARGET_NR_sync_file_range2)
11795     case TARGET_NR_sync_file_range2:
11796         /* This is like sync_file_range but the arguments are reordered */
11797 #if TARGET_ABI_BITS == 32
11798         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11799                                         target_offset64(arg5, arg6), arg2));
11800 #else
11801         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11802 #endif
11803         return ret;
11804 #endif
11805 #endif
11806 #if defined(TARGET_NR_signalfd4)
11807     case TARGET_NR_signalfd4:
11808         return do_signalfd4(arg1, arg2, arg4);
11809 #endif
11810 #if defined(TARGET_NR_signalfd)
11811     case TARGET_NR_signalfd:
11812         return do_signalfd4(arg1, arg2, 0);
11813 #endif
11814 #if defined(CONFIG_EPOLL)
11815 #if defined(TARGET_NR_epoll_create)
11816     case TARGET_NR_epoll_create:
11817         return get_errno(epoll_create(arg1));
11818 #endif
11819 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11820     case TARGET_NR_epoll_create1:
11821         return get_errno(epoll_create1(arg1));
11822 #endif
11823 #if defined(TARGET_NR_epoll_ctl)
11824     case TARGET_NR_epoll_ctl:
11825     {
11826         struct epoll_event ep;
11827         struct epoll_event *epp = 0;
11828         if (arg4) {
11829             struct target_epoll_event *target_ep;
11830             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11831                 return -TARGET_EFAULT;
11832             }
11833             ep.events = tswap32(target_ep->events);
11834             /* The epoll_data_t union is just opaque data to the kernel,
11835              * so we transfer all 64 bits across and need not worry what
11836              * actual data type it is.
11837              */
11838             ep.data.u64 = tswap64(target_ep->data.u64);
11839             unlock_user_struct(target_ep, arg4, 0);
11840             epp = &ep;
11841         }
11842         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11843     }
11844 #endif
11845 
11846 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11847 #if defined(TARGET_NR_epoll_wait)
11848     case TARGET_NR_epoll_wait:
11849 #endif
11850 #if defined(TARGET_NR_epoll_pwait)
11851     case TARGET_NR_epoll_pwait:
11852 #endif
11853     {
11854         struct target_epoll_event *target_ep;
11855         struct epoll_event *ep;
11856         int epfd = arg1;
11857         int maxevents = arg3;
11858         int timeout = arg4;
11859 
11860         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11861             return -TARGET_EINVAL;
11862         }
11863 
11864         target_ep = lock_user(VERIFY_WRITE, arg2,
11865                               maxevents * sizeof(struct target_epoll_event), 1);
11866         if (!target_ep) {
11867             return -TARGET_EFAULT;
11868         }
11869 
11870         ep = g_try_new(struct epoll_event, maxevents);
11871         if (!ep) {
11872             unlock_user(target_ep, arg2, 0);
11873             return -TARGET_ENOMEM;
11874         }
11875 
11876         switch (num) {
11877 #if defined(TARGET_NR_epoll_pwait)
11878         case TARGET_NR_epoll_pwait:
11879         {
11880             target_sigset_t *target_set;
11881             sigset_t _set, *set = &_set;
11882 
11883             if (arg5) {
11884                 if (arg6 != sizeof(target_sigset_t)) {
11885                     ret = -TARGET_EINVAL;
11886                     break;
11887                 }
11888 
11889                 target_set = lock_user(VERIFY_READ, arg5,
11890                                        sizeof(target_sigset_t), 1);
11891                 if (!target_set) {
11892                     ret = -TARGET_EFAULT;
11893                     break;
11894                 }
11895                 target_to_host_sigset(set, target_set);
11896                 unlock_user(target_set, arg5, 0);
11897             } else {
11898                 set = NULL;
11899             }
11900 
11901             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11902                                              set, SIGSET_T_SIZE));
11903             break;
11904         }
11905 #endif
11906 #if defined(TARGET_NR_epoll_wait)
11907         case TARGET_NR_epoll_wait:
11908             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11909                                              NULL, 0));
11910             break;
11911 #endif
11912         default:
11913             ret = -TARGET_ENOSYS;
11914         }
11915         if (!is_error(ret)) {
11916             int i;
11917             for (i = 0; i < ret; i++) {
11918                 target_ep[i].events = tswap32(ep[i].events);
11919                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11920             }
11921             unlock_user(target_ep, arg2,
11922                         ret * sizeof(struct target_epoll_event));
11923         } else {
11924             unlock_user(target_ep, arg2, 0);
11925         }
11926         g_free(ep);
11927         return ret;
11928     }
11929 #endif
11930 #endif
11931 #ifdef TARGET_NR_prlimit64
11932     case TARGET_NR_prlimit64:
11933     {
11934         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11935         struct target_rlimit64 *target_rnew, *target_rold;
11936         struct host_rlimit64 rnew, rold, *rnewp = 0;
11937         int resource = target_to_host_resource(arg2);
11938 
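        /*
         * As with setrlimit, new limits for RLIMIT_AS, RLIMIT_DATA and
         * RLIMIT_STACK are not passed through to the host: they would also
         * constrain QEMU's own allocations, so they are silently dropped
         * here (added note).
         */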
11939         if (arg3 && (resource != RLIMIT_AS &&
11940                      resource != RLIMIT_DATA &&
11941                      resource != RLIMIT_STACK)) {
11942             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11943                 return -TARGET_EFAULT;
11944             }
11945             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11946             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11947             unlock_user_struct(target_rnew, arg3, 0);
11948             rnewp = &rnew;
11949         }
11950 
11951         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11952         if (!is_error(ret) && arg4) {
11953             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11954                 return -TARGET_EFAULT;
11955             }
11956             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11957             target_rold->rlim_max = tswap64(rold.rlim_max);
11958             unlock_user_struct(target_rold, arg4, 1);
11959         }
11960         return ret;
11961     }
11962 #endif
11963 #ifdef TARGET_NR_gethostname
11964     case TARGET_NR_gethostname:
11965     {
11966         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11967         if (name) {
11968             ret = get_errno(gethostname(name, arg2));
11969             unlock_user(name, arg1, arg2);
11970         } else {
11971             ret = -TARGET_EFAULT;
11972         }
11973         return ret;
11974     }
11975 #endif
11976 #ifdef TARGET_NR_atomic_cmpxchg_32
11977     case TARGET_NR_atomic_cmpxchg_32:
11978     {
11979         /* should use start_exclusive from main.c */
11980         abi_ulong mem_value;
11981         if (get_user_u32(mem_value, arg6)) {
11982             target_siginfo_t info;
11983             info.si_signo = SIGSEGV;
11984             info.si_errno = 0;
11985             info.si_code = TARGET_SEGV_MAPERR;
11986             info._sifields._sigfault._addr = arg6;
11987             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11988                          QEMU_SI_FAULT, &info);
11989             /* mem_value could not be read; return instead of using it uninitialized */
11990             return 0xdeadbeef;
11991         }
11992         if (mem_value == arg2)
11993             put_user_u32(arg1, arg6);
11994         return mem_value;
11995     }
11996 #endif
11997 #ifdef TARGET_NR_atomic_barrier
11998     case TARGET_NR_atomic_barrier:
11999         /* Like the kernel implementation and the qemu arm barrier,
12000            this is a no-op. */
12001         return 0;
12002 #endif
12003 
12004 #ifdef TARGET_NR_timer_create
12005     case TARGET_NR_timer_create:
12006     {
12007         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12008 
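        /*
         * The id handed back to the guest is not the host timer_t: it is
         * TIMER_MAGIC | timer_index, an index into g_posix_timers[] that
         * get_timer_id() validates and decodes in the timer_* cases below.
         */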
12009         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12010 
12011         int clkid = arg1;
12012         int timer_index = next_free_host_timer();
12013 
12014         if (timer_index < 0) {
12015             ret = -TARGET_EAGAIN;
12016         } else {
12017             timer_t *phtimer = g_posix_timers + timer_index;
12018 
12019             if (arg2) {
12020                 phost_sevp = &host_sevp;
12021                 ret = target_to_host_sigevent(phost_sevp, arg2);
12022                 if (ret != 0) {
12023                     return ret;
12024                 }
12025             }
12026 
12027             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12028             if (ret) {
12029                 phtimer = NULL;
12030             } else {
12031                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12032                     return -TARGET_EFAULT;
12033                 }
12034             }
12035         }
12036         return ret;
12037     }
12038 #endif
12039 
12040 #ifdef TARGET_NR_timer_settime
12041     case TARGET_NR_timer_settime:
12042     {
12043         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12044          * struct itimerspec * old_value */
12045         target_timer_t timerid = get_timer_id(arg1);
12046 
12047         if (timerid < 0) {
12048             ret = timerid;
12049         } else if (arg3 == 0) {
12050             ret = -TARGET_EINVAL;
12051         } else {
12052             timer_t htimer = g_posix_timers[timerid];
12053             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12054 
12055             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12056                 return -TARGET_EFAULT;
12057             }
12058             ret = get_errno(
12059                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12060             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12061                 return -TARGET_EFAULT;
12062             }
12063         }
12064         return ret;
12065     }
12066 #endif
12067 
12068 #ifdef TARGET_NR_timer_gettime
12069     case TARGET_NR_timer_gettime:
12070     {
12071         /* args: timer_t timerid, struct itimerspec *curr_value */
12072         target_timer_t timerid = get_timer_id(arg1);
12073 
12074         if (timerid < 0) {
12075             ret = timerid;
12076         } else if (!arg2) {
12077             ret = -TARGET_EFAULT;
12078         } else {
12079             timer_t htimer = g_posix_timers[timerid];
12080             struct itimerspec hspec;
12081             ret = get_errno(timer_gettime(htimer, &hspec));
12082 
12083             if (!is_error(ret) && host_to_target_itimerspec(arg2, &hspec)) {
12084                 ret = -TARGET_EFAULT;
12085             }
12086         }
12087         return ret;
12088     }
12089 #endif
12090 
12091 #ifdef TARGET_NR_timer_getoverrun
12092     case TARGET_NR_timer_getoverrun:
12093     {
12094         /* args: timer_t timerid */
12095         target_timer_t timerid = get_timer_id(arg1);
12096 
12097         if (timerid < 0) {
12098             ret = timerid;
12099         } else {
12100             timer_t htimer = g_posix_timers[timerid];
12101             ret = get_errno(timer_getoverrun(htimer));
12102         }
12103         return ret;
12104     }
12105 #endif
12106 
12107 #ifdef TARGET_NR_timer_delete
12108     case TARGET_NR_timer_delete:
12109     {
12110         /* args: timer_t timerid */
12111         target_timer_t timerid = get_timer_id(arg1);
12112 
12113         if (timerid < 0) {
12114             ret = timerid;
12115         } else {
12116             timer_t htimer = g_posix_timers[timerid];
12117             ret = get_errno(timer_delete(htimer));
12118             g_posix_timers[timerid] = 0;
12119         }
12120         return ret;
12121     }
12122 #endif
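          /*
           * Zeroing g_posix_timers[timerid] above is what releases the slot:
           * next_free_host_timer() (used by the timer_create case) looks for an
           * unused entry, so a deleted id can be recycled by a later
           * timer_create() call.
           */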
12123 
12124 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12125     case TARGET_NR_timerfd_create:
12126         return get_errno(timerfd_create(arg1,
12127                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12128 #endif
12129 
12130 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12131     case TARGET_NR_timerfd_gettime:
12132         {
12133             struct itimerspec its_curr;
12134 
12135             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12136 
12137             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12138                 return -TARGET_EFAULT;
12139             }
12140         }
12141         return ret;
12142 #endif
12143 
12144 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12145     case TARGET_NR_timerfd_settime:
12146         {
12147             struct itimerspec its_new, its_old, *p_new;
12148 
12149             if (arg3) {
12150                 if (target_to_host_itimerspec(&its_new, arg3)) {
12151                     return -TARGET_EFAULT;
12152                 }
12153                 p_new = &its_new;
12154             } else {
12155                 p_new = NULL;
12156             }
12157 
12158             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12159 
12160             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12161                 return -TARGET_EFAULT;
12162             }
12163         }
12164         return ret;
12165 #endif
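          /*
           * The three timerfd cases above only translate the create-time flags
           * (via fcntl_flags_tbl) and the itimerspec layout; the descriptor
           * itself behaves as on the host.  Typical guest usage (hypothetical
           * values):
           *
           *     int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
           *     struct itimerspec its = { .it_value = { .tv_sec = 2 } };
           *     timerfd_settime(fd, 0, &its, NULL);
           *     uint64_t expirations;
           *     read(fd, &expirations, sizeof(expirations));   <- blocks until expiry
           */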
12166 
12167 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12168     case TARGET_NR_ioprio_get:
12169         return get_errno(ioprio_get(arg1, arg2));
12170 #endif
12171 
12172 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12173     case TARGET_NR_ioprio_set:
12174         return get_errno(ioprio_set(arg1, arg2, arg3));
12175 #endif
12176 
12177 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12178     case TARGET_NR_setns:
12179         return get_errno(setns(arg1, arg2));
12180 #endif
12181 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12182     case TARGET_NR_unshare:
12183         return get_errno(unshare(arg1));
12184 #endif
12185 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12186     case TARGET_NR_kcmp:
12187         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12188 #endif
12189 #ifdef TARGET_NR_swapcontext
12190     case TARGET_NR_swapcontext:
12191         /* PowerPC specific.  */
12192         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12193 #endif
12194 #ifdef TARGET_NR_memfd_create
12195     case TARGET_NR_memfd_create:
12196         p = lock_user_string(arg1);
12197         if (!p) {
12198             return -TARGET_EFAULT;
12199         }
12200         ret = get_errno(memfd_create(p, arg2));
12201         fd_trans_unregister(ret);
12202         unlock_user(p, arg1, 0);
12203         return ret;
12204 #endif
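          /*
           * For memfd_create only the name needs to be copied in from guest
           * memory; the flags word (arg2) is passed through unchanged.  A guest
           * normally follows it with ftruncate()+mmap(), e.g. (hypothetical
           * name and size):
           *
           *     int fd = memfd_create("scratch", MFD_CLOEXEC);
           *     ftruncate(fd, 4096);
           *     void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
           *                      MAP_SHARED, fd, 0);
           */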
12205 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
12206     case TARGET_NR_membarrier:
12207         return get_errno(membarrier(arg1, arg2));
12208 #endif
12209 
12210     default:
12211         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12212         return -TARGET_ENOSYS;
12213     }
12214     return ret;
12215 }
12216 
12217 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12218                     abi_long arg2, abi_long arg3, abi_long arg4,
12219                     abi_long arg5, abi_long arg6, abi_long arg7,
12220                     abi_long arg8)
12221 {
12222     CPUState *cpu = env_cpu(cpu_env);
12223     abi_long ret;
12224 
12225 #ifdef DEBUG_ERESTARTSYS
12226     /* Debug-only code for exercising the syscall-restart code paths
12227      * in the per-architecture cpu main loops: restart every syscall
12228      * the guest makes once before letting it through.
12229      */
12230     {
12231         static bool flag;
12232         flag = !flag;
12233         if (flag) {
12234             return -TARGET_ERESTARTSYS;
12235         }
12236     }
12237 #endif
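          /*
           * DEBUG_ERESTARTSYS is a debug-only build switch; defining it (for
           * example by adding -DDEBUG_ERESTARTSYS to the build flags, an
           * assumption about how one would enable it rather than a documented
           * option) makes every guest syscall fail once with
           * TARGET_ERESTARTSYS before going through, exercising the restart
           * handling in each target's cpu_loop().
           */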
12238 
12239     record_syscall_start(cpu, num, arg1,
12240                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12241 
12242     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12243         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12244     }
12245 
12246     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12247                       arg5, arg6, arg7, arg8);
12248 
12249     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12250         print_syscall_ret(num, ret);
12251     }
12252 
12253     record_syscall_return(cpu, num, ret);
12254     return ret;
12255 }
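
      /*
       * do_syscall() is the entry point used by the per-target cpu loops: the
       * record_syscall_start()/record_syscall_return() trace hooks always run,
       * while the strace-style prints are gated on the LOG_STRACE log mask.
       * That mask is normally switched on from the qemu user-mode command
       * line, e.g. (illustrative invocation):
       *
       *     qemu-arm -strace ./guest-binary
       */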
12256