/* /openbmc/qemu/linux-user/syscall.c (revision 19f70347) */
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include "linux_loop.h"
115 #include "uname.h"
116 
117 #include "qemu.h"
118 #include "qemu/guest-random.h"
119 #include "user/syscall-trace.h"
120 #include "qapi/error.h"
121 #include "fd-trans.h"
122 #include "tcg/tcg.h"
123 
124 #ifndef CLONE_IO
125 #define CLONE_IO                0x80000000      /* Clone io context */
126 #endif
127 
128 /* We can't directly call the host clone syscall, because this will
129  * badly confuse libc (breaking mutexes, for example). So we must
130  * divide clone flags into:
131  *  * flag combinations that look like pthread_create()
132  *  * flag combinations that look like fork()
133  *  * flags we can implement within QEMU itself
134  *  * flags we can't support and will return an error for
135  */
136 /* For thread creation, all these flags must be present; for
137  * fork, none must be present.
138  */
139 #define CLONE_THREAD_FLAGS                              \
140     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
141      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
142 
143 /* These flags are ignored:
144  * CLONE_DETACHED is now ignored by the kernel;
145  * CLONE_IO is just an optimisation hint to the I/O scheduler
146  */
147 #define CLONE_IGNORED_FLAGS                     \
148     (CLONE_DETACHED | CLONE_IO)
149 
150 /* Flags for fork which we can implement within QEMU itself */
151 #define CLONE_OPTIONAL_FORK_FLAGS               \
152     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
153      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
154 
155 /* Flags for thread creation which we can implement within QEMU itself */
156 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
157     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
158      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
159 
160 #define CLONE_INVALID_FORK_FLAGS                                        \
161     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
162 
163 #define CLONE_INVALID_THREAD_FLAGS                                      \
164     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
165        CLONE_IGNORED_FLAGS))
166 
167 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
168  * have almost all been allocated. We cannot support any of
169  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
170  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
171  * The checks against the invalid thread masks above will catch these.
172  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
173  */
174 
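/*
 * Illustrative sketch (disabled; not the real do_fork() logic, and the
 * helper name is hypothetical): how a guest's clone flags can be
 * classified with the masks above.
 */
#if 0
static int classify_clone_flags(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
        /* Looks like pthread_create(): reject anything we can't honour. */
        return (flags & CLONE_INVALID_THREAD_FLAGS) ? -EINVAL : 1;
    }
    if (!(flags & CLONE_THREAD_FLAGS)) {
        /* Looks like fork(): only optional/ignored extras are allowed. */
        return (flags & CLONE_INVALID_FORK_FLAGS) ? -EINVAL : 0;
    }
    /* A partial set of the thread flags is not supported. */
    return -EINVAL;
}
#endif
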
175 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
176  * once. This exercises the codepaths for restart.
177  */
178 //#define DEBUG_ERESTARTSYS
179 
180 //#include <linux/msdos_fs.h>
181 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
182 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
183 
184 #undef _syscall0
185 #undef _syscall1
186 #undef _syscall2
187 #undef _syscall3
188 #undef _syscall4
189 #undef _syscall5
190 #undef _syscall6
191 
192 #define _syscall0(type,name)		\
193 static type name (void)			\
194 {					\
195 	return syscall(__NR_##name);	\
196 }
197 
198 #define _syscall1(type,name,type1,arg1)		\
199 static type name (type1 arg1)			\
200 {						\
201 	return syscall(__NR_##name, arg1);	\
202 }
203 
204 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
205 static type name (type1 arg1,type2 arg2)		\
206 {							\
207 	return syscall(__NR_##name, arg1, arg2);	\
208 }
209 
210 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
211 static type name (type1 arg1,type2 arg2,type3 arg3)		\
212 {								\
213 	return syscall(__NR_##name, arg1, arg2, arg3);		\
214 }
215 
216 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
217 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
218 {										\
219 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
220 }
221 
222 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
223 		  type5,arg5)							\
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
225 {										\
226 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
227 }
228 
229 
230 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
231 		  type5,arg5,type6,arg6)					\
232 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
233                   type6 arg6)							\
234 {										\
235 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
236 }
237 
238 
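/*
 * For reference, the invocation "_syscall0(int, sys_gettid)" further down
 * expands to:
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 *
 * which works because __NR_sys_gettid is #defined to the real __NR_gettid
 * just before the macro is used.
 */
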
239 #define __NR_sys_uname __NR_uname
240 #define __NR_sys_getcwd1 __NR_getcwd
241 #define __NR_sys_getdents __NR_getdents
242 #define __NR_sys_getdents64 __NR_getdents64
243 #define __NR_sys_getpriority __NR_getpriority
244 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
245 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
246 #define __NR_sys_syslog __NR_syslog
247 #define __NR_sys_futex __NR_futex
248 #define __NR_sys_inotify_init __NR_inotify_init
249 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
250 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
251 #define __NR_sys_statx __NR_statx
252 
253 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
254 #define __NR__llseek __NR_lseek
255 #endif
256 
257 /* Newer kernel ports have llseek() instead of _llseek() */
258 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
259 #define TARGET_NR__llseek TARGET_NR_llseek
260 #endif
261 
262 #define __NR_sys_gettid __NR_gettid
263 _syscall0(int, sys_gettid)
264 
265 /* For the 64-bit guest on 32-bit host case we must emulate
266  * getdents using getdents64, because otherwise the host
267  * might hand us back more dirent records than we can fit
268  * into the guest buffer after structure format conversion.
269  * Otherwise we implement the guest's getdents directly with the host
270  * getdents, if the host has it.
270  */
271 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
272 #define EMULATE_GETDENTS_WITH_GETDENTS
273 #endif
274 
275 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
276 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
277 #endif
278 #if (defined(TARGET_NR_getdents) && \
279       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
280     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
281 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
282 #endif
283 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
284 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
285           loff_t *, res, uint, wh);
286 #endif
287 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
288 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
289           siginfo_t *, uinfo)
290 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
291 #ifdef __NR_exit_group
292 _syscall1(int,exit_group,int,error_code)
293 #endif
294 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
295 _syscall1(int,set_tid_address,int *,tidptr)
296 #endif
297 #if defined(TARGET_NR_futex) && defined(__NR_futex)
298 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
299           const struct timespec *,timeout,int *,uaddr2,int,val3)
300 #endif
301 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
302 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
303           unsigned long *, user_mask_ptr);
304 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
305 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
306           unsigned long *, user_mask_ptr);
307 #define __NR_sys_getcpu __NR_getcpu
308 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
309 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
310           void *, arg);
311 _syscall2(int, capget, struct __user_cap_header_struct *, header,
312           struct __user_cap_data_struct *, data);
313 _syscall2(int, capset, struct __user_cap_header_struct *, header,
314           struct __user_cap_data_struct *, data);
315 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
316 _syscall2(int, ioprio_get, int, which, int, who)
317 #endif
318 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
319 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
320 #endif
321 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
322 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
323 #endif
324 
325 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
326 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
327           unsigned long, idx1, unsigned long, idx2)
328 #endif
329 
330 /*
331  * It is assumed that struct statx is architecture independent.
332  */
333 #if defined(TARGET_NR_statx) && defined(__NR_statx)
334 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
335           unsigned int, mask, struct target_statx *, statxbuf)
336 #endif
337 
338 static bitmask_transtbl fcntl_flags_tbl[] = {
339   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
340   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
341   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
342   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
343   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
344   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
345   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
346   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
347   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
348   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
349   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
350   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
351   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
352 #if defined(O_DIRECT)
353   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
354 #endif
355 #if defined(O_NOATIME)
356   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
357 #endif
358 #if defined(O_CLOEXEC)
359   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
360 #endif
361 #if defined(O_PATH)
362   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
363 #endif
364 #if defined(O_TMPFILE)
365   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
366 #endif
367   /* Don't terminate the list prematurely on 64-bit host+guest.  */
368 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
369   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
370 #endif
371   { 0, 0, 0, 0 }
372 };
373 
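/*
 * Sketch of how the table above is consumed (the real callers are the
 * open/fcntl paths later in this file, via the generic bitmask helpers):
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *     int guest_flags = host_to_target_bitmask(host_flags, fcntl_flags_tbl);
 *
 * Each row maps (target_mask, target_bits) to (host_mask, host_bits), so a
 * guest TARGET_O_NONBLOCK bit becomes the host's O_NONBLOCK value even when
 * the numeric constants differ between architectures.
 */
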
374 static int sys_getcwd1(char *buf, size_t size)
375 {
376   if (getcwd(buf, size) == NULL) {
377       /* getcwd() sets errno */
378       return (-1);
379   }
380   return strlen(buf)+1;
381 }
382 
383 #ifdef TARGET_NR_utimensat
384 #if defined(__NR_utimensat)
385 #define __NR_sys_utimensat __NR_utimensat
386 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
387           const struct timespec *,tsp,int,flags)
388 #else
389 static int sys_utimensat(int dirfd, const char *pathname,
390                          const struct timespec times[2], int flags)
391 {
392     errno = ENOSYS;
393     return -1;
394 }
395 #endif
396 #endif /* TARGET_NR_utimensat */
397 
398 #ifdef TARGET_NR_renameat2
399 #if defined(__NR_renameat2)
400 #define __NR_sys_renameat2 __NR_renameat2
401 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
402           const char *, new, unsigned int, flags)
403 #else
404 static int sys_renameat2(int oldfd, const char *old,
405                          int newfd, const char *new, int flags)
406 {
407     if (flags == 0) {
408         return renameat(oldfd, old, newfd, new);
409     }
410     errno = ENOSYS;
411     return -1;
412 }
413 #endif
414 #endif /* TARGET_NR_renameat2 */
415 
416 #ifdef CONFIG_INOTIFY
417 #include <sys/inotify.h>
418 
419 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
420 static int sys_inotify_init(void)
421 {
422   return (inotify_init());
423 }
424 #endif
425 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
426 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
427 {
428   return (inotify_add_watch(fd, pathname, mask));
429 }
430 #endif
431 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
432 static int sys_inotify_rm_watch(int fd, int32_t wd)
433 {
434   return (inotify_rm_watch(fd, wd));
435 }
436 #endif
437 #ifdef CONFIG_INOTIFY1
438 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
439 static int sys_inotify_init1(int flags)
440 {
441   return (inotify_init1(flags));
442 }
443 #endif
444 #endif
445 #else
446 /* Userspace can usually survive runtime without inotify */
447 #undef TARGET_NR_inotify_init
448 #undef TARGET_NR_inotify_init1
449 #undef TARGET_NR_inotify_add_watch
450 #undef TARGET_NR_inotify_rm_watch
451 #endif /* CONFIG_INOTIFY  */
452 
453 #if defined(TARGET_NR_prlimit64)
454 #ifndef __NR_prlimit64
455 # define __NR_prlimit64 -1
456 #endif
457 #define __NR_sys_prlimit64 __NR_prlimit64
458 /* The glibc rlimit structure may not be the one used by the underlying syscall */
459 struct host_rlimit64 {
460     uint64_t rlim_cur;
461     uint64_t rlim_max;
462 };
463 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
464           const struct host_rlimit64 *, new_limit,
465           struct host_rlimit64 *, old_limit)
466 #endif
467 
468 
469 #if defined(TARGET_NR_timer_create)
470 /* Maximum of 32 active POSIX timers allowed at any one time. */
471 static timer_t g_posix_timers[32] = { 0, };
472 
473 static inline int next_free_host_timer(void)
474 {
475     int k;
476     /* FIXME: Does finding the next free slot require a lock? */
477     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
478         if (g_posix_timers[k] == 0) {
479             g_posix_timers[k] = (timer_t) 1;
480             return k;
481         }
482     }
483     return -1;
484 }
485 #endif
486 
487 /* ARM EABI and MIPS expect 64-bit types to be aligned on even pairs of registers */
488 #ifdef TARGET_ARM
489 static inline int regpairs_aligned(void *cpu_env, int num)
490 {
491     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
492 }
493 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
494 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
495 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
496 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even
497  * register pairs, which works out the same as ARM/MIPS because we start with
498  * r3 as arg1 */
499 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
500 #elif defined(TARGET_SH4)
501 /* SH4 doesn't align register pairs, except for p{read,write}64 */
502 static inline int regpairs_aligned(void *cpu_env, int num)
503 {
504     switch (num) {
505     case TARGET_NR_pread64:
506     case TARGET_NR_pwrite64:
507         return 1;
508 
509     default:
510         return 0;
511     }
512 }
513 #elif defined(TARGET_XTENSA)
514 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
515 #else
516 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
517 #endif
518 
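/*
 * Why regpairs_aligned() matters, as a sketch (the real handling is in the
 * pread64/pwrite64 and similar cases later in this file): when a 32-bit ABI
 * passes a 64-bit value in a register pair that must start on an even
 * register, one argument slot is skipped before the value is reassembled
 * from its two 32-bit halves:
 *
 *     if (regpairs_aligned(cpu_env, TARGET_NR_pread64)) {
 *         arg4 = arg5;
 *         arg5 = arg6;
 *     }
 *     off64_t offset = target_offset64(arg4, arg5);
 */
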
519 #define ERRNO_TABLE_SIZE 1200
520 
521 /* target_to_host_errno_table[] is initialized from
522  * host_to_target_errno_table[] in syscall_init(). */
523 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
524 };
525 
526 /*
527  * This list is the union of errno values overridden in asm-<arch>/errno.h
528  * minus the errnos that are not actually generic to all archs.
529  */
530 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
531     [EAGAIN]		= TARGET_EAGAIN,
532     [EIDRM]		= TARGET_EIDRM,
533     [ECHRNG]		= TARGET_ECHRNG,
534     [EL2NSYNC]		= TARGET_EL2NSYNC,
535     [EL3HLT]		= TARGET_EL3HLT,
536     [EL3RST]		= TARGET_EL3RST,
537     [ELNRNG]		= TARGET_ELNRNG,
538     [EUNATCH]		= TARGET_EUNATCH,
539     [ENOCSI]		= TARGET_ENOCSI,
540     [EL2HLT]		= TARGET_EL2HLT,
541     [EDEADLK]		= TARGET_EDEADLK,
542     [ENOLCK]		= TARGET_ENOLCK,
543     [EBADE]		= TARGET_EBADE,
544     [EBADR]		= TARGET_EBADR,
545     [EXFULL]		= TARGET_EXFULL,
546     [ENOANO]		= TARGET_ENOANO,
547     [EBADRQC]		= TARGET_EBADRQC,
548     [EBADSLT]		= TARGET_EBADSLT,
549     [EBFONT]		= TARGET_EBFONT,
550     [ENOSTR]		= TARGET_ENOSTR,
551     [ENODATA]		= TARGET_ENODATA,
552     [ETIME]		= TARGET_ETIME,
553     [ENOSR]		= TARGET_ENOSR,
554     [ENONET]		= TARGET_ENONET,
555     [ENOPKG]		= TARGET_ENOPKG,
556     [EREMOTE]		= TARGET_EREMOTE,
557     [ENOLINK]		= TARGET_ENOLINK,
558     [EADV]		= TARGET_EADV,
559     [ESRMNT]		= TARGET_ESRMNT,
560     [ECOMM]		= TARGET_ECOMM,
561     [EPROTO]		= TARGET_EPROTO,
562     [EDOTDOT]		= TARGET_EDOTDOT,
563     [EMULTIHOP]		= TARGET_EMULTIHOP,
564     [EBADMSG]		= TARGET_EBADMSG,
565     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
566     [EOVERFLOW]		= TARGET_EOVERFLOW,
567     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
568     [EBADFD]		= TARGET_EBADFD,
569     [EREMCHG]		= TARGET_EREMCHG,
570     [ELIBACC]		= TARGET_ELIBACC,
571     [ELIBBAD]		= TARGET_ELIBBAD,
572     [ELIBSCN]		= TARGET_ELIBSCN,
573     [ELIBMAX]		= TARGET_ELIBMAX,
574     [ELIBEXEC]		= TARGET_ELIBEXEC,
575     [EILSEQ]		= TARGET_EILSEQ,
576     [ENOSYS]		= TARGET_ENOSYS,
577     [ELOOP]		= TARGET_ELOOP,
578     [ERESTART]		= TARGET_ERESTART,
579     [ESTRPIPE]		= TARGET_ESTRPIPE,
580     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
581     [EUSERS]		= TARGET_EUSERS,
582     [ENOTSOCK]		= TARGET_ENOTSOCK,
583     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
584     [EMSGSIZE]		= TARGET_EMSGSIZE,
585     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
586     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
587     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
588     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
589     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
590     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
591     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
592     [EADDRINUSE]	= TARGET_EADDRINUSE,
593     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
594     [ENETDOWN]		= TARGET_ENETDOWN,
595     [ENETUNREACH]	= TARGET_ENETUNREACH,
596     [ENETRESET]		= TARGET_ENETRESET,
597     [ECONNABORTED]	= TARGET_ECONNABORTED,
598     [ECONNRESET]	= TARGET_ECONNRESET,
599     [ENOBUFS]		= TARGET_ENOBUFS,
600     [EISCONN]		= TARGET_EISCONN,
601     [ENOTCONN]		= TARGET_ENOTCONN,
602     [EUCLEAN]		= TARGET_EUCLEAN,
603     [ENOTNAM]		= TARGET_ENOTNAM,
604     [ENAVAIL]		= TARGET_ENAVAIL,
605     [EISNAM]		= TARGET_EISNAM,
606     [EREMOTEIO]		= TARGET_EREMOTEIO,
607     [EDQUOT]            = TARGET_EDQUOT,
608     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
609     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
610     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
611     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
612     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
613     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
614     [EALREADY]		= TARGET_EALREADY,
615     [EINPROGRESS]	= TARGET_EINPROGRESS,
616     [ESTALE]		= TARGET_ESTALE,
617     [ECANCELED]		= TARGET_ECANCELED,
618     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
619     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
620 #ifdef ENOKEY
621     [ENOKEY]		= TARGET_ENOKEY,
622 #endif
623 #ifdef EKEYEXPIRED
624     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
625 #endif
626 #ifdef EKEYREVOKED
627     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
628 #endif
629 #ifdef EKEYREJECTED
630     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
631 #endif
632 #ifdef EOWNERDEAD
633     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
634 #endif
635 #ifdef ENOTRECOVERABLE
636     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
637 #endif
638 #ifdef ENOMSG
639     [ENOMSG]            = TARGET_ENOMSG,
640 #endif
641 #ifdef ERFKILL
642     [ERFKILL]           = TARGET_ERFKILL,
643 #endif
644 #ifdef EHWPOISON
645     [EHWPOISON]         = TARGET_EHWPOISON,
646 #endif
647 };
648 
649 static inline int host_to_target_errno(int err)
650 {
651     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
652         host_to_target_errno_table[err]) {
653         return host_to_target_errno_table[err];
654     }
655     return err;
656 }
657 
658 static inline int target_to_host_errno(int err)
659 {
660     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
661         target_to_host_errno_table[err]) {
662         return target_to_host_errno_table[err];
663     }
664     return err;
665 }
666 
667 static inline abi_long get_errno(abi_long ret)
668 {
669     if (ret == -1)
670         return -host_to_target_errno(errno);
671     else
672         return ret;
673 }
674 
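/*
 * Typical usage pattern, for illustration: host wrappers return -1 and set
 * errno on failure, so results are funnelled through get_errno() to obtain
 * the guest convention of "negative target errno":
 *
 *     abi_long ret = get_errno(safe_read(fd, buf, count));
 *     if (is_error(ret)) {
 *         return ret;
 *     }
 *
 * On failure, ret is already e.g. -TARGET_EINTR and can be handed straight
 * back to the guest.
 */
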
675 const char *target_strerror(int err)
676 {
677     if (err == TARGET_ERESTARTSYS) {
678         return "To be restarted";
679     }
680     if (err == TARGET_QEMU_ESIGRETURN) {
681         return "Successful exit from sigreturn";
682     }
683 
684     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
685         return NULL;
686     }
687     return strerror(target_to_host_errno(err));
688 }
689 
690 #define safe_syscall0(type, name) \
691 static type safe_##name(void) \
692 { \
693     return safe_syscall(__NR_##name); \
694 }
695 
696 #define safe_syscall1(type, name, type1, arg1) \
697 static type safe_##name(type1 arg1) \
698 { \
699     return safe_syscall(__NR_##name, arg1); \
700 }
701 
702 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
703 static type safe_##name(type1 arg1, type2 arg2) \
704 { \
705     return safe_syscall(__NR_##name, arg1, arg2); \
706 }
707 
708 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
709 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
710 { \
711     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
712 }
713 
714 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
715     type4, arg4) \
716 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
717 { \
718     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
719 }
720 
721 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
722     type4, arg4, type5, arg5) \
723 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
724     type5 arg5) \
725 { \
726     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
727 }
728 
729 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
730     type4, arg4, type5, arg5, type6, arg6) \
731 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
732     type5 arg5, type6 arg6) \
733 { \
734     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
735 }
736 
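/*
 * For reference, the first declaration below,
 * "safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)",
 * expands to:
 *
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 */
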
737 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
738 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
739 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
740               int, flags, mode_t, mode)
741 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
742               struct rusage *, rusage)
743 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
744               int, options, struct rusage *, rusage)
745 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
746 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
747               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
748 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
749               struct timespec *, tsp, const sigset_t *, sigmask,
750               size_t, sigsetsize)
751 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
752               int, maxevents, int, timeout, const sigset_t *, sigmask,
753               size_t, sigsetsize)
754 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
755               const struct timespec *,timeout,int *,uaddr2,int,val3)
756 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
757 safe_syscall2(int, kill, pid_t, pid, int, sig)
758 safe_syscall2(int, tkill, int, tid, int, sig)
759 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
760 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
761 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
762 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
763               unsigned long, pos_l, unsigned long, pos_h)
764 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
765               unsigned long, pos_l, unsigned long, pos_h)
766 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
767               socklen_t, addrlen)
768 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
769               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
770 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
771               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
772 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
773 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
774 safe_syscall2(int, flock, int, fd, int, operation)
775 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
776               const struct timespec *, uts, size_t, sigsetsize)
777 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
778               int, flags)
779 safe_syscall2(int, nanosleep, const struct timespec *, req,
780               struct timespec *, rem)
781 #ifdef TARGET_NR_clock_nanosleep
782 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
783               const struct timespec *, req, struct timespec *, rem)
784 #endif
785 #ifdef __NR_ipc
786 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
787               void *, ptr, long, fifth)
788 #endif
789 #ifdef __NR_msgsnd
790 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
791               int, flags)
792 #endif
793 #ifdef __NR_msgrcv
794 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
795               long, msgtype, int, flags)
796 #endif
797 #ifdef __NR_semtimedop
798 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
799               unsigned, nsops, const struct timespec *, timeout)
800 #endif
801 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
802 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
803               size_t, len, unsigned, prio, const struct timespec *, timeout)
804 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
805               size_t, len, unsigned *, prio, const struct timespec *, timeout)
806 #endif
807 /* We do ioctl like this rather than via safe_syscall3 to preserve the
808  * "third argument might be integer or pointer or not present" behaviour of
809  * the libc function.
810  */
811 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
812 /* Similarly for fcntl. Note that callers must always:
813  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
814  *  use the flock64 struct rather than unsuffixed flock
815  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
816  */
817 #ifdef __NR_fcntl64
818 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
819 #else
820 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
821 #endif
822 
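/*
 * Example of the fcntl rule above (illustrative only):
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * Passing the unsuffixed F_GETLK with a plain struct flock would not give a
 * consistent 64-bit offset on 32-bit hosts that go through __NR_fcntl64.
 */
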
823 static inline int host_to_target_sock_type(int host_type)
824 {
825     int target_type;
826 
827     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
828     case SOCK_DGRAM:
829         target_type = TARGET_SOCK_DGRAM;
830         break;
831     case SOCK_STREAM:
832         target_type = TARGET_SOCK_STREAM;
833         break;
834     default:
835         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
836         break;
837     }
838 
839 #if defined(SOCK_CLOEXEC)
840     if (host_type & SOCK_CLOEXEC) {
841         target_type |= TARGET_SOCK_CLOEXEC;
842     }
843 #endif
844 
845 #if defined(SOCK_NONBLOCK)
846     if (host_type & SOCK_NONBLOCK) {
847         target_type |= TARGET_SOCK_NONBLOCK;
848     }
849 #endif
850 
851     return target_type;
852 }
853 
854 static abi_ulong target_brk;
855 static abi_ulong target_original_brk;
856 static abi_ulong brk_page;
857 
858 void target_set_brk(abi_ulong new_brk)
859 {
860     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
861     brk_page = HOST_PAGE_ALIGN(target_brk);
862 }
863 
864 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
865 #define DEBUGF_BRK(message, args...)
866 
867 /* do_brk() must return target values and target errnos. */
868 abi_long do_brk(abi_ulong new_brk)
869 {
870     abi_long mapped_addr;
871     abi_ulong new_alloc_size;
872 
873     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
874 
875     if (!new_brk) {
876         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
877         return target_brk;
878     }
879     if (new_brk < target_original_brk) {
880         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
881                    target_brk);
882         return target_brk;
883     }
884 
885     /* If the new brk does not go beyond the highest page already reserved
886      * for the target heap, just record it and we're almost done...  */
887     if (new_brk <= brk_page) {
888         /* Heap contents are initialized to zero, as for anonymous
889          * mapped pages.  */
890         if (new_brk > target_brk) {
891             memset(g2h(target_brk), 0, new_brk - target_brk);
892         }
893         target_brk = new_brk;
894         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
895         return target_brk;
896     }
897 
898     /* We need to allocate more memory after the brk... Note that
899      * we don't use MAP_FIXED because that will map over the top of
900      * any existing mapping (like the one with the host libc or qemu
901      * itself); instead we treat "mapped but at wrong address" as
902      * a failure and unmap again.
903      */
904     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
905     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
906                                         PROT_READ|PROT_WRITE,
907                                         MAP_ANON|MAP_PRIVATE, 0, 0));
908 
909     if (mapped_addr == brk_page) {
910         /* Heap contents are initialized to zero, as for anonymous
911          * mapped pages.  Technically the new pages are already
912          * initialized to zero since they *are* anonymous mapped
913          * pages, however we have to take care with the contents that
914          * come from the remaining part of the previous page: it may
915          * contain garbage data left over from previous heap usage (the
916          * heap may have grown and then shrunk).  */
917         memset(g2h(target_brk), 0, brk_page - target_brk);
918 
919         target_brk = new_brk;
920         brk_page = HOST_PAGE_ALIGN(target_brk);
921         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
922             target_brk);
923         return target_brk;
924     } else if (mapped_addr != -1) {
925         /* Mapped but at wrong address, meaning there wasn't actually
926          * enough space for this brk.
927          */
928         target_munmap(mapped_addr, new_alloc_size);
929         mapped_addr = -1;
930         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
931     }
932     else {
933         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
934     }
935 
936 #if defined(TARGET_ALPHA)
937     /* We (partially) emulate OSF/1 on Alpha, which requires we
938        return a proper errno, not an unchanged brk value.  */
939     return -TARGET_ENOMEM;
940 #endif
941     /* For everything else, return the previous break. */
942     return target_brk;
943 }
944 
945 static inline abi_long copy_from_user_fdset(fd_set *fds,
946                                             abi_ulong target_fds_addr,
947                                             int n)
948 {
949     int i, nw, j, k;
950     abi_ulong b, *target_fds;
951 
952     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
953     if (!(target_fds = lock_user(VERIFY_READ,
954                                  target_fds_addr,
955                                  sizeof(abi_ulong) * nw,
956                                  1)))
957         return -TARGET_EFAULT;
958 
959     FD_ZERO(fds);
960     k = 0;
961     for (i = 0; i < nw; i++) {
962         /* grab the abi_ulong */
963         __get_user(b, &target_fds[i]);
964         for (j = 0; j < TARGET_ABI_BITS; j++) {
965             /* check the bit inside the abi_ulong */
966             if ((b >> j) & 1)
967                 FD_SET(k, fds);
968             k++;
969         }
970     }
971 
972     unlock_user(target_fds, target_fds_addr, 0);
973 
974     return 0;
975 }
976 
977 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
978                                                  abi_ulong target_fds_addr,
979                                                  int n)
980 {
981     if (target_fds_addr) {
982         if (copy_from_user_fdset(fds, target_fds_addr, n))
983             return -TARGET_EFAULT;
984         *fds_ptr = fds;
985     } else {
986         *fds_ptr = NULL;
987     }
988     return 0;
989 }
990 
991 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
992                                           const fd_set *fds,
993                                           int n)
994 {
995     int i, nw, j, k;
996     abi_long v;
997     abi_ulong *target_fds;
998 
999     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1000     if (!(target_fds = lock_user(VERIFY_WRITE,
1001                                  target_fds_addr,
1002                                  sizeof(abi_ulong) * nw,
1003                                  0)))
1004         return -TARGET_EFAULT;
1005 
1006     k = 0;
1007     for (i = 0; i < nw; i++) {
1008         v = 0;
1009         for (j = 0; j < TARGET_ABI_BITS; j++) {
1010             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1011             k++;
1012         }
1013         __put_user(v, &target_fds[i]);
1014     }
1015 
1016     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1017 
1018     return 0;
1019 }
1020 
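/*
 * Worked example for the two fdset copiers above: with TARGET_ABI_BITS == 32
 * and n == 40, nw == 2 abi_ulongs are transferred.  Guest fd 35 lives in
 * word 1, bit 3 (target_fds[1] & (1 << 3)), matching the guest kernel's
 * fd_set layout for a 32-bit ABI regardless of the host's word size.
 */
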
1021 #if defined(__alpha__)
1022 #define HOST_HZ 1024
1023 #else
1024 #define HOST_HZ 100
1025 #endif
1026 
1027 static inline abi_long host_to_target_clock_t(long ticks)
1028 {
1029 #if HOST_HZ == TARGET_HZ
1030     return ticks;
1031 #else
1032     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1033 #endif
1034 }
1035 
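/*
 * Example of the scaling above: on an Alpha host (HOST_HZ == 1024) running
 * a guest with TARGET_HZ == 100, 2048 host ticks are reported as
 * (2048 * 100) / 1024 == 200 guest ticks.
 */
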
1036 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1037                                              const struct rusage *rusage)
1038 {
1039     struct target_rusage *target_rusage;
1040 
1041     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1042         return -TARGET_EFAULT;
1043     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1044     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1045     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1046     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1047     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1048     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1049     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1050     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1051     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1052     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1053     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1054     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1055     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1056     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1057     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1058     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1059     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1060     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1061     unlock_user_struct(target_rusage, target_addr, 1);
1062 
1063     return 0;
1064 }
1065 
1066 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1067 {
1068     abi_ulong target_rlim_swap;
1069     rlim_t result;
1070 
1071     target_rlim_swap = tswapal(target_rlim);
1072     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1073         return RLIM_INFINITY;
1074 
1075     result = target_rlim_swap;
1076     if (target_rlim_swap != (rlim_t)result)
1077         return RLIM_INFINITY;
1078 
1079     return result;
1080 }
1081 
1082 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1083 {
1084     abi_ulong target_rlim_swap;
1085     abi_ulong result;
1086 
1087     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1088         target_rlim_swap = TARGET_RLIM_INFINITY;
1089     else
1090         target_rlim_swap = rlim;
1091     result = tswapal(target_rlim_swap);
1092 
1093     return result;
1094 }
1095 
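/*
 * Example of the clamping above: a 32-bit guest cannot represent a host
 * limit of, say, 8 GiB in its abi_ulong rlim fields, so any value that does
 * not survive the round trip through abi_long is reported to the guest as
 * TARGET_RLIM_INFINITY rather than being silently truncated.
 */
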
1096 static inline int target_to_host_resource(int code)
1097 {
1098     switch (code) {
1099     case TARGET_RLIMIT_AS:
1100         return RLIMIT_AS;
1101     case TARGET_RLIMIT_CORE:
1102         return RLIMIT_CORE;
1103     case TARGET_RLIMIT_CPU:
1104         return RLIMIT_CPU;
1105     case TARGET_RLIMIT_DATA:
1106         return RLIMIT_DATA;
1107     case TARGET_RLIMIT_FSIZE:
1108         return RLIMIT_FSIZE;
1109     case TARGET_RLIMIT_LOCKS:
1110         return RLIMIT_LOCKS;
1111     case TARGET_RLIMIT_MEMLOCK:
1112         return RLIMIT_MEMLOCK;
1113     case TARGET_RLIMIT_MSGQUEUE:
1114         return RLIMIT_MSGQUEUE;
1115     case TARGET_RLIMIT_NICE:
1116         return RLIMIT_NICE;
1117     case TARGET_RLIMIT_NOFILE:
1118         return RLIMIT_NOFILE;
1119     case TARGET_RLIMIT_NPROC:
1120         return RLIMIT_NPROC;
1121     case TARGET_RLIMIT_RSS:
1122         return RLIMIT_RSS;
1123     case TARGET_RLIMIT_RTPRIO:
1124         return RLIMIT_RTPRIO;
1125     case TARGET_RLIMIT_SIGPENDING:
1126         return RLIMIT_SIGPENDING;
1127     case TARGET_RLIMIT_STACK:
1128         return RLIMIT_STACK;
1129     default:
1130         return code;
1131     }
1132 }
1133 
1134 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1135                                               abi_ulong target_tv_addr)
1136 {
1137     struct target_timeval *target_tv;
1138 
1139     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1140         return -TARGET_EFAULT;
1141     }
1142 
1143     __get_user(tv->tv_sec, &target_tv->tv_sec);
1144     __get_user(tv->tv_usec, &target_tv->tv_usec);
1145 
1146     unlock_user_struct(target_tv, target_tv_addr, 0);
1147 
1148     return 0;
1149 }
1150 
1151 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1152                                             const struct timeval *tv)
1153 {
1154     struct target_timeval *target_tv;
1155 
1156     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1157         return -TARGET_EFAULT;
1158     }
1159 
1160     __put_user(tv->tv_sec, &target_tv->tv_sec);
1161     __put_user(tv->tv_usec, &target_tv->tv_usec);
1162 
1163     unlock_user_struct(target_tv, target_tv_addr, 1);
1164 
1165     return 0;
1166 }
1167 
1168 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1169                                              const struct timeval *tv)
1170 {
1171     struct target__kernel_sock_timeval *target_tv;
1172 
1173     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1174         return -TARGET_EFAULT;
1175     }
1176 
1177     __put_user(tv->tv_sec, &target_tv->tv_sec);
1178     __put_user(tv->tv_usec, &target_tv->tv_usec);
1179 
1180     unlock_user_struct(target_tv, target_tv_addr, 1);
1181 
1182     return 0;
1183 }
1184 
1185 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1186                                                abi_ulong target_addr)
1187 {
1188     struct target_timespec *target_ts;
1189 
1190     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1191         return -TARGET_EFAULT;
1192     }
1193     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1194     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1195     unlock_user_struct(target_ts, target_addr, 0);
1196     return 0;
1197 }
1198 
1199 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1200                                                struct timespec *host_ts)
1201 {
1202     struct target_timespec *target_ts;
1203 
1204     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1205         return -TARGET_EFAULT;
1206     }
1207     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1208     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1209     unlock_user_struct(target_ts, target_addr, 1);
1210     return 0;
1211 }
1212 
1213 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1214                                                  struct timespec *host_ts)
1215 {
1216     struct target__kernel_timespec *target_ts;
1217 
1218     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1219         return -TARGET_EFAULT;
1220     }
1221     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1222     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1223     unlock_user_struct(target_ts, target_addr, 1);
1224     return 0;
1225 }
1226 
1227 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1228                                                abi_ulong target_tz_addr)
1229 {
1230     struct target_timezone *target_tz;
1231 
1232     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1233         return -TARGET_EFAULT;
1234     }
1235 
1236     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1237     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1238 
1239     unlock_user_struct(target_tz, target_tz_addr, 0);
1240 
1241     return 0;
1242 }
1243 
1244 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1245 #include <mqueue.h>
1246 
1247 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1248                                               abi_ulong target_mq_attr_addr)
1249 {
1250     struct target_mq_attr *target_mq_attr;
1251 
1252     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1253                           target_mq_attr_addr, 1))
1254         return -TARGET_EFAULT;
1255 
1256     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1257     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1258     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1259     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1260 
1261     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1262 
1263     return 0;
1264 }
1265 
1266 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1267                                             const struct mq_attr *attr)
1268 {
1269     struct target_mq_attr *target_mq_attr;
1270 
1271     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1272                           target_mq_attr_addr, 0))
1273         return -TARGET_EFAULT;
1274 
1275     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1276     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1277     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1278     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1279 
1280     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1281 
1282     return 0;
1283 }
1284 #endif
1285 
1286 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1287 /* do_select() must return target values and target errnos. */
1288 static abi_long do_select(int n,
1289                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1290                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1291 {
1292     fd_set rfds, wfds, efds;
1293     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1294     struct timeval tv;
1295     struct timespec ts, *ts_ptr;
1296     abi_long ret;
1297 
1298     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1299     if (ret) {
1300         return ret;
1301     }
1302     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1303     if (ret) {
1304         return ret;
1305     }
1306     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1307     if (ret) {
1308         return ret;
1309     }
1310 
1311     if (target_tv_addr) {
1312         if (copy_from_user_timeval(&tv, target_tv_addr))
1313             return -TARGET_EFAULT;
1314         ts.tv_sec = tv.tv_sec;
1315         ts.tv_nsec = tv.tv_usec * 1000;
1316         ts_ptr = &ts;
1317     } else {
1318         ts_ptr = NULL;
1319     }
1320 
1321     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1322                                   ts_ptr, NULL));
1323 
1324     if (!is_error(ret)) {
1325         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1326             return -TARGET_EFAULT;
1327         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1328             return -TARGET_EFAULT;
1329         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1330             return -TARGET_EFAULT;
1331 
1332         if (target_tv_addr) {
1333             tv.tv_sec = ts.tv_sec;
1334             tv.tv_usec = ts.tv_nsec / 1000;
1335             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1336                 return -TARGET_EFAULT;
1337             }
1338         }
1339     }
1340 
1341     return ret;
1342 }
1343 
1344 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1345 static abi_long do_old_select(abi_ulong arg1)
1346 {
1347     struct target_sel_arg_struct *sel;
1348     abi_ulong inp, outp, exp, tvp;
1349     long nsel;
1350 
1351     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1352         return -TARGET_EFAULT;
1353     }
1354 
1355     nsel = tswapal(sel->n);
1356     inp = tswapal(sel->inp);
1357     outp = tswapal(sel->outp);
1358     exp = tswapal(sel->exp);
1359     tvp = tswapal(sel->tvp);
1360 
1361     unlock_user_struct(sel, arg1, 0);
1362 
1363     return do_select(nsel, inp, outp, exp, tvp);
1364 }
1365 #endif
1366 #endif
1367 
1368 static abi_long do_pipe2(int host_pipe[], int flags)
1369 {
1370 #ifdef CONFIG_PIPE2
1371     return pipe2(host_pipe, flags);
1372 #else
1373     return -ENOSYS;
1374 #endif
1375 }
1376 
1377 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1378                         int flags, int is_pipe2)
1379 {
1380     int host_pipe[2];
1381     abi_long ret;
1382     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1383 
1384     if (is_error(ret))
1385         return get_errno(ret);
1386 
1387     /* Several targets have special calling conventions for the original
1388        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1389     if (!is_pipe2) {
1390 #if defined(TARGET_ALPHA)
1391         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1392         return host_pipe[0];
1393 #elif defined(TARGET_MIPS)
1394         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1395         return host_pipe[0];
1396 #elif defined(TARGET_SH4)
1397         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1398         return host_pipe[0];
1399 #elif defined(TARGET_SPARC)
1400         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1401         return host_pipe[0];
1402 #endif
1403     }
1404 
1405     if (put_user_s32(host_pipe[0], pipedes)
1406         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1407         return -TARGET_EFAULT;
1408     return get_errno(ret);
1409 }
1410 
1411 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1412                                               abi_ulong target_addr,
1413                                               socklen_t len)
1414 {
1415     struct target_ip_mreqn *target_smreqn;
1416 
1417     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1418     if (!target_smreqn)
1419         return -TARGET_EFAULT;
1420     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1421     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1422     if (len == sizeof(struct target_ip_mreqn))
1423         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1424     unlock_user(target_smreqn, target_addr, 0);
1425 
1426     return 0;
1427 }
1428 
1429 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1430                                                abi_ulong target_addr,
1431                                                socklen_t len)
1432 {
1433     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1434     sa_family_t sa_family;
1435     struct target_sockaddr *target_saddr;
1436 
1437     if (fd_trans_target_to_host_addr(fd)) {
1438         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1439     }
1440 
1441     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1442     if (!target_saddr)
1443         return -TARGET_EFAULT;
1444 
1445     sa_family = tswap16(target_saddr->sa_family);
1446 
1447     /* Oops. The caller might send an incomplete sun_path; sun_path
1448      * must be terminated by \0 (see the manual page), but
1449      * unfortunately it is quite common to specify sockaddr_un
1450      * length as "strlen(x->sun_path)" while it should be
1451      * "strlen(...) + 1". We'll fix that here if needed.
1452      * The Linux kernel has a similar workaround.
1453      */
1454 
1455     if (sa_family == AF_UNIX) {
1456         if (len < unix_maxlen && len > 0) {
1457             char *cp = (char*)target_saddr;
1458 
1459             if (cp[len - 1] && !cp[len])
1460                 len++;
1461         }
1462         if (len > unix_maxlen)
1463             len = unix_maxlen;
1464     }
1465 
1466     memcpy(addr, target_saddr, len);
1467     addr->sa_family = sa_family;
1468     if (sa_family == AF_NETLINK) {
1469         struct sockaddr_nl *nladdr;
1470 
1471         nladdr = (struct sockaddr_nl *)addr;
1472         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1473         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1474     } else if (sa_family == AF_PACKET) {
1475         struct target_sockaddr_ll *lladdr;
1476 
1477         lladdr = (struct target_sockaddr_ll *)addr;
1478         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1479         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1480     }
1481     unlock_user(target_saddr, target_addr, 0);
1482 
1483     return 0;
1484 }
1485 
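/*
 * Example of the AF_UNIX fix-up above: if the guest binds to "/tmp/sock"
 * but passes len == offsetof(struct sockaddr_un, sun_path) + 9, omitting
 * the trailing NUL, and the byte at cp[len] happens to be 0, len is bumped
 * by one so the host sees a properly terminated sun_path.
 */
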
1486 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1487                                                struct sockaddr *addr,
1488                                                socklen_t len)
1489 {
1490     struct target_sockaddr *target_saddr;
1491 
1492     if (len == 0) {
1493         return 0;
1494     }
1495     assert(addr);
1496 
1497     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1498     if (!target_saddr)
1499         return -TARGET_EFAULT;
1500     memcpy(target_saddr, addr, len);
1501     if (len >= offsetof(struct target_sockaddr, sa_family) +
1502         sizeof(target_saddr->sa_family)) {
1503         target_saddr->sa_family = tswap16(addr->sa_family);
1504     }
1505     if (addr->sa_family == AF_NETLINK &&
1506         len >= sizeof(struct target_sockaddr_nl)) {
1507         struct target_sockaddr_nl *target_nl =
1508                (struct target_sockaddr_nl *)target_saddr;
1509         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1510         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1511     } else if (addr->sa_family == AF_PACKET) {
1512         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1513         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1514         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1515     } else if (addr->sa_family == AF_INET6 &&
1516                len >= sizeof(struct target_sockaddr_in6)) {
1517         struct target_sockaddr_in6 *target_in6 =
1518                (struct target_sockaddr_in6 *)target_saddr;
1519         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1520     }
1521     unlock_user(target_saddr, target_addr, len);
1522 
1523     return 0;
1524 }
1525 
1526 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1527                                            struct target_msghdr *target_msgh)
1528 {
1529     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1530     abi_long msg_controllen;
1531     abi_ulong target_cmsg_addr;
1532     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1533     socklen_t space = 0;
1534 
1535     msg_controllen = tswapal(target_msgh->msg_controllen);
1536     if (msg_controllen < sizeof (struct target_cmsghdr))
1537         goto the_end;
1538     target_cmsg_addr = tswapal(target_msgh->msg_control);
1539     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1540     target_cmsg_start = target_cmsg;
1541     if (!target_cmsg)
1542         return -TARGET_EFAULT;
1543 
1544     while (cmsg && target_cmsg) {
1545         void *data = CMSG_DATA(cmsg);
1546         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1547 
1548         int len = tswapal(target_cmsg->cmsg_len)
1549             - sizeof(struct target_cmsghdr);
1550 
1551         space += CMSG_SPACE(len);
1552         if (space > msgh->msg_controllen) {
1553             space -= CMSG_SPACE(len);
1554             /* This is a QEMU bug, since we allocated the payload
1555              * area ourselves (unlike overflow in host-to-target
1556              * conversion, which is just the guest giving us a buffer
1557              * that's too small). It can't happen for the payload types
1558              * we currently support; if it becomes an issue in future
1559              * we would need to improve our allocation strategy to
1560              * something more intelligent than "twice the size of the
1561              * target buffer we're reading from".
1562              */
1563             gemu_log("Host cmsg overflow\n");
1564             break;
1565         }
1566 
1567         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1568             cmsg->cmsg_level = SOL_SOCKET;
1569         } else {
1570             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1571         }
1572         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1573         cmsg->cmsg_len = CMSG_LEN(len);
1574 
1575         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1576             int *fd = (int *)data;
1577             int *target_fd = (int *)target_data;
1578             int i, numfds = len / sizeof(int);
1579 
1580             for (i = 0; i < numfds; i++) {
1581                 __get_user(fd[i], target_fd + i);
1582             }
1583         } else if (cmsg->cmsg_level == SOL_SOCKET
1584                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1585             struct ucred *cred = (struct ucred *)data;
1586             struct target_ucred *target_cred =
1587                 (struct target_ucred *)target_data;
1588 
1589             __get_user(cred->pid, &target_cred->pid);
1590             __get_user(cred->uid, &target_cred->uid);
1591             __get_user(cred->gid, &target_cred->gid);
1592         } else {
1593             gemu_log("Unsupported ancillary data: %d/%d\n",
1594                                         cmsg->cmsg_level, cmsg->cmsg_type);
1595             memcpy(data, target_data, len);
1596         }
1597 
1598         cmsg = CMSG_NXTHDR(msgh, cmsg);
1599         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1600                                          target_cmsg_start);
1601     }
1602     unlock_user(target_cmsg, target_cmsg_addr, 0);
1603  the_end:
1604     msgh->msg_controllen = space;
1605     return 0;
1606 }
1607 
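/* Illustrative sketch (hypothetical guest code, not part of QEMU): the
 * kind of sendmsg() call whose SCM_RIGHTS payload is handled above,
 * where each descriptor is read with __get_user() so the guest's byte
 * order is honoured:
 *
 *     // assumes <string.h>, <sys/uio.h> and <sys/socket.h>
 *     int send_fd(int sock, int fd_to_pass)
 *     {
 *         union {
 *             char buf[CMSG_SPACE(sizeof(int))];
 *             struct cmsghdr align;
 *         } u = { 0 };
 *         char dummy = 0;
 *         struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
 *         struct msghdr msg = { 0 };
 *         struct cmsghdr *cmsg;
 *
 *         msg.msg_iov = &iov;
 *         msg.msg_iovlen = 1;
 *         msg.msg_control = u.buf;
 *         msg.msg_controllen = sizeof(u.buf);
 *         cmsg = CMSG_FIRSTHDR(&msg);
 *         cmsg->cmsg_level = SOL_SOCKET;
 *         cmsg->cmsg_type = SCM_RIGHTS;
 *         cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *         memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *         return sendmsg(sock, &msg, 0);
 *     }
 */
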
1608 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1609                                            struct msghdr *msgh)
1610 {
1611     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1612     abi_long msg_controllen;
1613     abi_ulong target_cmsg_addr;
1614     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1615     socklen_t space = 0;
1616 
1617     msg_controllen = tswapal(target_msgh->msg_controllen);
1618     if (msg_controllen < sizeof (struct target_cmsghdr))
1619         goto the_end;
1620     target_cmsg_addr = tswapal(target_msgh->msg_control);
1621     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1622     target_cmsg_start = target_cmsg;
1623     if (!target_cmsg)
1624         return -TARGET_EFAULT;
1625 
1626     while (cmsg && target_cmsg) {
1627         void *data = CMSG_DATA(cmsg);
1628         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1629 
1630         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1631         int tgt_len, tgt_space;
1632 
1633         /* We never copy a half-header but may copy half-data;
1634          * this is Linux's behaviour in put_cmsg(). Note that
1635          * truncation here is a guest problem (which we report
1636          * to the guest via the CTRUNC bit), unlike truncation
1637          * in target_to_host_cmsg, which is a QEMU bug.
1638          */
1639         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1640             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1641             break;
1642         }
1643 
1644         if (cmsg->cmsg_level == SOL_SOCKET) {
1645             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1646         } else {
1647             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1648         }
1649         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1650 
1651         /* Payload types which need a different size of payload on
1652          * the target must adjust tgt_len here (see the worked example
1653          * after this function). */
1654         tgt_len = len;
1655         switch (cmsg->cmsg_level) {
1656         case SOL_SOCKET:
1657             switch (cmsg->cmsg_type) {
1658             case SO_TIMESTAMP:
1659                 tgt_len = sizeof(struct target_timeval);
1660                 break;
1661             default:
1662                 break;
1663             }
1664             break;
1665         default:
1666             break;
1667         }
1668 
1669         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1670             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1671             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1672         }
1673 
1674         /* We must now copy-and-convert len bytes of payload
1675          * into tgt_len bytes of destination space. Bear in mind
1676          * that in both source and destination we may be dealing
1677          * with a truncated value!
1678          */
1679         switch (cmsg->cmsg_level) {
1680         case SOL_SOCKET:
1681             switch (cmsg->cmsg_type) {
1682             case SCM_RIGHTS:
1683             {
1684                 int *fd = (int *)data;
1685                 int *target_fd = (int *)target_data;
1686                 int i, numfds = tgt_len / sizeof(int);
1687 
1688                 for (i = 0; i < numfds; i++) {
1689                     __put_user(fd[i], target_fd + i);
1690                 }
1691                 break;
1692             }
1693             case SO_TIMESTAMP:
1694             {
1695                 struct timeval *tv = (struct timeval *)data;
1696                 struct target_timeval *target_tv =
1697                     (struct target_timeval *)target_data;
1698 
1699                 if (len != sizeof(struct timeval) ||
1700                     tgt_len != sizeof(struct target_timeval)) {
1701                     goto unimplemented;
1702                 }
1703 
1704                 /* copy struct timeval to target */
1705                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1706                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1707                 break;
1708             }
1709             case SCM_CREDENTIALS:
1710             {
1711                 struct ucred *cred = (struct ucred *)data;
1712                 struct target_ucred *target_cred =
1713                     (struct target_ucred *)target_data;
1714 
1715                 __put_user(cred->pid, &target_cred->pid);
1716                 __put_user(cred->uid, &target_cred->uid);
1717                 __put_user(cred->gid, &target_cred->gid);
1718                 break;
1719             }
1720             default:
1721                 goto unimplemented;
1722             }
1723             break;
1724 
1725         case SOL_IP:
1726             switch (cmsg->cmsg_type) {
1727             case IP_TTL:
1728             {
1729                 uint32_t *v = (uint32_t *)data;
1730                 uint32_t *t_int = (uint32_t *)target_data;
1731 
1732                 if (len != sizeof(uint32_t) ||
1733                     tgt_len != sizeof(uint32_t)) {
1734                     goto unimplemented;
1735                 }
1736                 __put_user(*v, t_int);
1737                 break;
1738             }
1739             case IP_RECVERR:
1740             {
1741                 struct errhdr_t {
1742                    struct sock_extended_err ee;
1743                    struct sockaddr_in offender;
1744                 };
1745                 struct errhdr_t *errh = (struct errhdr_t *)data;
1746                 struct errhdr_t *target_errh =
1747                     (struct errhdr_t *)target_data;
1748 
1749                 if (len != sizeof(struct errhdr_t) ||
1750                     tgt_len != sizeof(struct errhdr_t)) {
1751                     goto unimplemented;
1752                 }
1753                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1754                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1755                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1756                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1757                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1758                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1759                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1760                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1761                     (void *) &errh->offender, sizeof(errh->offender));
1762                 break;
1763             }
1764             default:
1765                 goto unimplemented;
1766             }
1767             break;
1768 
1769         case SOL_IPV6:
1770             switch (cmsg->cmsg_type) {
1771             case IPV6_HOPLIMIT:
1772             {
1773                 uint32_t *v = (uint32_t *)data;
1774                 uint32_t *t_int = (uint32_t *)target_data;
1775 
1776                 if (len != sizeof(uint32_t) ||
1777                     tgt_len != sizeof(uint32_t)) {
1778                     goto unimplemented;
1779                 }
1780                 __put_user(*v, t_int);
1781                 break;
1782             }
1783             case IPV6_RECVERR:
1784             {
1785                 struct errhdr6_t {
1786                    struct sock_extended_err ee;
1787                    struct sockaddr_in6 offender;
1788                 };
1789                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1790                 struct errhdr6_t *target_errh =
1791                     (struct errhdr6_t *)target_data;
1792 
1793                 if (len != sizeof(struct errhdr6_t) ||
1794                     tgt_len != sizeof(struct errhdr6_t)) {
1795                     goto unimplemented;
1796                 }
1797                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1798                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1799                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1800                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1801                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1802                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1803                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1804                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1805                     (void *) &errh->offender, sizeof(errh->offender));
1806                 break;
1807             }
1808             default:
1809                 goto unimplemented;
1810             }
1811             break;
1812 
1813         default:
1814         unimplemented:
1815             gemu_log("Unsupported ancillary data: %d/%d\n",
1816                                         cmsg->cmsg_level, cmsg->cmsg_type);
1817             memcpy(target_data, data, MIN(len, tgt_len));
1818             if (tgt_len > len) {
1819                 memset(target_data + len, 0, tgt_len - len);
1820             }
1821         }
1822 
1823         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1824         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1825         if (msg_controllen < tgt_space) {
1826             tgt_space = msg_controllen;
1827         }
1828         msg_controllen -= tgt_space;
1829         space += tgt_space;
1830         cmsg = CMSG_NXTHDR(msgh, cmsg);
1831         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1832                                          target_cmsg_start);
1833     }
1834     unlock_user(target_cmsg, target_cmsg_addr, space);
1835  the_end:
1836     target_msgh->msg_controllen = tswapal(space);
1837     return 0;
1838 }
1839 
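/* Worked example for the tgt_len adjustment above (illustrative,
 * assuming a 64-bit host and a 32-bit guest): for an SO_TIMESTAMP
 * control message the host payload is sizeof(struct timeval) == 16
 * bytes, while the guest expects sizeof(struct target_timeval) == 8
 * bytes.  The conversion therefore keeps len == 16 but sets
 * tgt_len == 8, and TARGET_CMSG_LEN(tgt_len)/TARGET_CMSG_SPACE(tgt_len)
 * drive the guest-side accounting, so msg_controllen and the MSG_CTRUNC
 * check reflect the size the guest will actually consume.
 */
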
1840 /* do_setsockopt() Must return target values and target errnos. */
1841 static abi_long do_setsockopt(int sockfd, int level, int optname,
1842                               abi_ulong optval_addr, socklen_t optlen)
1843 {
1844     abi_long ret;
1845     int val;
1846     struct ip_mreqn *ip_mreq;
1847     struct ip_mreq_source *ip_mreq_source;
1848 
1849     switch(level) {
1850     case SOL_TCP:
1851         /* TCP options all take an 'int' value.  */
1852         if (optlen < sizeof(uint32_t))
1853             return -TARGET_EINVAL;
1854 
1855         if (get_user_u32(val, optval_addr))
1856             return -TARGET_EFAULT;
1857         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1858         break;
1859     case SOL_IP:
1860         switch(optname) {
1861         case IP_TOS:
1862         case IP_TTL:
1863         case IP_HDRINCL:
1864         case IP_ROUTER_ALERT:
1865         case IP_RECVOPTS:
1866         case IP_RETOPTS:
1867         case IP_PKTINFO:
1868         case IP_MTU_DISCOVER:
1869         case IP_RECVERR:
1870         case IP_RECVTTL:
1871         case IP_RECVTOS:
1872 #ifdef IP_FREEBIND
1873         case IP_FREEBIND:
1874 #endif
1875         case IP_MULTICAST_TTL:
1876         case IP_MULTICAST_LOOP:
1877             val = 0;
1878             if (optlen >= sizeof(uint32_t)) {
1879                 if (get_user_u32(val, optval_addr))
1880                     return -TARGET_EFAULT;
1881             } else if (optlen >= 1) {
1882                 if (get_user_u8(val, optval_addr))
1883                     return -TARGET_EFAULT;
1884             }
1885             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1886             break;
1887         case IP_ADD_MEMBERSHIP:
1888         case IP_DROP_MEMBERSHIP:
1889             if (optlen < sizeof (struct target_ip_mreq) ||
1890                 optlen > sizeof (struct target_ip_mreqn))
1891                 return -TARGET_EINVAL;
1892 
1893             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1894             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1895             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1896             break;
1897 
1898         case IP_BLOCK_SOURCE:
1899         case IP_UNBLOCK_SOURCE:
1900         case IP_ADD_SOURCE_MEMBERSHIP:
1901         case IP_DROP_SOURCE_MEMBERSHIP:
1902             if (optlen != sizeof (struct target_ip_mreq_source))
1903                 return -TARGET_EINVAL;
1904 
1905             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1906             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1907             unlock_user (ip_mreq_source, optval_addr, 0);
1908             break;
1909 
1910         default:
1911             goto unimplemented;
1912         }
1913         break;
1914     case SOL_IPV6:
1915         switch (optname) {
1916         case IPV6_MTU_DISCOVER:
1917         case IPV6_MTU:
1918         case IPV6_V6ONLY:
1919         case IPV6_RECVPKTINFO:
1920         case IPV6_UNICAST_HOPS:
1921         case IPV6_MULTICAST_HOPS:
1922         case IPV6_MULTICAST_LOOP:
1923         case IPV6_RECVERR:
1924         case IPV6_RECVHOPLIMIT:
1925         case IPV6_2292HOPLIMIT:
1926         case IPV6_CHECKSUM:
1927         case IPV6_ADDRFORM:
1928         case IPV6_2292PKTINFO:
1929         case IPV6_RECVTCLASS:
1930         case IPV6_RECVRTHDR:
1931         case IPV6_2292RTHDR:
1932         case IPV6_RECVHOPOPTS:
1933         case IPV6_2292HOPOPTS:
1934         case IPV6_RECVDSTOPTS:
1935         case IPV6_2292DSTOPTS:
1936         case IPV6_TCLASS:
1937 #ifdef IPV6_RECVPATHMTU
1938         case IPV6_RECVPATHMTU:
1939 #endif
1940 #ifdef IPV6_TRANSPARENT
1941         case IPV6_TRANSPARENT:
1942 #endif
1943 #ifdef IPV6_FREEBIND
1944         case IPV6_FREEBIND:
1945 #endif
1946 #ifdef IPV6_RECVORIGDSTADDR
1947         case IPV6_RECVORIGDSTADDR:
1948 #endif
1949             val = 0;
1950             if (optlen < sizeof(uint32_t)) {
1951                 return -TARGET_EINVAL;
1952             }
1953             if (get_user_u32(val, optval_addr)) {
1954                 return -TARGET_EFAULT;
1955             }
1956             ret = get_errno(setsockopt(sockfd, level, optname,
1957                                        &val, sizeof(val)));
1958             break;
1959         case IPV6_PKTINFO:
1960         {
1961             struct in6_pktinfo pki;
1962 
1963             if (optlen < sizeof(pki)) {
1964                 return -TARGET_EINVAL;
1965             }
1966 
1967             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1968                 return -TARGET_EFAULT;
1969             }
1970 
1971             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1972 
1973             ret = get_errno(setsockopt(sockfd, level, optname,
1974                                        &pki, sizeof(pki)));
1975             break;
1976         }
1977         case IPV6_ADD_MEMBERSHIP:
1978         case IPV6_DROP_MEMBERSHIP:
1979         {
1980             struct ipv6_mreq ipv6mreq;
1981 
1982             if (optlen < sizeof(ipv6mreq)) {
1983                 return -TARGET_EINVAL;
1984             }
1985 
1986             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1987                 return -TARGET_EFAULT;
1988             }
1989 
1990             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1991 
1992             ret = get_errno(setsockopt(sockfd, level, optname,
1993                                        &ipv6mreq, sizeof(ipv6mreq)));
1994             break;
1995         }
1996         default:
1997             goto unimplemented;
1998         }
1999         break;
2000     case SOL_ICMPV6:
2001         switch (optname) {
2002         case ICMPV6_FILTER:
2003         {
2004             struct icmp6_filter icmp6f;
2005 
2006             if (optlen > sizeof(icmp6f)) {
2007                 optlen = sizeof(icmp6f);
2008             }
2009 
2010             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2011                 return -TARGET_EFAULT;
2012             }
2013 
2014             for (val = 0; val < 8; val++) {
2015                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2016             }
2017 
2018             ret = get_errno(setsockopt(sockfd, level, optname,
2019                                        &icmp6f, optlen));
2020             break;
2021         }
2022         default:
2023             goto unimplemented;
2024         }
2025         break;
2026     case SOL_RAW:
2027         switch (optname) {
2028         case ICMP_FILTER:
2029         case IPV6_CHECKSUM:
2030             /* these take a u32 value */
2031             if (optlen < sizeof(uint32_t)) {
2032                 return -TARGET_EINVAL;
2033             }
2034 
2035             if (get_user_u32(val, optval_addr)) {
2036                 return -TARGET_EFAULT;
2037             }
2038             ret = get_errno(setsockopt(sockfd, level, optname,
2039                                        &val, sizeof(val)));
2040             break;
2041 
2042         default:
2043             goto unimplemented;
2044         }
2045         break;
2046 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2047     case SOL_ALG:
2048         switch (optname) {
2049         case ALG_SET_KEY:
2050         {
2051             char *alg_key = g_malloc(optlen);
2052 
2053             if (!alg_key) {
2054                 return -TARGET_ENOMEM;
2055             }
2056             if (copy_from_user(alg_key, optval_addr, optlen)) {
2057                 g_free(alg_key);
2058                 return -TARGET_EFAULT;
2059             }
2060             ret = get_errno(setsockopt(sockfd, level, optname,
2061                                        alg_key, optlen));
2062             g_free(alg_key);
2063             break;
2064         }
2065         case ALG_SET_AEAD_AUTHSIZE:
2066         {
2067             ret = get_errno(setsockopt(sockfd, level, optname,
2068                                        NULL, optlen));
2069             break;
2070         }
2071         default:
2072             goto unimplemented;
2073         }
2074         break;
2075 #endif
2076     case TARGET_SOL_SOCKET:
2077         switch (optname) {
2078         case TARGET_SO_RCVTIMEO:
2079         {
2080                 struct timeval tv;
2081 
2082                 optname = SO_RCVTIMEO;
2083 
2084 set_timeout:
2085                 if (optlen != sizeof(struct target_timeval)) {
2086                     return -TARGET_EINVAL;
2087                 }
2088 
2089                 if (copy_from_user_timeval(&tv, optval_addr)) {
2090                     return -TARGET_EFAULT;
2091                 }
2092 
2093                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2094                                 &tv, sizeof(tv)));
2095                 return ret;
2096         }
2097         case TARGET_SO_SNDTIMEO:
2098                 optname = SO_SNDTIMEO;
2099                 goto set_timeout;
2100         case TARGET_SO_ATTACH_FILTER:
2101         {
2102                 struct target_sock_fprog *tfprog;
2103                 struct target_sock_filter *tfilter;
2104                 struct sock_fprog fprog;
2105                 struct sock_filter *filter;
2106                 int i;
2107 
2108                 if (optlen != sizeof(*tfprog)) {
2109                     return -TARGET_EINVAL;
2110                 }
2111                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2112                     return -TARGET_EFAULT;
2113                 }
2114                 if (!lock_user_struct(VERIFY_READ, tfilter,
2115                                       tswapal(tfprog->filter), 0)) {
2116                     unlock_user_struct(tfprog, optval_addr, 1);
2117                     return -TARGET_EFAULT;
2118                 }
2119 
2120                 fprog.len = tswap16(tfprog->len);
2121                 filter = g_try_new(struct sock_filter, fprog.len);
2122                 if (filter == NULL) {
2123                     unlock_user_struct(tfilter, tfprog->filter, 1);
2124                     unlock_user_struct(tfprog, optval_addr, 1);
2125                     return -TARGET_ENOMEM;
2126                 }
2127                 for (i = 0; i < fprog.len; i++) {
2128                     filter[i].code = tswap16(tfilter[i].code);
2129                     filter[i].jt = tfilter[i].jt;
2130                     filter[i].jf = tfilter[i].jf;
2131                     filter[i].k = tswap32(tfilter[i].k);
2132                 }
2133                 fprog.filter = filter;
2134 
2135                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2136                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2137                 g_free(filter);
2138 
2139                 unlock_user_struct(tfilter, tfprog->filter, 1);
2140                 unlock_user_struct(tfprog, optval_addr, 1);
2141                 return ret;
2142         }
2143 	case TARGET_SO_BINDTODEVICE:
2144 	{
2145 		char *dev_ifname, *addr_ifname;
2146 
2147 		if (optlen > IFNAMSIZ - 1) {
2148 		    optlen = IFNAMSIZ - 1;
2149 		}
2150 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2151 		if (!dev_ifname) {
2152 		    return -TARGET_EFAULT;
2153 		}
2154 		optname = SO_BINDTODEVICE;
2155 		addr_ifname = alloca(IFNAMSIZ);
2156 		memcpy(addr_ifname, dev_ifname, optlen);
2157 		addr_ifname[optlen] = 0;
2158 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2159                                            addr_ifname, optlen));
2160 		unlock_user (dev_ifname, optval_addr, 0);
2161 		return ret;
2162 	}
2163         case TARGET_SO_LINGER:
2164         {
2165                 struct linger lg;
2166                 struct target_linger *tlg;
2167 
2168                 if (optlen != sizeof(struct target_linger)) {
2169                     return -TARGET_EINVAL;
2170                 }
2171                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2172                     return -TARGET_EFAULT;
2173                 }
2174                 __get_user(lg.l_onoff, &tlg->l_onoff);
2175                 __get_user(lg.l_linger, &tlg->l_linger);
2176                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2177                                 &lg, sizeof(lg)));
2178                 unlock_user_struct(tlg, optval_addr, 0);
2179                 return ret;
2180         }
2181             /* Options with 'int' argument.  */
2182         case TARGET_SO_DEBUG:
2183 		optname = SO_DEBUG;
2184 		break;
2185         case TARGET_SO_REUSEADDR:
2186 		optname = SO_REUSEADDR;
2187 		break;
2188 #ifdef SO_REUSEPORT
2189         case TARGET_SO_REUSEPORT:
2190                 optname = SO_REUSEPORT;
2191                 break;
2192 #endif
2193         case TARGET_SO_TYPE:
2194 		optname = SO_TYPE;
2195 		break;
2196         case TARGET_SO_ERROR:
2197 		optname = SO_ERROR;
2198 		break;
2199         case TARGET_SO_DONTROUTE:
2200 		optname = SO_DONTROUTE;
2201 		break;
2202         case TARGET_SO_BROADCAST:
2203 		optname = SO_BROADCAST;
2204 		break;
2205         case TARGET_SO_SNDBUF:
2206 		optname = SO_SNDBUF;
2207 		break;
2208         case TARGET_SO_SNDBUFFORCE:
2209                 optname = SO_SNDBUFFORCE;
2210                 break;
2211         case TARGET_SO_RCVBUF:
2212 		optname = SO_RCVBUF;
2213 		break;
2214         case TARGET_SO_RCVBUFFORCE:
2215                 optname = SO_RCVBUFFORCE;
2216                 break;
2217         case TARGET_SO_KEEPALIVE:
2218 		optname = SO_KEEPALIVE;
2219 		break;
2220         case TARGET_SO_OOBINLINE:
2221 		optname = SO_OOBINLINE;
2222 		break;
2223         case TARGET_SO_NO_CHECK:
2224 		optname = SO_NO_CHECK;
2225 		break;
2226         case TARGET_SO_PRIORITY:
2227 		optname = SO_PRIORITY;
2228 		break;
2229 #ifdef SO_BSDCOMPAT
2230         case TARGET_SO_BSDCOMPAT:
2231 		optname = SO_BSDCOMPAT;
2232 		break;
2233 #endif
2234         case TARGET_SO_PASSCRED:
2235 		optname = SO_PASSCRED;
2236 		break;
2237         case TARGET_SO_PASSSEC:
2238                 optname = SO_PASSSEC;
2239                 break;
2240         case TARGET_SO_TIMESTAMP:
2241 		optname = SO_TIMESTAMP;
2242 		break;
2243         case TARGET_SO_RCVLOWAT:
2244 		optname = SO_RCVLOWAT;
2245 		break;
2246         default:
2247             goto unimplemented;
2248         }
2249 	if (optlen < sizeof(uint32_t))
2250             return -TARGET_EINVAL;
2251 
2252 	if (get_user_u32(val, optval_addr))
2253             return -TARGET_EFAULT;
2254 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2255         break;
2256 #ifdef SOL_NETLINK
2257     case SOL_NETLINK:
2258         switch (optname) {
2259         case NETLINK_PKTINFO:
2260         case NETLINK_ADD_MEMBERSHIP:
2261         case NETLINK_DROP_MEMBERSHIP:
2262         case NETLINK_BROADCAST_ERROR:
2263         case NETLINK_NO_ENOBUFS:
2264 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2265         case NETLINK_LISTEN_ALL_NSID:
2266         case NETLINK_CAP_ACK:
2267 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2268 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2269         case NETLINK_EXT_ACK:
2270 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2271 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2272         case NETLINK_GET_STRICT_CHK:
2273 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2274             break;
2275         default:
2276             goto unimplemented;
2277         }
2278         val = 0;
2279         if (optlen < sizeof(uint32_t)) {
2280             return -TARGET_EINVAL;
2281         }
2282         if (get_user_u32(val, optval_addr)) {
2283             return -TARGET_EFAULT;
2284         }
2285         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2286                                    sizeof(val)));
2287         break;
2288 #endif /* SOL_NETLINK */
2289     default:
2290     unimplemented:
2291         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2292         ret = -TARGET_ENOPROTOOPT;
2293     }
2294     return ret;
2295 }
2296 
2297 /* do_getsockopt() Must return target values and target errnos. */
2298 static abi_long do_getsockopt(int sockfd, int level, int optname,
2299                               abi_ulong optval_addr, abi_ulong optlen)
2300 {
2301     abi_long ret;
2302     int len, val;
2303     socklen_t lv;
2304 
2305     switch(level) {
2306     case TARGET_SOL_SOCKET:
2307         level = SOL_SOCKET;
2308         switch (optname) {
2309         /* These don't just return a single integer */
2310         case TARGET_SO_RCVTIMEO:
2311         case TARGET_SO_SNDTIMEO:
2312         case TARGET_SO_PEERNAME:
2313             goto unimplemented;
2314         case TARGET_SO_PEERCRED: {
2315             struct ucred cr;
2316             socklen_t crlen;
2317             struct target_ucred *tcr;
2318 
2319             if (get_user_u32(len, optlen)) {
2320                 return -TARGET_EFAULT;
2321             }
2322             if (len < 0) {
2323                 return -TARGET_EINVAL;
2324             }
2325 
2326             crlen = sizeof(cr);
2327             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2328                                        &cr, &crlen));
2329             if (ret < 0) {
2330                 return ret;
2331             }
2332             if (len > crlen) {
2333                 len = crlen;
2334             }
2335             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2336                 return -TARGET_EFAULT;
2337             }
2338             __put_user(cr.pid, &tcr->pid);
2339             __put_user(cr.uid, &tcr->uid);
2340             __put_user(cr.gid, &tcr->gid);
2341             unlock_user_struct(tcr, optval_addr, 1);
2342             if (put_user_u32(len, optlen)) {
2343                 return -TARGET_EFAULT;
2344             }
2345             break;
2346         }
2347         case TARGET_SO_PEERSEC: {
2348             char *name;
2349 
2350             if (get_user_u32(len, optlen)) {
2351                 return -TARGET_EFAULT;
2352             }
2353             if (len < 0) {
2354                 return -TARGET_EINVAL;
2355             }
2356             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2357             if (!name) {
2358                 return -TARGET_EFAULT;
2359             }
2360             lv = len;
2361             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2362                                        name, &lv));
2363             if (put_user_u32(lv, optlen)) {
2364                 ret = -TARGET_EFAULT;
2365             }
2366             unlock_user(name, optval_addr, lv);
2367             break;
2368         }
2369         case TARGET_SO_LINGER:
2370         {
2371             struct linger lg;
2372             socklen_t lglen;
2373             struct target_linger *tlg;
2374 
2375             if (get_user_u32(len, optlen)) {
2376                 return -TARGET_EFAULT;
2377             }
2378             if (len < 0) {
2379                 return -TARGET_EINVAL;
2380             }
2381 
2382             lglen = sizeof(lg);
2383             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2384                                        &lg, &lglen));
2385             if (ret < 0) {
2386                 return ret;
2387             }
2388             if (len > lglen) {
2389                 len = lglen;
2390             }
2391             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2392                 return -TARGET_EFAULT;
2393             }
2394             __put_user(lg.l_onoff, &tlg->l_onoff);
2395             __put_user(lg.l_linger, &tlg->l_linger);
2396             unlock_user_struct(tlg, optval_addr, 1);
2397             if (put_user_u32(len, optlen)) {
2398                 return -TARGET_EFAULT;
2399             }
2400             break;
2401         }
2402         /* Options with 'int' argument.  */
2403         case TARGET_SO_DEBUG:
2404             optname = SO_DEBUG;
2405             goto int_case;
2406         case TARGET_SO_REUSEADDR:
2407             optname = SO_REUSEADDR;
2408             goto int_case;
2409 #ifdef SO_REUSEPORT
2410         case TARGET_SO_REUSEPORT:
2411             optname = SO_REUSEPORT;
2412             goto int_case;
2413 #endif
2414         case TARGET_SO_TYPE:
2415             optname = SO_TYPE;
2416             goto int_case;
2417         case TARGET_SO_ERROR:
2418             optname = SO_ERROR;
2419             goto int_case;
2420         case TARGET_SO_DONTROUTE:
2421             optname = SO_DONTROUTE;
2422             goto int_case;
2423         case TARGET_SO_BROADCAST:
2424             optname = SO_BROADCAST;
2425             goto int_case;
2426         case TARGET_SO_SNDBUF:
2427             optname = SO_SNDBUF;
2428             goto int_case;
2429         case TARGET_SO_RCVBUF:
2430             optname = SO_RCVBUF;
2431             goto int_case;
2432         case TARGET_SO_KEEPALIVE:
2433             optname = SO_KEEPALIVE;
2434             goto int_case;
2435         case TARGET_SO_OOBINLINE:
2436             optname = SO_OOBINLINE;
2437             goto int_case;
2438         case TARGET_SO_NO_CHECK:
2439             optname = SO_NO_CHECK;
2440             goto int_case;
2441         case TARGET_SO_PRIORITY:
2442             optname = SO_PRIORITY;
2443             goto int_case;
2444 #ifdef SO_BSDCOMPAT
2445         case TARGET_SO_BSDCOMPAT:
2446             optname = SO_BSDCOMPAT;
2447             goto int_case;
2448 #endif
2449         case TARGET_SO_PASSCRED:
2450             optname = SO_PASSCRED;
2451             goto int_case;
2452         case TARGET_SO_TIMESTAMP:
2453             optname = SO_TIMESTAMP;
2454             goto int_case;
2455         case TARGET_SO_RCVLOWAT:
2456             optname = SO_RCVLOWAT;
2457             goto int_case;
2458         case TARGET_SO_ACCEPTCONN:
2459             optname = SO_ACCEPTCONN;
2460             goto int_case;
2461         default:
2462             goto int_case;
2463         }
2464         break;
2465     case SOL_TCP:
2466         /* TCP options all take an 'int' value.  */
2467     int_case:
2468         if (get_user_u32(len, optlen))
2469             return -TARGET_EFAULT;
2470         if (len < 0)
2471             return -TARGET_EINVAL;
2472         lv = sizeof(lv);
2473         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2474         if (ret < 0)
2475             return ret;
2476         if (optname == SO_TYPE) {
2477             val = host_to_target_sock_type(val);
2478         }
2479         if (len > lv)
2480             len = lv;
2481         if (len == 4) {
2482             if (put_user_u32(val, optval_addr))
2483                 return -TARGET_EFAULT;
2484         } else {
2485             if (put_user_u8(val, optval_addr))
2486                 return -TARGET_EFAULT;
2487         }
2488         if (put_user_u32(len, optlen))
2489             return -TARGET_EFAULT;
2490         break;
2491     case SOL_IP:
2492         switch(optname) {
2493         case IP_TOS:
2494         case IP_TTL:
2495         case IP_HDRINCL:
2496         case IP_ROUTER_ALERT:
2497         case IP_RECVOPTS:
2498         case IP_RETOPTS:
2499         case IP_PKTINFO:
2500         case IP_MTU_DISCOVER:
2501         case IP_RECVERR:
2502         case IP_RECVTOS:
2503 #ifdef IP_FREEBIND
2504         case IP_FREEBIND:
2505 #endif
2506         case IP_MULTICAST_TTL:
2507         case IP_MULTICAST_LOOP:
2508             if (get_user_u32(len, optlen))
2509                 return -TARGET_EFAULT;
2510             if (len < 0)
2511                 return -TARGET_EINVAL;
2512             lv = sizeof(lv);
2513             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2514             if (ret < 0)
2515                 return ret;
2516             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2517                 len = 1;
2518                 if (put_user_u32(len, optlen)
2519                     || put_user_u8(val, optval_addr))
2520                     return -TARGET_EFAULT;
2521             } else {
2522                 if (len > sizeof(int))
2523                     len = sizeof(int);
2524                 if (put_user_u32(len, optlen)
2525                     || put_user_u32(val, optval_addr))
2526                     return -TARGET_EFAULT;
2527             }
2528             break;
2529         default:
2530             ret = -TARGET_ENOPROTOOPT;
2531             break;
2532         }
2533         break;
2534     case SOL_IPV6:
2535         switch (optname) {
2536         case IPV6_MTU_DISCOVER:
2537         case IPV6_MTU:
2538         case IPV6_V6ONLY:
2539         case IPV6_RECVPKTINFO:
2540         case IPV6_UNICAST_HOPS:
2541         case IPV6_MULTICAST_HOPS:
2542         case IPV6_MULTICAST_LOOP:
2543         case IPV6_RECVERR:
2544         case IPV6_RECVHOPLIMIT:
2545         case IPV6_2292HOPLIMIT:
2546         case IPV6_CHECKSUM:
2547         case IPV6_ADDRFORM:
2548         case IPV6_2292PKTINFO:
2549         case IPV6_RECVTCLASS:
2550         case IPV6_RECVRTHDR:
2551         case IPV6_2292RTHDR:
2552         case IPV6_RECVHOPOPTS:
2553         case IPV6_2292HOPOPTS:
2554         case IPV6_RECVDSTOPTS:
2555         case IPV6_2292DSTOPTS:
2556         case IPV6_TCLASS:
2557 #ifdef IPV6_RECVPATHMTU
2558         case IPV6_RECVPATHMTU:
2559 #endif
2560 #ifdef IPV6_TRANSPARENT
2561         case IPV6_TRANSPARENT:
2562 #endif
2563 #ifdef IPV6_FREEBIND
2564         case IPV6_FREEBIND:
2565 #endif
2566 #ifdef IPV6_RECVORIGDSTADDR
2567         case IPV6_RECVORIGDSTADDR:
2568 #endif
2569             if (get_user_u32(len, optlen))
2570                 return -TARGET_EFAULT;
2571             if (len < 0)
2572                 return -TARGET_EINVAL;
2573             lv = sizeof(lv);
2574             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2575             if (ret < 0)
2576                 return ret;
2577             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2578                 len = 1;
2579                 if (put_user_u32(len, optlen)
2580                     || put_user_u8(val, optval_addr))
2581                     return -TARGET_EFAULT;
2582             } else {
2583                 if (len > sizeof(int))
2584                     len = sizeof(int);
2585                 if (put_user_u32(len, optlen)
2586                     || put_user_u32(val, optval_addr))
2587                     return -TARGET_EFAULT;
2588             }
2589             break;
2590         default:
2591             ret = -TARGET_ENOPROTOOPT;
2592             break;
2593         }
2594         break;
2595 #ifdef SOL_NETLINK
2596     case SOL_NETLINK:
2597         switch (optname) {
2598         case NETLINK_PKTINFO:
2599         case NETLINK_BROADCAST_ERROR:
2600         case NETLINK_NO_ENOBUFS:
2601 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2602         case NETLINK_LISTEN_ALL_NSID:
2603         case NETLINK_CAP_ACK:
2604 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2605 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2606         case NETLINK_EXT_ACK:
2607 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2608 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2609         case NETLINK_GET_STRICT_CHK:
2610 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2611             if (get_user_u32(len, optlen)) {
2612                 return -TARGET_EFAULT;
2613             }
2614             if (len != sizeof(val)) {
2615                 return -TARGET_EINVAL;
2616             }
2617             lv = len;
2618             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2619             if (ret < 0) {
2620                 return ret;
2621             }
2622             if (put_user_u32(lv, optlen)
2623                 || put_user_u32(val, optval_addr)) {
2624                 return -TARGET_EFAULT;
2625             }
2626             break;
2627 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2628         case NETLINK_LIST_MEMBERSHIPS:
2629         {
2630             uint32_t *results;
2631             int i;
2632             if (get_user_u32(len, optlen)) {
2633                 return -TARGET_EFAULT;
2634             }
2635             if (len < 0) {
2636                 return -TARGET_EINVAL;
2637             }
2638             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2639             if (!results) {
2640                 return -TARGET_EFAULT;
2641             }
2642             lv = len;
2643             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2644             if (ret < 0) {
2645                 unlock_user(results, optval_addr, 0);
2646                 return ret;
2647             }
2648             /* Convert host endianness to target endianness. */
2649             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2650                 results[i] = tswap32(results[i]);
2651             }
2652             if (put_user_u32(lv, optlen)) {
2653                 return -TARGET_EFAULT;
2654             }
2655             unlock_user(results, optval_addr, 0);
2656             break;
2657         }
2658 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2659         default:
2660             goto unimplemented;
2661         }
2662         break;
2663 #endif /* SOL_NETLINK */
2664     default:
2665     unimplemented:
2666         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2667                  level, optname);
2668         ret = -TARGET_EOPNOTSUPP;
2669         break;
2670     }
2671     return ret;
2672 }
2673 
2674 /* Convert a target low/high pair representing a file offset into the host
2675  * low/high pair. This function doesn't handle offsets bigger than 64 bits,
2676  * as the kernel doesn't handle them either.
2677  */
2678 static void target_to_host_low_high(abi_ulong tlow,
2679                                     abi_ulong thigh,
2680                                     unsigned long *hlow,
2681                                     unsigned long *hhigh)
2682 {
2683     uint64_t off = tlow |
2684         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2685         TARGET_LONG_BITS / 2;
2686 
2687     *hlow = off;
2688     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2689 }
2690 
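/* Worked example (illustrative): with a 32-bit guest (TARGET_LONG_BITS
 * == 32) on a 64-bit host (HOST_LONG_BITS == 64), tlow == 0x89abcdef
 * and thigh == 0x01234567 give off == 0x0123456789abcdef, so
 * *hlow == 0x0123456789abcdef and *hhigh == 0.  The shifts are done in
 * two halves ("<< BITS / 2" twice) so that a shift by the full width of
 * the type -- undefined behaviour in C -- is never generated when the
 * target or host long is 64 bits wide.
 */
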
2691 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2692                                 abi_ulong count, int copy)
2693 {
2694     struct target_iovec *target_vec;
2695     struct iovec *vec;
2696     abi_ulong total_len, max_len;
2697     int i;
2698     int err = 0;
2699     bool bad_address = false;
2700 
2701     if (count == 0) {
2702         errno = 0;
2703         return NULL;
2704     }
2705     if (count > IOV_MAX) {
2706         errno = EINVAL;
2707         return NULL;
2708     }
2709 
2710     vec = g_try_new0(struct iovec, count);
2711     if (vec == NULL) {
2712         errno = ENOMEM;
2713         return NULL;
2714     }
2715 
2716     target_vec = lock_user(VERIFY_READ, target_addr,
2717                            count * sizeof(struct target_iovec), 1);
2718     if (target_vec == NULL) {
2719         err = EFAULT;
2720         goto fail2;
2721     }
2722 
2723     /* ??? If host page size > target page size, this will result in a
2724        value larger than what we can actually support.  */
2725     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2726     total_len = 0;
2727 
2728     for (i = 0; i < count; i++) {
2729         abi_ulong base = tswapal(target_vec[i].iov_base);
2730         abi_long len = tswapal(target_vec[i].iov_len);
2731 
2732         if (len < 0) {
2733             err = EINVAL;
2734             goto fail;
2735         } else if (len == 0) {
2736             /* Zero length pointer is ignored.  */
2737             vec[i].iov_base = 0;
2738         } else {
2739             vec[i].iov_base = lock_user(type, base, len, copy);
2740             /* If the first buffer pointer is bad, this is a fault.  But
2741              * subsequent bad buffers will result in a partial write; this
2742              * is realized by filling the vector with null pointers and
2743              * zero lengths. */
2744             if (!vec[i].iov_base) {
2745                 if (i == 0) {
2746                     err = EFAULT;
2747                     goto fail;
2748                 } else {
2749                     bad_address = true;
2750                 }
2751             }
2752             if (bad_address) {
2753                 len = 0;
2754             }
2755             if (len > max_len - total_len) {
2756                 len = max_len - total_len;
2757             }
2758         }
2759         vec[i].iov_len = len;
2760         total_len += len;
2761     }
2762 
2763     unlock_user(target_vec, target_addr, 0);
2764     return vec;
2765 
2766  fail:
2767     while (--i >= 0) {
2768         if (tswapal(target_vec[i].iov_len) > 0) {
2769             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2770         }
2771     }
2772     unlock_user(target_vec, target_addr, 0);
2773  fail2:
2774     g_free(vec);
2775     errno = err;
2776     return NULL;
2777 }
2778 
2779 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2780                          abi_ulong count, int copy)
2781 {
2782     struct target_iovec *target_vec;
2783     int i;
2784 
2785     target_vec = lock_user(VERIFY_READ, target_addr,
2786                            count * sizeof(struct target_iovec), 1);
2787     if (target_vec) {
2788         for (i = 0; i < count; i++) {
2789             abi_ulong base = tswapal(target_vec[i].iov_base);
2790             abi_long len = tswapal(target_vec[i].iov_len);
2791             if (len < 0) {
2792                 break;
2793             }
2794             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2795         }
2796         unlock_user(target_vec, target_addr, 0);
2797     }
2798 
2799     g_free(vec);
2800 }
2801 
2802 static inline int target_to_host_sock_type(int *type)
2803 {
2804     int host_type = 0;
2805     int target_type = *type;
2806 
2807     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2808     case TARGET_SOCK_DGRAM:
2809         host_type = SOCK_DGRAM;
2810         break;
2811     case TARGET_SOCK_STREAM:
2812         host_type = SOCK_STREAM;
2813         break;
2814     default:
2815         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2816         break;
2817     }
2818     if (target_type & TARGET_SOCK_CLOEXEC) {
2819 #if defined(SOCK_CLOEXEC)
2820         host_type |= SOCK_CLOEXEC;
2821 #else
2822         return -TARGET_EINVAL;
2823 #endif
2824     }
2825     if (target_type & TARGET_SOCK_NONBLOCK) {
2826 #if defined(SOCK_NONBLOCK)
2827         host_type |= SOCK_NONBLOCK;
2828 #elif !defined(O_NONBLOCK)
2829         return -TARGET_EINVAL;
2830 #endif
2831     }
2832     *type = host_type;
2833     return 0;
2834 }
2835 
2836 /* Try to emulate socket type flags after socket creation.  */
2837 static int sock_flags_fixup(int fd, int target_type)
2838 {
2839 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2840     if (target_type & TARGET_SOCK_NONBLOCK) {
2841         int flags = fcntl(fd, F_GETFL);
2842         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2843             close(fd);
2844             return -TARGET_EINVAL;
2845         }
2846     }
2847 #endif
2848     return fd;
2849 }
2850 
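/* Illustrative flow (hypothetical guest call): a guest
 * socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0) reaches do_socket()
 * below with type == TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK.
 * target_to_host_sock_type() maps this to the host's
 * SOCK_STREAM | SOCK_NONBLOCK where SOCK_NONBLOCK exists; on hosts that
 * lack it, sock_flags_fixup() emulates the flag after socket creation
 * via fcntl(fd, F_SETFL, O_NONBLOCK | flags).
 */
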
2851 /* do_socket() Must return target values and target errnos. */
2852 static abi_long do_socket(int domain, int type, int protocol)
2853 {
2854     int target_type = type;
2855     int ret;
2856 
2857     ret = target_to_host_sock_type(&type);
2858     if (ret) {
2859         return ret;
2860     }
2861 
2862     if (domain == PF_NETLINK && !(
2863 #ifdef CONFIG_RTNETLINK
2864          protocol == NETLINK_ROUTE ||
2865 #endif
2866          protocol == NETLINK_KOBJECT_UEVENT ||
2867          protocol == NETLINK_AUDIT)) {
2868         return -EPFNOSUPPORT;
2869     }
2870 
2871     if (domain == AF_PACKET ||
2872         (domain == AF_INET && type == SOCK_PACKET)) {
2873         protocol = tswap16(protocol);
2874     }
2875 
2876     ret = get_errno(socket(domain, type, protocol));
2877     if (ret >= 0) {
2878         ret = sock_flags_fixup(ret, target_type);
2879         if (type == SOCK_PACKET) {
2880             /* Handle an obsolete case: if the socket type is
2881              * SOCK_PACKET, it is bound by name.
2882              */
2883             fd_trans_register(ret, &target_packet_trans);
2884         } else if (domain == PF_NETLINK) {
2885             switch (protocol) {
2886 #ifdef CONFIG_RTNETLINK
2887             case NETLINK_ROUTE:
2888                 fd_trans_register(ret, &target_netlink_route_trans);
2889                 break;
2890 #endif
2891             case NETLINK_KOBJECT_UEVENT:
2892                 /* nothing to do: messages are strings */
2893                 break;
2894             case NETLINK_AUDIT:
2895                 fd_trans_register(ret, &target_netlink_audit_trans);
2896                 break;
2897             default:
2898                 g_assert_not_reached();
2899             }
2900         }
2901     }
2902     return ret;
2903 }
2904 
2905 /* do_bind() Must return target values and target errnos. */
2906 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2907                         socklen_t addrlen)
2908 {
2909     void *addr;
2910     abi_long ret;
2911 
2912     if ((int)addrlen < 0) {
2913         return -TARGET_EINVAL;
2914     }
2915 
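    /* The extra byte leaves room for the sun_path NUL fix-up performed
     * by target_to_host_sockaddr() for AF_UNIX addresses. */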
2916     addr = alloca(addrlen+1);
2917 
2918     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2919     if (ret)
2920         return ret;
2921 
2922     return get_errno(bind(sockfd, addr, addrlen));
2923 }
2924 
2925 /* do_connect() Must return target values and target errnos. */
2926 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2927                            socklen_t addrlen)
2928 {
2929     void *addr;
2930     abi_long ret;
2931 
2932     if ((int)addrlen < 0) {
2933         return -TARGET_EINVAL;
2934     }
2935 
2936     addr = alloca(addrlen+1);
2937 
2938     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2939     if (ret)
2940         return ret;
2941 
2942     return get_errno(safe_connect(sockfd, addr, addrlen));
2943 }
2944 
2945 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2946 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2947                                       int flags, int send)
2948 {
2949     abi_long ret, len;
2950     struct msghdr msg;
2951     abi_ulong count;
2952     struct iovec *vec;
2953     abi_ulong target_vec;
2954 
2955     if (msgp->msg_name) {
2956         msg.msg_namelen = tswap32(msgp->msg_namelen);
2957         msg.msg_name = alloca(msg.msg_namelen+1);
2958         ret = target_to_host_sockaddr(fd, msg.msg_name,
2959                                       tswapal(msgp->msg_name),
2960                                       msg.msg_namelen);
2961         if (ret == -TARGET_EFAULT) {
2962             /* For connected sockets msg_name and msg_namelen must
2963              * be ignored, so returning EFAULT immediately is wrong.
2964              * Instead, pass a bad msg_name to the host kernel, and
2965              * let it decide whether to return EFAULT or not.
2966              */
2967             msg.msg_name = (void *)-1;
2968         } else if (ret) {
2969             goto out2;
2970         }
2971     } else {
2972         msg.msg_name = NULL;
2973         msg.msg_namelen = 0;
2974     }
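    /* Allocate the host control buffer at twice the guest's
     * msg_controllen: host cmsg headers and payloads can be larger than
     * the target's (see the overflow note in target_to_host_cmsg()). */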
2975     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2976     msg.msg_control = alloca(msg.msg_controllen);
2977     memset(msg.msg_control, 0, msg.msg_controllen);
2978 
2979     msg.msg_flags = tswap32(msgp->msg_flags);
2980 
2981     count = tswapal(msgp->msg_iovlen);
2982     target_vec = tswapal(msgp->msg_iov);
2983 
2984     if (count > IOV_MAX) {
2985         /* sendmsg/recvmsg return a different errno for this condition than
2986          * readv/writev, so we must catch it here before lock_iovec() does.
2987          */
2988         ret = -TARGET_EMSGSIZE;
2989         goto out2;
2990     }
2991 
2992     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2993                      target_vec, count, send);
2994     if (vec == NULL) {
2995         ret = -host_to_target_errno(errno);
2996         goto out2;
2997     }
2998     msg.msg_iovlen = count;
2999     msg.msg_iov = vec;
3000 
3001     if (send) {
3002         if (fd_trans_target_to_host_data(fd)) {
3003             void *host_msg;
3004 
3005             host_msg = g_malloc(msg.msg_iov->iov_len);
3006             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3007             ret = fd_trans_target_to_host_data(fd)(host_msg,
3008                                                    msg.msg_iov->iov_len);
3009             if (ret >= 0) {
3010                 msg.msg_iov->iov_base = host_msg;
3011                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3012             }
3013             g_free(host_msg);
3014         } else {
3015             ret = target_to_host_cmsg(&msg, msgp);
3016             if (ret == 0) {
3017                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3018             }
3019         }
3020     } else {
3021         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3022         if (!is_error(ret)) {
3023             len = ret;
3024             if (fd_trans_host_to_target_data(fd)) {
3025                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3026                                                MIN(msg.msg_iov->iov_len, len));
3027             } else {
3028                 ret = host_to_target_cmsg(msgp, &msg);
3029             }
3030             if (!is_error(ret)) {
3031                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3032                 msgp->msg_flags = tswap32(msg.msg_flags);
3033                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3034                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3035                                     msg.msg_name, msg.msg_namelen);
3036                     if (ret) {
3037                         goto out;
3038                     }
3039                 }
3040 
3041                 ret = len;
3042             }
3043         }
3044     }
3045 
3046 out:
3047     unlock_iovec(vec, target_vec, count, !send);
3048 out2:
3049     return ret;
3050 }
3051 
3052 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3053                                int flags, int send)
3054 {
3055     abi_long ret;
3056     struct target_msghdr *msgp;
3057 
3058     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3059                           msgp,
3060                           target_msg,
3061                           send ? 1 : 0)) {
3062         return -TARGET_EFAULT;
3063     }
3064     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3065     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3066     return ret;
3067 }
3068 
3069 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3070  * so it might not have this *mmsg-specific flag either.
3071  */
3072 #ifndef MSG_WAITFORONE
3073 #define MSG_WAITFORONE 0x10000
3074 #endif
3075 
3076 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3077                                 unsigned int vlen, unsigned int flags,
3078                                 int send)
3079 {
3080     struct target_mmsghdr *mmsgp;
3081     abi_long ret = 0;
3082     int i;
3083 
3084     if (vlen > UIO_MAXIOV) {
3085         vlen = UIO_MAXIOV;
3086     }
3087 
3088     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3089     if (!mmsgp) {
3090         return -TARGET_EFAULT;
3091     }
3092 
3093     for (i = 0; i < vlen; i++) {
3094         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3095         if (is_error(ret)) {
3096             break;
3097         }
3098         mmsgp[i].msg_len = tswap32(ret);
3099         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3100         if (flags & MSG_WAITFORONE) {
3101             flags |= MSG_DONTWAIT;
3102         }
3103     }
3104 
3105     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3106 
3107     /* Return number of datagrams sent if we sent any at all;
3108      * otherwise return the error.
3109      */
3110     if (i) {
3111         return i;
3112     }
3113     return ret;
3114 }
3115 
3116 /* do_accept4() must return target values and target errnos. */
3117 static abi_long do_accept4(int fd, abi_ulong target_addr,
3118                            abi_ulong target_addrlen_addr, int flags)
3119 {
3120     socklen_t addrlen, ret_addrlen;
3121     void *addr;
3122     abi_long ret;
3123     int host_flags;
3124 
3125     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3126 
3127     if (target_addr == 0) {
3128         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3129     }
3130 
3131     /* linux returns EINVAL if addrlen pointer is invalid */
3132     if (get_user_u32(addrlen, target_addrlen_addr))
3133         return -TARGET_EINVAL;
3134 
3135     if ((int)addrlen < 0) {
3136         return -TARGET_EINVAL;
3137     }
3138 
3139     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3140         return -TARGET_EINVAL;
3141 
3142     addr = alloca(addrlen);
3143 
3144     ret_addrlen = addrlen;
3145     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3146     if (!is_error(ret)) {
3147         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3148         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3149             ret = -TARGET_EFAULT;
3150         }
3151     }
3152     return ret;
3153 }
3154 
3155 /* do_getpeername() must return target values and target errnos. */
3156 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3157                                abi_ulong target_addrlen_addr)
3158 {
3159     socklen_t addrlen, ret_addrlen;
3160     void *addr;
3161     abi_long ret;
3162 
3163     if (get_user_u32(addrlen, target_addrlen_addr))
3164         return -TARGET_EFAULT;
3165 
3166     if ((int)addrlen < 0) {
3167         return -TARGET_EINVAL;
3168     }
3169 
3170     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3171         return -TARGET_EFAULT;
3172 
3173     addr = alloca(addrlen);
3174 
3175     ret_addrlen = addrlen;
3176     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3177     if (!is_error(ret)) {
3178         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3179         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3180             ret = -TARGET_EFAULT;
3181         }
3182     }
3183     return ret;
3184 }
3185 
3186 /* do_getsockname() must return target values and target errnos. */
3187 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3188                                abi_ulong target_addrlen_addr)
3189 {
3190     socklen_t addrlen, ret_addrlen;
3191     void *addr;
3192     abi_long ret;
3193 
3194     if (get_user_u32(addrlen, target_addrlen_addr))
3195         return -TARGET_EFAULT;
3196 
3197     if ((int)addrlen < 0) {
3198         return -TARGET_EINVAL;
3199     }
3200 
3201     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3202         return -TARGET_EFAULT;
3203 
3204     addr = alloca(addrlen);
3205 
3206     ret_addrlen = addrlen;
3207     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3208     if (!is_error(ret)) {
3209         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3210         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3211             ret = -TARGET_EFAULT;
3212         }
3213     }
3214     return ret;
3215 }
3216 
3217 /* do_socketpair() must return target values and target errnos. */
3218 static abi_long do_socketpair(int domain, int type, int protocol,
3219                               abi_ulong target_tab_addr)
3220 {
3221     int tab[2];
3222     abi_long ret;
3223 
3224     target_to_host_sock_type(&type);
3225 
3226     ret = get_errno(socketpair(domain, type, protocol, tab));
3227     if (!is_error(ret)) {
3228         if (put_user_s32(tab[0], target_tab_addr)
3229             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3230             ret = -TARGET_EFAULT;
3231     }
3232     return ret;
3233 }
3234 
3235 /* do_sendto() must return target values and target errnos. */
3236 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3237                           abi_ulong target_addr, socklen_t addrlen)
3238 {
3239     void *addr;
3240     void *host_msg;
3241     void *copy_msg = NULL;
3242     abi_long ret;
3243 
3244     if ((int)addrlen < 0) {
3245         return -TARGET_EINVAL;
3246     }
3247 
3248     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3249     if (!host_msg)
3250         return -TARGET_EFAULT;
3251     if (fd_trans_target_to_host_data(fd)) {
3252         copy_msg = host_msg;
3253         host_msg = g_malloc(len);
3254         memcpy(host_msg, copy_msg, len);
3255         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3256         if (ret < 0) {
3257             goto fail;
3258         }
3259     }
3260     if (target_addr) {
3261         addr = alloca(addrlen+1);
3262         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3263         if (ret) {
3264             goto fail;
3265         }
3266         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3267     } else {
3268         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3269     }
3270 fail:
3271     if (copy_msg) {
3272         g_free(host_msg);
3273         host_msg = copy_msg;
3274     }
3275     unlock_user(host_msg, msg, 0);
3276     return ret;
3277 }
3278 
3279 /* do_recvfrom() must return target values and target errnos. */
3280 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3281                             abi_ulong target_addr,
3282                             abi_ulong target_addrlen)
3283 {
3284     socklen_t addrlen, ret_addrlen;
3285     void *addr;
3286     void *host_msg;
3287     abi_long ret;
3288 
3289     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3290     if (!host_msg)
3291         return -TARGET_EFAULT;
3292     if (target_addr) {
3293         if (get_user_u32(addrlen, target_addrlen)) {
3294             ret = -TARGET_EFAULT;
3295             goto fail;
3296         }
3297         if ((int)addrlen < 0) {
3298             ret = -TARGET_EINVAL;
3299             goto fail;
3300         }
3301         addr = alloca(addrlen);
3302         ret_addrlen = addrlen;
3303         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3304                                       addr, &ret_addrlen));
3305     } else {
3306         addr = NULL; /* To keep compiler quiet.  */
3307         addrlen = 0; /* To keep compiler quiet.  */
3308         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3309     }
3310     if (!is_error(ret)) {
3311         if (fd_trans_host_to_target_data(fd)) {
3312             abi_long trans;
3313             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3314             if (is_error(trans)) {
3315                 ret = trans;
3316                 goto fail;
3317             }
3318         }
3319         if (target_addr) {
3320             host_to_target_sockaddr(target_addr, addr,
3321                                     MIN(addrlen, ret_addrlen));
3322             if (put_user_u32(ret_addrlen, target_addrlen)) {
3323                 ret = -TARGET_EFAULT;
3324                 goto fail;
3325             }
3326         }
3327         unlock_user(host_msg, msg, len);
3328     } else {
3329 fail:
3330         unlock_user(host_msg, msg, 0);
3331     }
3332     return ret;
3333 }
3334 
3335 #ifdef TARGET_NR_socketcall
3336 /* do_socketcall() must return target values and target errnos. */
3337 static abi_long do_socketcall(int num, abi_ulong vptr)
3338 {
3339     static const unsigned nargs[] = { /* number of arguments per operation */
3340         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3341         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3342         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3343         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3344         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3345         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3346         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3347         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3348         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3349         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3350         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3351         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3352         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3353         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3354         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3355         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3356         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3357         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3358         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3359         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3360     };
3361     abi_long a[6]; /* max 6 args */
3362     unsigned i;
3363 
3364     /* check the range of the first argument num */
3365     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3366     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3367         return -TARGET_EINVAL;
3368     }
3369     /* ensure we have space for args */
3370     if (nargs[num] > ARRAY_SIZE(a)) {
3371         return -TARGET_EINVAL;
3372     }
3373     /* collect the arguments in a[] according to nargs[] */
3374     for (i = 0; i < nargs[num]; ++i) {
3375         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3376             return -TARGET_EFAULT;
3377         }
3378     }
3379     /* now that we have the args, invoke the appropriate underlying function */
3380     switch (num) {
3381     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3382         return do_socket(a[0], a[1], a[2]);
3383     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3384         return do_bind(a[0], a[1], a[2]);
3385     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3386         return do_connect(a[0], a[1], a[2]);
3387     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3388         return get_errno(listen(a[0], a[1]));
3389     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3390         return do_accept4(a[0], a[1], a[2], 0);
3391     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3392         return do_getsockname(a[0], a[1], a[2]);
3393     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3394         return do_getpeername(a[0], a[1], a[2]);
3395     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3396         return do_socketpair(a[0], a[1], a[2], a[3]);
3397     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3398         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3399     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3400         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3401     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3402         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3403     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3404         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3405     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3406         return get_errno(shutdown(a[0], a[1]));
3407     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3408         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3409     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3410         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3411     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3412         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3413     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3414         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3415     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3416         return do_accept4(a[0], a[1], a[2], a[3]);
3417     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3418         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3419     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3420         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3421     default:
3422         gemu_log("Unsupported socketcall: %d\n", num);
3423         return -TARGET_EINVAL;
3424     }
3425 }
3426 #endif
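
/*
 * For reference, the nargs[] table above mirrors how a guest libc packs the
 * socketcall arguments: they are stored contiguously as word-sized values and
 * the address of that array is passed as vptr.  A 32-bit guest's connect()
 * wrapper therefore ends up doing something roughly like (illustrative sketch,
 * not code from this file):
 *
 *     long args[3] = { sockfd, (long)addr, (long)addrlen };
 *     syscall(__NR_socketcall, SYS_CONNECT, args);
 *
 * which is why do_socketcall() reads each argument back with
 * get_user_ual(a[i], vptr + i * sizeof(abi_long)) before dispatching.
 */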
3427 
3428 #define N_SHM_REGIONS	32
3429 
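/*
 * Table of guest shmat() attachments: do_shmat() records the guest address
 * and segment size of each mapping here so that do_shmdt() can later clear
 * the page flags for exactly that range.
 */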
3430 static struct shm_region {
3431     abi_ulong start;
3432     abi_ulong size;
3433     bool in_use;
3434 } shm_regions[N_SHM_REGIONS];
3435 
3436 #ifndef TARGET_SEMID64_DS
3437 /* asm-generic version of this struct */
3438 struct target_semid64_ds
3439 {
3440   struct target_ipc_perm sem_perm;
3441   abi_ulong sem_otime;
3442 #if TARGET_ABI_BITS == 32
3443   abi_ulong __unused1;
3444 #endif
3445   abi_ulong sem_ctime;
3446 #if TARGET_ABI_BITS == 32
3447   abi_ulong __unused2;
3448 #endif
3449   abi_ulong sem_nsems;
3450   abi_ulong __unused3;
3451   abi_ulong __unused4;
3452 };
3453 #endif
3454 
3455 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3456                                                abi_ulong target_addr)
3457 {
3458     struct target_ipc_perm *target_ip;
3459     struct target_semid64_ds *target_sd;
3460 
3461     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3462         return -TARGET_EFAULT;
3463     target_ip = &(target_sd->sem_perm);
3464     host_ip->__key = tswap32(target_ip->__key);
3465     host_ip->uid = tswap32(target_ip->uid);
3466     host_ip->gid = tswap32(target_ip->gid);
3467     host_ip->cuid = tswap32(target_ip->cuid);
3468     host_ip->cgid = tswap32(target_ip->cgid);
3469 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3470     host_ip->mode = tswap32(target_ip->mode);
3471 #else
3472     host_ip->mode = tswap16(target_ip->mode);
3473 #endif
3474 #if defined(TARGET_PPC)
3475     host_ip->__seq = tswap32(target_ip->__seq);
3476 #else
3477     host_ip->__seq = tswap16(target_ip->__seq);
3478 #endif
3479     unlock_user_struct(target_sd, target_addr, 0);
3480     return 0;
3481 }
3482 
3483 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3484                                                struct ipc_perm *host_ip)
3485 {
3486     struct target_ipc_perm *target_ip;
3487     struct target_semid64_ds *target_sd;
3488 
3489     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3490         return -TARGET_EFAULT;
3491     target_ip = &(target_sd->sem_perm);
3492     target_ip->__key = tswap32(host_ip->__key);
3493     target_ip->uid = tswap32(host_ip->uid);
3494     target_ip->gid = tswap32(host_ip->gid);
3495     target_ip->cuid = tswap32(host_ip->cuid);
3496     target_ip->cgid = tswap32(host_ip->cgid);
3497 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3498     target_ip->mode = tswap32(host_ip->mode);
3499 #else
3500     target_ip->mode = tswap16(host_ip->mode);
3501 #endif
3502 #if defined(TARGET_PPC)
3503     target_ip->__seq = tswap32(host_ip->__seq);
3504 #else
3505     target_ip->__seq = tswap16(host_ip->__seq);
3506 #endif
3507     unlock_user_struct(target_sd, target_addr, 1);
3508     return 0;
3509 }
3510 
3511 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3512                                                abi_ulong target_addr)
3513 {
3514     struct target_semid64_ds *target_sd;
3515 
3516     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3517         return -TARGET_EFAULT;
3518     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3519         return -TARGET_EFAULT;
3520     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3521     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3522     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3523     unlock_user_struct(target_sd, target_addr, 0);
3524     return 0;
3525 }
3526 
3527 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3528                                                struct semid_ds *host_sd)
3529 {
3530     struct target_semid64_ds *target_sd;
3531 
3532     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3533         return -TARGET_EFAULT;
3534     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3535         return -TARGET_EFAULT;
3536     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3537     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3538     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3539     unlock_user_struct(target_sd, target_addr, 1);
3540     return 0;
3541 }
3542 
3543 struct target_seminfo {
3544     int semmap;
3545     int semmni;
3546     int semmns;
3547     int semmnu;
3548     int semmsl;
3549     int semopm;
3550     int semume;
3551     int semusz;
3552     int semvmx;
3553     int semaem;
3554 };
3555 
3556 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3557                                               struct seminfo *host_seminfo)
3558 {
3559     struct target_seminfo *target_seminfo;
3560     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3561         return -TARGET_EFAULT;
3562     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3563     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3564     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3565     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3566     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3567     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3568     __put_user(host_seminfo->semume, &target_seminfo->semume);
3569     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3570     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3571     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3572     unlock_user_struct(target_seminfo, target_addr, 1);
3573     return 0;
3574 }
3575 
3576 union semun {
3577 	int val;
3578 	struct semid_ds *buf;
3579 	unsigned short *array;
3580 	struct seminfo *__buf;
3581 };
3582 
3583 union target_semun {
3584 	int val;
3585 	abi_ulong buf;
3586 	abi_ulong array;
3587 	abi_ulong __buf;
3588 };
3589 
3590 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3591                                                abi_ulong target_addr)
3592 {
3593     int nsems;
3594     unsigned short *array;
3595     union semun semun;
3596     struct semid_ds semid_ds;
3597     int i, ret;
3598 
3599     semun.buf = &semid_ds;
3600 
3601     ret = semctl(semid, 0, IPC_STAT, semun);
3602     if (ret == -1)
3603         return get_errno(ret);
3604 
3605     nsems = semid_ds.sem_nsems;
3606 
3607     *host_array = g_try_new(unsigned short, nsems);
3608     if (!*host_array) {
3609         return -TARGET_ENOMEM;
3610     }
3611     array = lock_user(VERIFY_READ, target_addr,
3612                       nsems*sizeof(unsigned short), 1);
3613     if (!array) {
3614         g_free(*host_array);
3615         return -TARGET_EFAULT;
3616     }
3617 
3618     for(i=0; i<nsems; i++) {
3619         __get_user((*host_array)[i], &array[i]);
3620     }
3621     unlock_user(array, target_addr, 0);
3622 
3623     return 0;
3624 }
3625 
3626 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3627                                                unsigned short **host_array)
3628 {
3629     int nsems;
3630     unsigned short *array;
3631     union semun semun;
3632     struct semid_ds semid_ds;
3633     int i, ret;
3634 
3635     semun.buf = &semid_ds;
3636 
3637     ret = semctl(semid, 0, IPC_STAT, semun);
3638     if (ret == -1)
3639         return get_errno(ret);
3640 
3641     nsems = semid_ds.sem_nsems;
3642 
3643     array = lock_user(VERIFY_WRITE, target_addr,
3644                       nsems*sizeof(unsigned short), 0);
3645     if (!array)
3646         return -TARGET_EFAULT;
3647 
3648     for(i=0; i<nsems; i++) {
3649         __put_user((*host_array)[i], &array[i]);
3650     }
3651     g_free(*host_array);
3652     unlock_user(array, target_addr, 1);
3653 
3654     return 0;
3655 }
3656 
3657 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3658                                  abi_ulong target_arg)
3659 {
3660     union target_semun target_su = { .buf = target_arg };
3661     union semun arg;
3662     struct semid_ds dsarg;
3663     unsigned short *array = NULL;
3664     struct seminfo seminfo;
3665     abi_long ret = -TARGET_EINVAL;
3666     abi_long err;
3667     cmd &= 0xff;
3668 
3669     switch( cmd ) {
3670 	case GETVAL:
3671 	case SETVAL:
3672             /* In 64 bit cross-endian situations, we will erroneously pick up
3673              * the wrong half of the union for the "val" element.  To rectify
3674              * this, the entire 8-byte structure is byteswapped, followed by
3675 	     * a swap of the 4 byte val field. In other cases, the data is
3676 	     * already in proper host byte order. */
3677 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3678 		target_su.buf = tswapal(target_su.buf);
3679 		arg.val = tswap32(target_su.val);
3680 	    } else {
3681 		arg.val = target_su.val;
3682 	    }
3683             ret = get_errno(semctl(semid, semnum, cmd, arg));
3684             break;
3685 	case GETALL:
3686 	case SETALL:
3687             err = target_to_host_semarray(semid, &array, target_su.array);
3688             if (err)
3689                 return err;
3690             arg.array = array;
3691             ret = get_errno(semctl(semid, semnum, cmd, arg));
3692             err = host_to_target_semarray(semid, target_su.array, &array);
3693             if (err)
3694                 return err;
3695             break;
3696 	case IPC_STAT:
3697 	case IPC_SET:
3698 	case SEM_STAT:
3699             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3700             if (err)
3701                 return err;
3702             arg.buf = &dsarg;
3703             ret = get_errno(semctl(semid, semnum, cmd, arg));
3704             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3705             if (err)
3706                 return err;
3707             break;
3708 	case IPC_INFO:
3709 	case SEM_INFO:
3710             arg.__buf = &seminfo;
3711             ret = get_errno(semctl(semid, semnum, cmd, arg));
3712             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3713             if (err)
3714                 return err;
3715             break;
3716 	case IPC_RMID:
3717 	case GETPID:
3718 	case GETNCNT:
3719 	case GETZCNT:
3720             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3721             break;
3722     }
3723 
3724     return ret;
3725 }
3726 
3727 struct target_sembuf {
3728     unsigned short sem_num;
3729     short sem_op;
3730     short sem_flg;
3731 };
3732 
3733 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3734                                              abi_ulong target_addr,
3735                                              unsigned nsops)
3736 {
3737     struct target_sembuf *target_sembuf;
3738     int i;
3739 
3740     target_sembuf = lock_user(VERIFY_READ, target_addr,
3741                               nsops*sizeof(struct target_sembuf), 1);
3742     if (!target_sembuf)
3743         return -TARGET_EFAULT;
3744 
3745     for(i=0; i<nsops; i++) {
3746         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3747         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3748         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3749     }
3750 
3751     unlock_user(target_sembuf, target_addr, 0);
3752 
3753     return 0;
3754 }
3755 
3756 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3757 {
3758     struct sembuf sops[nsops];
3759     abi_long ret;
3760 
3761     if (target_to_host_sembuf(sops, ptr, nsops))
3762         return -TARGET_EFAULT;
3763 
3764     ret = -TARGET_ENOSYS;
3765 #ifdef __NR_semtimedop
3766     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3767 #endif
3768 #ifdef __NR_ipc
3769     if (ret == -TARGET_ENOSYS) {
3770         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3771     }
3772 #endif
3773     return ret;
3774 }
3775 
3776 struct target_msqid_ds
3777 {
3778     struct target_ipc_perm msg_perm;
3779     abi_ulong msg_stime;
3780 #if TARGET_ABI_BITS == 32
3781     abi_ulong __unused1;
3782 #endif
3783     abi_ulong msg_rtime;
3784 #if TARGET_ABI_BITS == 32
3785     abi_ulong __unused2;
3786 #endif
3787     abi_ulong msg_ctime;
3788 #if TARGET_ABI_BITS == 32
3789     abi_ulong __unused3;
3790 #endif
3791     abi_ulong __msg_cbytes;
3792     abi_ulong msg_qnum;
3793     abi_ulong msg_qbytes;
3794     abi_ulong msg_lspid;
3795     abi_ulong msg_lrpid;
3796     abi_ulong __unused4;
3797     abi_ulong __unused5;
3798 };
3799 
3800 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3801                                                abi_ulong target_addr)
3802 {
3803     struct target_msqid_ds *target_md;
3804 
3805     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3806         return -TARGET_EFAULT;
3807     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3808         return -TARGET_EFAULT;
3809     host_md->msg_stime = tswapal(target_md->msg_stime);
3810     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3811     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3812     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3813     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3814     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3815     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3816     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3817     unlock_user_struct(target_md, target_addr, 0);
3818     return 0;
3819 }
3820 
3821 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3822                                                struct msqid_ds *host_md)
3823 {
3824     struct target_msqid_ds *target_md;
3825 
3826     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3827         return -TARGET_EFAULT;
3828     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3829         return -TARGET_EFAULT;
3830     target_md->msg_stime = tswapal(host_md->msg_stime);
3831     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3832     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3833     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3834     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3835     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3836     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3837     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3838     unlock_user_struct(target_md, target_addr, 1);
3839     return 0;
3840 }
3841 
3842 struct target_msginfo {
3843     int msgpool;
3844     int msgmap;
3845     int msgmax;
3846     int msgmnb;
3847     int msgmni;
3848     int msgssz;
3849     int msgtql;
3850     unsigned short int msgseg;
3851 };
3852 
3853 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3854                                               struct msginfo *host_msginfo)
3855 {
3856     struct target_msginfo *target_msginfo;
3857     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3858         return -TARGET_EFAULT;
3859     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3860     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3861     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3862     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3863     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3864     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3865     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3866     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3867     unlock_user_struct(target_msginfo, target_addr, 1);
3868     return 0;
3869 }
3870 
3871 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3872 {
3873     struct msqid_ds dsarg;
3874     struct msginfo msginfo;
3875     abi_long ret = -TARGET_EINVAL;
3876 
3877     cmd &= 0xff;
3878 
3879     switch (cmd) {
3880     case IPC_STAT:
3881     case IPC_SET:
3882     case MSG_STAT:
3883         if (target_to_host_msqid_ds(&dsarg,ptr))
3884             return -TARGET_EFAULT;
3885         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3886         if (host_to_target_msqid_ds(ptr,&dsarg))
3887             return -TARGET_EFAULT;
3888         break;
3889     case IPC_RMID:
3890         ret = get_errno(msgctl(msgid, cmd, NULL));
3891         break;
3892     case IPC_INFO:
3893     case MSG_INFO:
3894         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3895         if (host_to_target_msginfo(ptr, &msginfo))
3896             return -TARGET_EFAULT;
3897         break;
3898     }
3899 
3900     return ret;
3901 }
3902 
3903 struct target_msgbuf {
3904     abi_long mtype;
3905     char	mtext[1];
3906 };
3907 
3908 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3909                                  ssize_t msgsz, int msgflg)
3910 {
3911     struct target_msgbuf *target_mb;
3912     struct msgbuf *host_mb;
3913     abi_long ret = 0;
3914 
3915     if (msgsz < 0) {
3916         return -TARGET_EINVAL;
3917     }
3918 
3919     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3920         return -TARGET_EFAULT;
3921     host_mb = g_try_malloc(msgsz + sizeof(long));
3922     if (!host_mb) {
3923         unlock_user_struct(target_mb, msgp, 0);
3924         return -TARGET_ENOMEM;
3925     }
3926     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3927     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3928     ret = -TARGET_ENOSYS;
3929 #ifdef __NR_msgsnd
3930     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3931 #endif
3932 #ifdef __NR_ipc
3933     if (ret == -TARGET_ENOSYS) {
3934         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3935                                  host_mb, 0));
3936     }
3937 #endif
3938     g_free(host_mb);
3939     unlock_user_struct(target_mb, msgp, 0);
3940 
3941     return ret;
3942 }
3943 
3944 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3945                                  ssize_t msgsz, abi_long msgtyp,
3946                                  int msgflg)
3947 {
3948     struct target_msgbuf *target_mb;
3949     char *target_mtext;
3950     struct msgbuf *host_mb;
3951     abi_long ret = 0;
3952 
3953     if (msgsz < 0) {
3954         return -TARGET_EINVAL;
3955     }
3956 
3957     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3958         return -TARGET_EFAULT;
3959 
3960     host_mb = g_try_malloc(msgsz + sizeof(long));
3961     if (!host_mb) {
3962         ret = -TARGET_ENOMEM;
3963         goto end;
3964     }
3965     ret = -TARGET_ENOSYS;
3966 #ifdef __NR_msgrcv
3967     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3968 #endif
3969 #ifdef __NR_ipc
3970     if (ret == -TARGET_ENOSYS) {
3971         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3972                         msgflg, host_mb, msgtyp));
3973     }
3974 #endif
3975 
3976     if (ret > 0) {
3977         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3978         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3979         if (!target_mtext) {
3980             ret = -TARGET_EFAULT;
3981             goto end;
3982         }
3983         memcpy(target_mb->mtext, host_mb->mtext, ret);
3984         unlock_user(target_mtext, target_mtext_addr, ret);
3985     }
3986 
3987     target_mb->mtype = tswapal(host_mb->mtype);
3988 
3989 end:
3990     if (target_mb)
3991         unlock_user_struct(target_mb, msgp, 1);
3992     g_free(host_mb);
3993     return ret;
3994 }
3995 
3996 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3997                                                abi_ulong target_addr)
3998 {
3999     struct target_shmid_ds *target_sd;
4000 
4001     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4002         return -TARGET_EFAULT;
4003     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4004         return -TARGET_EFAULT;
4005     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4006     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4007     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4008     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4009     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4010     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4011     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4012     unlock_user_struct(target_sd, target_addr, 0);
4013     return 0;
4014 }
4015 
4016 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4017                                                struct shmid_ds *host_sd)
4018 {
4019     struct target_shmid_ds *target_sd;
4020 
4021     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4022         return -TARGET_EFAULT;
4023     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4024         return -TARGET_EFAULT;
4025     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4026     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4027     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4028     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4029     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4030     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4031     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4032     unlock_user_struct(target_sd, target_addr, 1);
4033     return 0;
4034 }
4035 
4036 struct  target_shminfo {
4037     abi_ulong shmmax;
4038     abi_ulong shmmin;
4039     abi_ulong shmmni;
4040     abi_ulong shmseg;
4041     abi_ulong shmall;
4042 };
4043 
4044 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4045                                               struct shminfo *host_shminfo)
4046 {
4047     struct target_shminfo *target_shminfo;
4048     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4049         return -TARGET_EFAULT;
4050     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4051     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4052     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4053     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4054     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4055     unlock_user_struct(target_shminfo, target_addr, 1);
4056     return 0;
4057 }
4058 
4059 struct target_shm_info {
4060     int used_ids;
4061     abi_ulong shm_tot;
4062     abi_ulong shm_rss;
4063     abi_ulong shm_swp;
4064     abi_ulong swap_attempts;
4065     abi_ulong swap_successes;
4066 };
4067 
4068 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4069                                                struct shm_info *host_shm_info)
4070 {
4071     struct target_shm_info *target_shm_info;
4072     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4073         return -TARGET_EFAULT;
4074     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4075     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4076     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4077     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4078     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4079     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4080     unlock_user_struct(target_shm_info, target_addr, 1);
4081     return 0;
4082 }
4083 
4084 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4085 {
4086     struct shmid_ds dsarg;
4087     struct shminfo shminfo;
4088     struct shm_info shm_info;
4089     abi_long ret = -TARGET_EINVAL;
4090 
4091     cmd &= 0xff;
4092 
4093     switch(cmd) {
4094     case IPC_STAT:
4095     case IPC_SET:
4096     case SHM_STAT:
4097         if (target_to_host_shmid_ds(&dsarg, buf))
4098             return -TARGET_EFAULT;
4099         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4100         if (host_to_target_shmid_ds(buf, &dsarg))
4101             return -TARGET_EFAULT;
4102         break;
4103     case IPC_INFO:
4104         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4105         if (host_to_target_shminfo(buf, &shminfo))
4106             return -TARGET_EFAULT;
4107         break;
4108     case SHM_INFO:
4109         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4110         if (host_to_target_shm_info(buf, &shm_info))
4111             return -TARGET_EFAULT;
4112         break;
4113     case IPC_RMID:
4114     case SHM_LOCK:
4115     case SHM_UNLOCK:
4116         ret = get_errno(shmctl(shmid, cmd, NULL));
4117         break;
4118     }
4119 
4120     return ret;
4121 }
4122 
4123 #ifndef TARGET_FORCE_SHMLBA
4124 /* For most architectures, SHMLBA is the same as the page size;
4125  * some architectures have larger values, in which case they should
4126  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4127  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4128  * and defining its own value for SHMLBA.
4129  *
4130  * The kernel also permits SHMLBA to be set by the architecture to a
4131  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4132  * this means that addresses are rounded to the large size if
4133  * SHM_RND is set but addresses not aligned to that size are not rejected
4134  * as long as they are at least page-aligned. Since the only architecture
4135  * which uses this is ia64, this code doesn't provide for that oddity.
4136  */
4137 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4138 {
4139     return TARGET_PAGE_SIZE;
4140 }
4141 #endif
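
/*
 * A target that does define TARGET_FORCE_SHMLBA instead supplies its own
 * target_shmlba() in its target headers.  Purely as an illustrative sketch
 * (not taken from any particular architecture), such an implementation
 * could look like:
 *
 *     #define TARGET_FORCE_SHMLBA 1
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;
 *     }
 *
 * do_shmat() below assumes the returned value is a power of two, since it
 * rounds addresses with "shmaddr &= ~(shmlba - 1)".
 */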
4142 
4143 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4144                                  int shmid, abi_ulong shmaddr, int shmflg)
4145 {
4146     abi_long raddr;
4147     void *host_raddr;
4148     struct shmid_ds shm_info;
4149     int i,ret;
4150     abi_ulong shmlba;
4151 
4152     /* find out the length of the shared memory segment */
4153     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4154     if (is_error(ret)) {
4155         /* can't get length, bail out */
4156         return ret;
4157     }
4158 
4159     shmlba = target_shmlba(cpu_env);
4160 
4161     if (shmaddr & (shmlba - 1)) {
4162         if (shmflg & SHM_RND) {
4163             shmaddr &= ~(shmlba - 1);
4164         } else {
4165             return -TARGET_EINVAL;
4166         }
4167     }
4168     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4169         return -TARGET_EINVAL;
4170     }
4171 
4172     mmap_lock();
4173 
4174     if (shmaddr)
4175         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4176     else {
4177         abi_ulong mmap_start;
4178 
4179         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4180         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4181 
4182         if (mmap_start == -1) {
4183             errno = ENOMEM;
4184             host_raddr = (void *)-1;
4185         } else
4186             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4187     }
4188 
4189     if (host_raddr == (void *)-1) {
4190         mmap_unlock();
4191         return get_errno((long)host_raddr);
4192     }
4193     raddr=h2g((unsigned long)host_raddr);
4194 
4195     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4196                    PAGE_VALID | PAGE_READ |
4197                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4198 
4199     for (i = 0; i < N_SHM_REGIONS; i++) {
4200         if (!shm_regions[i].in_use) {
4201             shm_regions[i].in_use = true;
4202             shm_regions[i].start = raddr;
4203             shm_regions[i].size = shm_info.shm_segsz;
4204             break;
4205         }
4206     }
4207 
4208     mmap_unlock();
4209     return raddr;
4210 
4211 }
4212 
4213 static inline abi_long do_shmdt(abi_ulong shmaddr)
4214 {
4215     int i;
4216     abi_long rv;
4217 
4218     mmap_lock();
4219 
4220     for (i = 0; i < N_SHM_REGIONS; ++i) {
4221         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4222             shm_regions[i].in_use = false;
4223             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4224             break;
4225         }
4226     }
4227     rv = get_errno(shmdt(g2h(shmaddr)));
4228 
4229     mmap_unlock();
4230 
4231     return rv;
4232 }
4233 
4234 #ifdef TARGET_NR_ipc
4235 /* ??? This only works with linear mappings.  */
4236 /* do_ipc() must return target values and target errnos. */
4237 static abi_long do_ipc(CPUArchState *cpu_env,
4238                        unsigned int call, abi_long first,
4239                        abi_long second, abi_long third,
4240                        abi_long ptr, abi_long fifth)
4241 {
4242     int version;
4243     abi_long ret = 0;
4244 
4245     version = call >> 16;
4246     call &= 0xffff;
4247 
4248     switch (call) {
4249     case IPCOP_semop:
4250         ret = do_semop(first, ptr, second);
4251         break;
4252 
4253     case IPCOP_semget:
4254         ret = get_errno(semget(first, second, third));
4255         break;
4256 
4257     case IPCOP_semctl: {
4258         /* The semun argument to semctl is passed by value, so dereference the
4259          * ptr argument. */
4260         abi_ulong atptr;
4261         get_user_ual(atptr, ptr);
4262         ret = do_semctl(first, second, third, atptr);
4263         break;
4264     }
4265 
4266     case IPCOP_msgget:
4267         ret = get_errno(msgget(first, second));
4268         break;
4269 
4270     case IPCOP_msgsnd:
4271         ret = do_msgsnd(first, ptr, second, third);
4272         break;
4273 
4274     case IPCOP_msgctl:
4275         ret = do_msgctl(first, second, ptr);
4276         break;
4277 
4278     case IPCOP_msgrcv:
4279         switch (version) {
4280         case 0:
4281             {
4282                 struct target_ipc_kludge {
4283                     abi_long msgp;
4284                     abi_long msgtyp;
4285                 } *tmp;
4286 
4287                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4288                     ret = -TARGET_EFAULT;
4289                     break;
4290                 }
4291 
4292                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4293 
4294                 unlock_user_struct(tmp, ptr, 0);
4295                 break;
4296             }
4297         default:
4298             ret = do_msgrcv(first, ptr, second, fifth, third);
4299         }
4300         break;
4301 
4302     case IPCOP_shmat:
4303         switch (version) {
4304         default:
4305         {
4306             abi_ulong raddr;
4307             raddr = do_shmat(cpu_env, first, ptr, second);
4308             if (is_error(raddr))
4309                 return get_errno(raddr);
4310             if (put_user_ual(raddr, third))
4311                 return -TARGET_EFAULT;
4312             break;
4313         }
4314         case 1:
4315             ret = -TARGET_EINVAL;
4316             break;
4317         }
4318 	break;
4319     case IPCOP_shmdt:
4320         ret = do_shmdt(ptr);
4321 	break;
4322 
4323     case IPCOP_shmget:
4324 	/* IPC_* flag values are the same on all linux platforms */
4325 	ret = get_errno(shmget(first, second, third));
4326 	break;
4327 
4328 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4329     case IPCOP_shmctl:
4330         ret = do_shmctl(first, second, ptr);
4331         break;
4332     default:
4333 	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4334 	ret = -TARGET_ENOSYS;
4335 	break;
4336     }
4337     return ret;
4338 }
4339 #endif
4340 
4341 /* kernel structure types definitions */
4342 
4343 #define STRUCT(name, ...) STRUCT_ ## name,
4344 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4345 enum {
4346 #include "syscall_types.h"
4347 STRUCT_MAX
4348 };
4349 #undef STRUCT
4350 #undef STRUCT_SPECIAL
4351 
4352 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4353 #define STRUCT_SPECIAL(name)
4354 #include "syscall_types.h"
4355 #undef STRUCT
4356 #undef STRUCT_SPECIAL
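
/*
 * As an illustration of how these X-macros expand: an entry in
 * syscall_types.h such as
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * first contributes the enum constant STRUCT_winsize above, and then the
 * matching layout description
 *
 *     static const argtype struct_winsize_def[] =
 *         { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 *
 * which the thunk code uses to convert such a struct between guest and
 * host representations.
 */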
4357 
4358 typedef struct IOCTLEntry IOCTLEntry;
4359 
4360 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4361                              int fd, int cmd, abi_long arg);
4362 
4363 struct IOCTLEntry {
4364     int target_cmd;
4365     unsigned int host_cmd;
4366     const char *name;
4367     int access;
4368     do_ioctl_fn *do_ioctl;
4369     const argtype arg_type[5];
4370 };
4371 
4372 #define IOC_R 0x0001
4373 #define IOC_W 0x0002
4374 #define IOC_RW (IOC_R | IOC_W)
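
/*
 * Each emulated ioctl is described by one IOCTLEntry: the guest and host
 * request numbers, a printable name, an IOC_* access direction, an optional
 * do_ioctl_fn callback for requests needing special handling (such as
 * do_ioctl_fs_ioc_fiemap below), and a thunk description of the argument.
 * A plain entry in the table, which is generated later from ioctls.h, ends
 * up looking roughly like (sketch only):
 *
 *     { TARGET_TCGETS, TCGETS, "TCGETS", IOC_R, NULL,
 *       { MK_PTR(MK_STRUCT(STRUCT_termios)) } },
 */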
4375 
4376 #define MAX_STRUCT_SIZE 4096
4377 
4378 #ifdef CONFIG_FIEMAP
4379 /* So fiemap access checks don't overflow on 32 bit systems.
4380  * This is very slightly smaller than the limit imposed by
4381  * the underlying kernel.
4382  */
4383 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4384                             / sizeof(struct fiemap_extent))
4385 
4386 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4387                                        int fd, int cmd, abi_long arg)
4388 {
4389     /* The parameter for this ioctl is a struct fiemap followed
4390      * by an array of struct fiemap_extent whose size is set
4391      * in fiemap->fm_extent_count. The array is filled in by the
4392      * ioctl.
4393      */
4394     int target_size_in, target_size_out;
4395     struct fiemap *fm;
4396     const argtype *arg_type = ie->arg_type;
4397     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4398     void *argptr, *p;
4399     abi_long ret;
4400     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4401     uint32_t outbufsz;
4402     int free_fm = 0;
4403 
4404     assert(arg_type[0] == TYPE_PTR);
4405     assert(ie->access == IOC_RW);
4406     arg_type++;
4407     target_size_in = thunk_type_size(arg_type, 0);
4408     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4409     if (!argptr) {
4410         return -TARGET_EFAULT;
4411     }
4412     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4413     unlock_user(argptr, arg, 0);
4414     fm = (struct fiemap *)buf_temp;
4415     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4416         return -TARGET_EINVAL;
4417     }
4418 
4419     outbufsz = sizeof (*fm) +
4420         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4421 
4422     if (outbufsz > MAX_STRUCT_SIZE) {
4423         /* We can't fit all the extents into the fixed size buffer.
4424          * Allocate one that is large enough and use it instead.
4425          */
4426         fm = g_try_malloc(outbufsz);
4427         if (!fm) {
4428             return -TARGET_ENOMEM;
4429         }
4430         memcpy(fm, buf_temp, sizeof(struct fiemap));
4431         free_fm = 1;
4432     }
4433     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4434     if (!is_error(ret)) {
4435         target_size_out = target_size_in;
4436         /* An extent_count of 0 means we were only counting the extents
4437          * so there are no structs to copy
4438          */
4439         if (fm->fm_extent_count != 0) {
4440             target_size_out += fm->fm_mapped_extents * extent_size;
4441         }
4442         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4443         if (!argptr) {
4444             ret = -TARGET_EFAULT;
4445         } else {
4446             /* Convert the struct fiemap */
4447             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4448             if (fm->fm_extent_count != 0) {
4449                 p = argptr + target_size_in;
4450                 /* ...and then all the struct fiemap_extents */
4451                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4452                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4453                                   THUNK_TARGET);
4454                     p += extent_size;
4455                 }
4456             }
4457             unlock_user(argptr, arg, target_size_out);
4458         }
4459     }
4460     if (free_fm) {
4461         g_free(fm);
4462     }
4463     return ret;
4464 }
4465 #endif
4466 
4467 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4468                                 int fd, int cmd, abi_long arg)
4469 {
4470     const argtype *arg_type = ie->arg_type;
4471     int target_size;
4472     void *argptr;
4473     int ret;
4474     struct ifconf *host_ifconf;
4475     uint32_t outbufsz;
4476     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4477     int target_ifreq_size;
4478     int nb_ifreq;
4479     int free_buf = 0;
4480     int i;
4481     int target_ifc_len;
4482     abi_long target_ifc_buf;
4483     int host_ifc_len;
4484     char *host_ifc_buf;
4485 
4486     assert(arg_type[0] == TYPE_PTR);
4487     assert(ie->access == IOC_RW);
4488 
4489     arg_type++;
4490     target_size = thunk_type_size(arg_type, 0);
4491 
4492     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4493     if (!argptr)
4494         return -TARGET_EFAULT;
4495     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4496     unlock_user(argptr, arg, 0);
4497 
4498     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4499     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4500     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4501 
4502     if (target_ifc_buf != 0) {
4503         target_ifc_len = host_ifconf->ifc_len;
4504         nb_ifreq = target_ifc_len / target_ifreq_size;
4505         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4506 
4507         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4508         if (outbufsz > MAX_STRUCT_SIZE) {
4509             /*
4510              * We can't fit all the ifreq entries into the fixed size buffer.
4511              * Allocate one that is large enough and use it instead.
4512              */
4513             host_ifconf = g_try_malloc(outbufsz);
4514             if (!host_ifconf) {
4515                 return -TARGET_ENOMEM;
4516             }
4517             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4518             free_buf = 1;
4519         }
4520         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4521 
4522         host_ifconf->ifc_len = host_ifc_len;
4523     } else {
4524       host_ifc_buf = NULL;
4525     }
4526     host_ifconf->ifc_buf = host_ifc_buf;
4527 
4528     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4529     if (!is_error(ret)) {
4530 	/* convert host ifc_len to target ifc_len */
4531 
4532         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4533         target_ifc_len = nb_ifreq * target_ifreq_size;
4534         host_ifconf->ifc_len = target_ifc_len;
4535 
4536 	/* restore target ifc_buf */
4537 
4538         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4539 
4540 	/* copy struct ifconf to target user */
4541 
4542         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4543         if (!argptr)
4544             return -TARGET_EFAULT;
4545         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4546         unlock_user(argptr, arg, target_size);
4547 
4548         if (target_ifc_buf != 0) {
4549             /* copy ifreq[] to target user */
4550             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4551             for (i = 0; i < nb_ifreq ; i++) {
4552                 thunk_convert(argptr + i * target_ifreq_size,
4553                               host_ifc_buf + i * sizeof(struct ifreq),
4554                               ifreq_arg_type, THUNK_TARGET);
4555             }
4556             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4557         }
4558     }
4559 
4560     if (free_buf) {
4561         g_free(host_ifconf);
4562     }
4563 
4564     return ret;
4565 }
4566 
4567 #if defined(CONFIG_USBFS)
4568 #if HOST_LONG_BITS > 64
4569 #error USBDEVFS thunks do not support >64 bit hosts yet.
4570 #endif
4571 struct live_urb {
4572     uint64_t target_urb_adr;
4573     uint64_t target_buf_adr;
4574     char *target_buf_ptr;
4575     struct usbdevfs_urb host_urb;
4576 };
4577 
4578 static GHashTable *usbdevfs_urb_hashtable(void)
4579 {
4580     static GHashTable *urb_hashtable;
4581 
4582     if (!urb_hashtable) {
4583         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4584     }
4585     return urb_hashtable;
4586 }
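
/*
 * The table is keyed on the guest URB address: g_int64_hash()/g_int64_equal()
 * dereference their key pointers as 64-bit values, and since target_urb_adr
 * is the first member of struct live_urb, inserting the struct pointer itself
 * (below) and looking it up with a pointer to a plain uint64_t (in
 * urb_hashtable_lookup()) both hash the same guest address.
 */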
4587 
4588 static void urb_hashtable_insert(struct live_urb *urb)
4589 {
4590     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4591     g_hash_table_insert(urb_hashtable, urb, urb);
4592 }
4593 
4594 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4595 {
4596     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4597     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4598 }
4599 
4600 static void urb_hashtable_remove(struct live_urb *urb)
4601 {
4602     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4603     g_hash_table_remove(urb_hashtable, urb);
4604 }
4605 
4606 static abi_long
4607 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4608                           int fd, int cmd, abi_long arg)
4609 {
4610     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4611     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4612     struct live_urb *lurb;
4613     void *argptr;
4614     uint64_t hurb;
4615     int target_size;
4616     uintptr_t target_urb_adr;
4617     abi_long ret;
4618 
4619     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4620 
4621     memset(buf_temp, 0, sizeof(uint64_t));
4622     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4623     if (is_error(ret)) {
4624         return ret;
4625     }
4626 
4627     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4628     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4629     if (!lurb->target_urb_adr) {
4630         return -TARGET_EFAULT;
4631     }
4632     urb_hashtable_remove(lurb);
4633     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4634         lurb->host_urb.buffer_length);
4635     lurb->target_buf_ptr = NULL;
4636 
4637     /* restore the guest buffer pointer */
4638     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4639 
4640     /* update the guest urb struct */
4641     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4642     if (!argptr) {
4643         g_free(lurb);
4644         return -TARGET_EFAULT;
4645     }
4646     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4647     unlock_user(argptr, lurb->target_urb_adr, target_size);
4648 
4649     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4650     /* write back the urb handle */
4651     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4652     if (!argptr) {
4653         g_free(lurb);
4654         return -TARGET_EFAULT;
4655     }
4656 
4657     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4658     target_urb_adr = lurb->target_urb_adr;
4659     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4660     unlock_user(argptr, arg, target_size);
4661 
4662     g_free(lurb);
4663     return ret;
4664 }
4665 
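/*
 * Cancel a previously submitted URB: look up the live_urb tracked for
 * the guest URB address and pass the host copy to the kernel.  The
 * table entry is left in place here so the cancelled URB can still be
 * reaped (and freed) later.
 */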
4666 static abi_long
4667 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4668                              uint8_t *buf_temp __attribute__((unused)),
4669                              int fd, int cmd, abi_long arg)
4670 {
4671     struct live_urb *lurb;
4672 
4673     /* map target address back to host URB with metadata. */
4674     lurb = urb_hashtable_lookup(arg);
4675     if (!lurb) {
4676         return -TARGET_EFAULT;
4677     }
4678     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4679 }
4680 
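/*
 * Submit a URB on behalf of the guest: allocate a live_urb, convert the
 * guest usbdevfs_urb into it, lock the guest data buffer (direction
 * taken from the endpoint), point the host URB at the locked buffer and
 * issue the ioctl.  On success the live_urb is remembered in the hash
 * table so the reap/discard handlers can find it; on failure it is torn
 * down here.
 */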
4681 static abi_long
4682 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4683                             int fd, int cmd, abi_long arg)
4684 {
4685     const argtype *arg_type = ie->arg_type;
4686     int target_size;
4687     abi_long ret;
4688     void *argptr;
4689     int rw_dir;
4690     struct live_urb *lurb;
4691 
4692     /*
4693      * Each submitted URB needs to map to a unique ID for the
4694      * kernel, and that unique ID needs to be a pointer to
4695      * host memory.  Hence, we need to malloc for each URB.
4696      * Isochronous transfers have a variable-length struct.
4697      */
4698     arg_type++;
4699     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4700 
4701     /* construct host copy of urb and metadata */
4702     lurb = g_try_malloc0(sizeof(struct live_urb));
4703     if (!lurb) {
4704         return -TARGET_ENOMEM;
4705     }
4706 
4707     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4708     if (!argptr) {
4709         g_free(lurb);
4710         return -TARGET_EFAULT;
4711     }
4712     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4713     unlock_user(argptr, arg, 0);
4714 
4715     lurb->target_urb_adr = arg;
4716     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4717 
4718     /* buffer space used depends on endpoint type so lock the entire buffer */
4719     /* control type urbs should check the buffer contents for true direction */
4720     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4721     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4722         lurb->host_urb.buffer_length, 1);
4723     if (lurb->target_buf_ptr == NULL) {
4724         g_free(lurb);
4725         return -TARGET_EFAULT;
4726     }
4727 
4728     /* update buffer pointer in host copy */
4729     lurb->host_urb.buffer = lurb->target_buf_ptr;
4730 
4731     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4732     if (is_error(ret)) {
4733         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4734         g_free(lurb);
4735     } else {
4736         urb_hashtable_insert(lurb);
4737     }
4738 
4739     return ret;
4740 }
4741 #endif /* CONFIG_USBFS */
4742 
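/*
 * Device-mapper ioctls pass a struct dm_ioctl header followed by a
 * command-specific payload at data_start inside a data_size buffer.
 * buf_temp only holds the fixed header, so the whole request is staged
 * in big_buf: convert the guest input payload per command, run the
 * ioctl, then convert the output payload and the header back to the
 * guest.
 */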
4743 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4744                             int cmd, abi_long arg)
4745 {
4746     void *argptr;
4747     struct dm_ioctl *host_dm;
4748     abi_long guest_data;
4749     uint32_t guest_data_size;
4750     int target_size;
4751     const argtype *arg_type = ie->arg_type;
4752     abi_long ret;
4753     void *big_buf = NULL;
4754     char *host_data;
4755 
4756     arg_type++;
4757     target_size = thunk_type_size(arg_type, 0);
4758     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4759     if (!argptr) {
4760         ret = -TARGET_EFAULT;
4761         goto out;
4762     }
4763     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4764     unlock_user(argptr, arg, 0);
4765 
4766     /* buf_temp is too small, so fetch things into a bigger buffer */
4767     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4768     memcpy(big_buf, buf_temp, target_size);
4769     buf_temp = big_buf;
4770     host_dm = big_buf;
4771 
4772     guest_data = arg + host_dm->data_start;
4773     if ((guest_data - arg) < 0) {
4774         ret = -TARGET_EINVAL;
4775         goto out;
4776     }
4777     guest_data_size = host_dm->data_size - host_dm->data_start;
4778     host_data = (char*)host_dm + host_dm->data_start;
4779 
4780     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4781     if (!argptr) {
4782         ret = -TARGET_EFAULT;
4783         goto out;
4784     }
4785 
4786     switch (ie->host_cmd) {
4787     case DM_REMOVE_ALL:
4788     case DM_LIST_DEVICES:
4789     case DM_DEV_CREATE:
4790     case DM_DEV_REMOVE:
4791     case DM_DEV_SUSPEND:
4792     case DM_DEV_STATUS:
4793     case DM_DEV_WAIT:
4794     case DM_TABLE_STATUS:
4795     case DM_TABLE_CLEAR:
4796     case DM_TABLE_DEPS:
4797     case DM_LIST_VERSIONS:
4798         /* no input data */
4799         break;
4800     case DM_DEV_RENAME:
4801     case DM_DEV_SET_GEOMETRY:
4802         /* data contains only strings */
4803         memcpy(host_data, argptr, guest_data_size);
4804         break;
4805     case DM_TARGET_MSG:
4806         memcpy(host_data, argptr, guest_data_size);
4807         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4808         break;
4809     case DM_TABLE_LOAD:
4810     {
4811         void *gspec = argptr;
4812         void *cur_data = host_data;
4813         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4814         int spec_size = thunk_type_size(arg_type, 0);
4815         int i;
4816 
4817         for (i = 0; i < host_dm->target_count; i++) {
4818             struct dm_target_spec *spec = cur_data;
4819             uint32_t next;
4820             int slen;
4821 
4822             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4823             slen = strlen((char*)gspec + spec_size) + 1;
4824             next = spec->next;
4825             spec->next = sizeof(*spec) + slen;
4826             strcpy((char*)&spec[1], gspec + spec_size);
4827             gspec += next;
4828             cur_data += spec->next;
4829         }
4830         break;
4831     }
4832     default:
4833         ret = -TARGET_EINVAL;
4834         unlock_user(argptr, guest_data, 0);
4835         goto out;
4836     }
4837     unlock_user(argptr, guest_data, 0);
4838 
4839     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4840     if (!is_error(ret)) {
4841         guest_data = arg + host_dm->data_start;
4842         guest_data_size = host_dm->data_size - host_dm->data_start;
4843         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4844         switch (ie->host_cmd) {
4845         case DM_REMOVE_ALL:
4846         case DM_DEV_CREATE:
4847         case DM_DEV_REMOVE:
4848         case DM_DEV_RENAME:
4849         case DM_DEV_SUSPEND:
4850         case DM_DEV_STATUS:
4851         case DM_TABLE_LOAD:
4852         case DM_TABLE_CLEAR:
4853         case DM_TARGET_MSG:
4854         case DM_DEV_SET_GEOMETRY:
4855             /* no return data */
4856             break;
4857         case DM_LIST_DEVICES:
4858         {
4859             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4860             uint32_t remaining_data = guest_data_size;
4861             void *cur_data = argptr;
4862             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4863             int nl_size = 12; /* can't use thunk_size due to alignment */
4864 
4865             while (1) {
4866                 uint32_t next = nl->next;
4867                 if (next) {
4868                     nl->next = nl_size + (strlen(nl->name) + 1);
4869                 }
4870                 if (remaining_data < nl->next) {
4871                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4872                     break;
4873                 }
4874                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4875                 strcpy(cur_data + nl_size, nl->name);
4876                 cur_data += nl->next;
4877                 remaining_data -= nl->next;
4878                 if (!next) {
4879                     break;
4880                 }
4881                 nl = (void*)nl + next;
4882             }
4883             break;
4884         }
4885         case DM_DEV_WAIT:
4886         case DM_TABLE_STATUS:
4887         {
4888             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4889             void *cur_data = argptr;
4890             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4891             int spec_size = thunk_type_size(arg_type, 0);
4892             int i;
4893 
4894             for (i = 0; i < host_dm->target_count; i++) {
4895                 uint32_t next = spec->next;
4896                 int slen = strlen((char*)&spec[1]) + 1;
4897                 spec->next = (cur_data - argptr) + spec_size + slen;
4898                 if (guest_data_size < spec->next) {
4899                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4900                     break;
4901                 }
4902                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4903                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4904                 cur_data = argptr + spec->next;
4905                 spec = (void*)host_dm + host_dm->data_start + next;
4906             }
4907             break;
4908         }
4909         case DM_TABLE_DEPS:
4910         {
4911             void *hdata = (void*)host_dm + host_dm->data_start;
4912             int count = *(uint32_t*)hdata;
4913             uint64_t *hdev = hdata + 8;
4914             uint64_t *gdev = argptr + 8;
4915             int i;
4916 
4917             *(uint32_t*)argptr = tswap32(count);
4918             for (i = 0; i < count; i++) {
4919                 *gdev = tswap64(*hdev);
4920                 gdev++;
4921                 hdev++;
4922             }
4923             break;
4924         }
4925         case DM_LIST_VERSIONS:
4926         {
4927             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4928             uint32_t remaining_data = guest_data_size;
4929             void *cur_data = argptr;
4930             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4931             int vers_size = thunk_type_size(arg_type, 0);
4932 
4933             while (1) {
4934                 uint32_t next = vers->next;
4935                 if (next) {
4936                     vers->next = vers_size + (strlen(vers->name) + 1);
4937                 }
4938                 if (remaining_data < vers->next) {
4939                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4940                     break;
4941                 }
4942                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4943                 strcpy(cur_data + vers_size, vers->name);
4944                 cur_data += vers->next;
4945                 remaining_data -= vers->next;
4946                 if (!next) {
4947                     break;
4948                 }
4949                 vers = (void*)vers + next;
4950             }
4951             break;
4952         }
4953         default:
4954             unlock_user(argptr, guest_data, 0);
4955             ret = -TARGET_EINVAL;
4956             goto out;
4957         }
4958         unlock_user(argptr, guest_data, guest_data_size);
4959 
4960         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4961         if (!argptr) {
4962             ret = -TARGET_EFAULT;
4963             goto out;
4964         }
4965         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4966         unlock_user(argptr, arg, target_size);
4967     }
4968 out:
4969     g_free(big_buf);
4970     return ret;
4971 }
4972 
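/*
 * BLKPG carries a struct blkpg_ioctl_arg whose data field points at a
 * struct blkpg_partition.  Convert the outer struct, check that the
 * opcode is one we handle, convert the pointed-to partition into a
 * local host copy, and swizzle the data pointer before calling the
 * host ioctl.
 */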
4973 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4974                                int cmd, abi_long arg)
4975 {
4976     void *argptr;
4977     int target_size;
4978     const argtype *arg_type = ie->arg_type;
4979     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4980     abi_long ret;
4981 
4982     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4983     struct blkpg_partition host_part;
4984 
4985     /* Read and convert blkpg */
4986     arg_type++;
4987     target_size = thunk_type_size(arg_type, 0);
4988     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4989     if (!argptr) {
4990         ret = -TARGET_EFAULT;
4991         goto out;
4992     }
4993     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4994     unlock_user(argptr, arg, 0);
4995 
4996     switch (host_blkpg->op) {
4997     case BLKPG_ADD_PARTITION:
4998     case BLKPG_DEL_PARTITION:
4999         /* payload is struct blkpg_partition */
5000         break;
5001     default:
5002         /* Unknown opcode */
5003         ret = -TARGET_EINVAL;
5004         goto out;
5005     }
5006 
5007     /* Read and convert blkpg->data */
5008     arg = (abi_long)(uintptr_t)host_blkpg->data;
5009     target_size = thunk_type_size(part_arg_type, 0);
5010     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5011     if (!argptr) {
5012         ret = -TARGET_EFAULT;
5013         goto out;
5014     }
5015     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5016     unlock_user(argptr, arg, 0);
5017 
5018     /* Swizzle the data pointer to our local copy and call! */
5019     host_blkpg->data = &host_part;
5020     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5021 
5022 out:
5023     return ret;
5024 }
5025 
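/*
 * Handler for write-only route ioctls whose argument is a struct
 * rtentry (e.g. SIOCADDRT/SIOCDELRT).  The struct embeds a rt_dev
 * string pointer, so the fields are converted one by one: rt_dev is
 * locked as a guest string and passed through as a host pointer, while
 * everything else goes via thunk_convert().  The string is unlocked
 * again after the ioctl.
 */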
5026 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5027                                 int fd, int cmd, abi_long arg)
5028 {
5029     const argtype *arg_type = ie->arg_type;
5030     const StructEntry *se;
5031     const argtype *field_types;
5032     const int *dst_offsets, *src_offsets;
5033     int target_size;
5034     void *argptr;
5035     abi_ulong *target_rt_dev_ptr = NULL;
5036     unsigned long *host_rt_dev_ptr = NULL;
5037     abi_long ret;
5038     int i;
5039 
5040     assert(ie->access == IOC_W);
5041     assert(*arg_type == TYPE_PTR);
5042     arg_type++;
5043     assert(*arg_type == TYPE_STRUCT);
5044     target_size = thunk_type_size(arg_type, 0);
5045     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5046     if (!argptr) {
5047         return -TARGET_EFAULT;
5048     }
5049     arg_type++;
5050     assert(*arg_type == (int)STRUCT_rtentry);
5051     se = struct_entries + *arg_type++;
5052     assert(se->convert[0] == NULL);
5053     /* convert struct here to be able to catch rt_dev string */
5054     field_types = se->field_types;
5055     dst_offsets = se->field_offsets[THUNK_HOST];
5056     src_offsets = se->field_offsets[THUNK_TARGET];
5057     for (i = 0; i < se->nb_fields; i++) {
5058         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5059             assert(*field_types == TYPE_PTRVOID);
5060             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5061             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5062             if (*target_rt_dev_ptr != 0) {
5063                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5064                                                   tswapal(*target_rt_dev_ptr));
5065                 if (!*host_rt_dev_ptr) {
5066                     unlock_user(argptr, arg, 0);
5067                     return -TARGET_EFAULT;
5068                 }
5069             } else {
5070                 *host_rt_dev_ptr = 0;
5071             }
5072             field_types++;
5073             continue;
5074         }
5075         field_types = thunk_convert(buf_temp + dst_offsets[i],
5076                                     argptr + src_offsets[i],
5077                                     field_types, THUNK_HOST);
5078     }
5079     unlock_user(argptr, arg, 0);
5080 
5081     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5082 
5083     assert(host_rt_dev_ptr != NULL);
5084     assert(target_rt_dev_ptr != NULL);
5085     if (*host_rt_dev_ptr != 0) {
5086         unlock_user((void *)*host_rt_dev_ptr,
5087                     *target_rt_dev_ptr, 0);
5088     }
5089     return ret;
5090 }
5091 
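/* KDSIGACCEPT takes a signal number, so translate it from the guest's
 * numbering to the host's before issuing the ioctl. */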
5092 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5093                                      int fd, int cmd, abi_long arg)
5094 {
5095     int sig = target_to_host_signal(arg);
5096     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5097 }
5098 
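/*
 * SIOCGSTAMP: the host always fills in a native struct timeval; copy it
 * back in the old-style layout or the 64-bit layout depending on
 * whether the guest used the _OLD or _NEW command.  The SIOCGSTAMPNS
 * handler below does the same with a struct timespec.
 */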
5099 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5100                                     int fd, int cmd, abi_long arg)
5101 {
5102     struct timeval tv;
5103     abi_long ret;
5104 
5105     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5106     if (is_error(ret)) {
5107         return ret;
5108     }
5109 
5110     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5111         if (copy_to_user_timeval(arg, &tv)) {
5112             return -TARGET_EFAULT;
5113         }
5114     } else {
5115         if (copy_to_user_timeval64(arg, &tv)) {
5116             return -TARGET_EFAULT;
5117         }
5118     }
5119 
5120     return ret;
5121 }
5122 
5123 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5124                                       int fd, int cmd, abi_long arg)
5125 {
5126     struct timespec ts;
5127     abi_long ret;
5128 
5129     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5130     if (is_error(ret)) {
5131         return ret;
5132     }
5133 
5134     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5135         if (host_to_target_timespec(arg, &ts)) {
5136             return -TARGET_EFAULT;
5137         }
5138     } else {
5139         if (host_to_target_timespec64(arg, &ts)) {
5140             return -TARGET_EFAULT;
5141         }
5142     }
5143 
5144     return ret;
5145 }
5146 
5147 #ifdef TIOCGPTPEER
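/* TIOCGPTPEER takes open() flags, so translate the guest bitmask to the
 * host's values before calling the host ioctl. */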
5148 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5149                                      int fd, int cmd, abi_long arg)
5150 {
5151     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5152     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5153 }
5154 #endif
5155 
5156 static IOCTLEntry ioctl_entries[] = {
5157 #define IOCTL(cmd, access, ...) \
5158     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5159 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5160     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5161 #define IOCTL_IGNORE(cmd) \
5162     { TARGET_ ## cmd, 0, #cmd },
5163 #include "ioctls.h"
5164     { 0, 0, },
5165 };
5166 
5167 /* ??? Implement proper locking for ioctls.  */
5168 /* do_ioctl() must return target values and target errnos. */
5169 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5170 {
5171     const IOCTLEntry *ie;
5172     const argtype *arg_type;
5173     abi_long ret;
5174     uint8_t buf_temp[MAX_STRUCT_SIZE];
5175     int target_size;
5176     void *argptr;
5177 
5178     ie = ioctl_entries;
5179     for(;;) {
5180         if (ie->target_cmd == 0) {
5181             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5182             return -TARGET_ENOSYS;
5183         }
5184         if (ie->target_cmd == cmd)
5185             break;
5186         ie++;
5187     }
5188     arg_type = ie->arg_type;
5189     if (ie->do_ioctl) {
5190         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5191     } else if (!ie->host_cmd) {
5192         /* Some architectures define BSD ioctls in their headers
5193            that are not implemented in Linux.  */
5194         return -TARGET_ENOSYS;
5195     }
5196 
5197     switch(arg_type[0]) {
5198     case TYPE_NULL:
5199         /* no argument */
5200         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5201         break;
5202     case TYPE_PTRVOID:
5203     case TYPE_INT:
5204     case TYPE_LONG:
5205     case TYPE_ULONG:
5206         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5207         break;
5208     case TYPE_PTR:
5209         arg_type++;
5210         target_size = thunk_type_size(arg_type, 0);
5211         switch(ie->access) {
5212         case IOC_R:
5213             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5214             if (!is_error(ret)) {
5215                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5216                 if (!argptr)
5217                     return -TARGET_EFAULT;
5218                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5219                 unlock_user(argptr, arg, target_size);
5220             }
5221             break;
5222         case IOC_W:
5223             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5224             if (!argptr)
5225                 return -TARGET_EFAULT;
5226             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5227             unlock_user(argptr, arg, 0);
5228             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5229             break;
5230         default:
5231         case IOC_RW:
5232             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5233             if (!argptr)
5234                 return -TARGET_EFAULT;
5235             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5236             unlock_user(argptr, arg, 0);
5237             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5238             if (!is_error(ret)) {
5239                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5240                 if (!argptr)
5241                     return -TARGET_EFAULT;
5242                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5243                 unlock_user(argptr, arg, target_size);
5244             }
5245             break;
5246         }
5247         break;
5248     default:
5249         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5250                  (long)cmd, arg_type[0]);
5251         ret = -TARGET_ENOSYS;
5252         break;
5253     }
5254     return ret;
5255 }
5256 
5257 static const bitmask_transtbl iflag_tbl[] = {
5258         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5259         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5260         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5261         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5262         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5263         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5264         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5265         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5266         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5267         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5268         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5269         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5270         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5271         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5272         { 0, 0, 0, 0 }
5273 };
5274 
5275 static const bitmask_transtbl oflag_tbl[] = {
5276 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5277 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5278 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5279 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5280 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5281 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5282 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5283 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5284 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5285 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5286 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5287 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5288 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5289 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5290 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5291 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5292 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5293 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5294 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5295 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5296 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5297 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5298 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5299 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5300 	{ 0, 0, 0, 0 }
5301 };
5302 
5303 static const bitmask_transtbl cflag_tbl[] = {
5304 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5305 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5306 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5307 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5308 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5309 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5310 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5311 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5312 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5313 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5314 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5315 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5316 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5317 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5318 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5319 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5320 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5321 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5322 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5323 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5324 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5325 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5326 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5327 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5328 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5329 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5330 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5331 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5332 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5333 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5334 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5335 	{ 0, 0, 0, 0 }
5336 };
5337 
5338 static const bitmask_transtbl lflag_tbl[] = {
5339 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5340 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5341 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5342 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5343 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5344 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5345 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5346 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5347 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5348 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5349 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5350 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5351 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5352 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5353 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5354 	{ 0, 0, 0, 0 }
5355 };
5356 
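/*
 * Termios conversion: the c_iflag/c_oflag/c_cflag/c_lflag bitmasks are
 * translated through the tables above, c_line is copied as-is, and the
 * control characters are copied slot by slot between the target and
 * host VINTR/VQUIT/... indices.
 */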
5357 static void target_to_host_termios (void *dst, const void *src)
5358 {
5359     struct host_termios *host = dst;
5360     const struct target_termios *target = src;
5361 
5362     host->c_iflag =
5363         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5364     host->c_oflag =
5365         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5366     host->c_cflag =
5367         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5368     host->c_lflag =
5369         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5370     host->c_line = target->c_line;
5371 
5372     memset(host->c_cc, 0, sizeof(host->c_cc));
5373     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5374     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5375     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5376     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5377     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5378     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5379     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5380     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5381     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5382     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5383     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5384     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5385     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5386     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5387     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5388     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5389     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5390 }
5391 
5392 static void host_to_target_termios (void *dst, const void *src)
5393 {
5394     struct target_termios *target = dst;
5395     const struct host_termios *host = src;
5396 
5397     target->c_iflag =
5398         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5399     target->c_oflag =
5400         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5401     target->c_cflag =
5402         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5403     target->c_lflag =
5404         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5405     target->c_line = host->c_line;
5406 
5407     memset(target->c_cc, 0, sizeof(target->c_cc));
5408     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5409     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5410     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5411     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5412     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5413     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5414     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5415     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5416     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5417     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5418     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5419     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5420     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5421     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5422     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5423     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5424     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5425 }
5426 
5427 static const StructEntry struct_termios_def = {
5428     .convert = { host_to_target_termios, target_to_host_termios },
5429     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5430     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5431 };
5432 
5433 static bitmask_transtbl mmap_flags_tbl[] = {
5434     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5435     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5436     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5437     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5438       MAP_ANONYMOUS, MAP_ANONYMOUS },
5439     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5440       MAP_GROWSDOWN, MAP_GROWSDOWN },
5441     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5442       MAP_DENYWRITE, MAP_DENYWRITE },
5443     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5444       MAP_EXECUTABLE, MAP_EXECUTABLE },
5445     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5446     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5447       MAP_NORESERVE, MAP_NORESERVE },
5448     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5449     /* MAP_STACK has been ignored by the kernel for quite some time.
5450        Recognize it for the target insofar as we do not want to pass
5451        it through to the host.  */
5452     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5453     { 0, 0, 0, 0 }
5454 };
5455 
5456 #if defined(TARGET_I386)
5457 
5458 /* NOTE: there is really one LDT for all the threads */
5459 static uint8_t *ldt_table;
5460 
5461 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5462 {
5463     int size;
5464     void *p;
5465 
5466     if (!ldt_table)
5467         return 0;
5468     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5469     if (size > bytecount)
5470         size = bytecount;
5471     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5472     if (!p)
5473         return -TARGET_EFAULT;
5474     /* ??? Should this be byteswapped?  */
5475     memcpy(p, ldt_table, size);
5476     unlock_user(p, ptr, size);
5477     return size;
5478 }
5479 
5480 /* XXX: add locking support */
5481 static abi_long write_ldt(CPUX86State *env,
5482                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5483 {
5484     struct target_modify_ldt_ldt_s ldt_info;
5485     struct target_modify_ldt_ldt_s *target_ldt_info;
5486     int seg_32bit, contents, read_exec_only, limit_in_pages;
5487     int seg_not_present, useable, lm;
5488     uint32_t *lp, entry_1, entry_2;
5489 
5490     if (bytecount != sizeof(ldt_info))
5491         return -TARGET_EINVAL;
5492     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5493         return -TARGET_EFAULT;
5494     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5495     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5496     ldt_info.limit = tswap32(target_ldt_info->limit);
5497     ldt_info.flags = tswap32(target_ldt_info->flags);
5498     unlock_user_struct(target_ldt_info, ptr, 0);
5499 
5500     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5501         return -TARGET_EINVAL;
5502     seg_32bit = ldt_info.flags & 1;
5503     contents = (ldt_info.flags >> 1) & 3;
5504     read_exec_only = (ldt_info.flags >> 3) & 1;
5505     limit_in_pages = (ldt_info.flags >> 4) & 1;
5506     seg_not_present = (ldt_info.flags >> 5) & 1;
5507     useable = (ldt_info.flags >> 6) & 1;
5508 #ifdef TARGET_ABI32
5509     lm = 0;
5510 #else
5511     lm = (ldt_info.flags >> 7) & 1;
5512 #endif
5513     if (contents == 3) {
5514         if (oldmode)
5515             return -TARGET_EINVAL;
5516         if (seg_not_present == 0)
5517             return -TARGET_EINVAL;
5518     }
5519     /* allocate the LDT */
5520     if (!ldt_table) {
5521         env->ldt.base = target_mmap(0,
5522                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5523                                     PROT_READ|PROT_WRITE,
5524                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5525         if (env->ldt.base == -1)
5526             return -TARGET_ENOMEM;
5527         memset(g2h(env->ldt.base), 0,
5528                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5529         env->ldt.limit = 0xffff;
5530         ldt_table = g2h(env->ldt.base);
5531     }
5532 
5533     /* NOTE: same code as Linux kernel */
5534     /* Allow LDTs to be cleared by the user. */
5535     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5536         if (oldmode ||
5537             (contents == 0		&&
5538              read_exec_only == 1	&&
5539              seg_32bit == 0		&&
5540              limit_in_pages == 0	&&
5541              seg_not_present == 1	&&
5542              useable == 0 )) {
5543             entry_1 = 0;
5544             entry_2 = 0;
5545             goto install;
5546         }
5547     }
5548 
5549     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5550         (ldt_info.limit & 0x0ffff);
5551     entry_2 = (ldt_info.base_addr & 0xff000000) |
5552         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5553         (ldt_info.limit & 0xf0000) |
5554         ((read_exec_only ^ 1) << 9) |
5555         (contents << 10) |
5556         ((seg_not_present ^ 1) << 15) |
5557         (seg_32bit << 22) |
5558         (limit_in_pages << 23) |
5559         (lm << 21) |
5560         0x7000;
5561     if (!oldmode)
5562         entry_2 |= (useable << 20);
5563 
5564     /* Install the new entry ...  */
5565 install:
5566     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5567     lp[0] = tswap32(entry_1);
5568     lp[1] = tswap32(entry_2);
5569     return 0;
5570 }
5571 
5572 /* specific and weird i386 syscalls */
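/* modify_ldt(2): func 0 reads the LDT, func 1 writes an entry with the
 * legacy ("oldmode") semantics, and func 0x11 writes with the current
 * semantics. */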
5573 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5574                               unsigned long bytecount)
5575 {
5576     abi_long ret;
5577 
5578     switch (func) {
5579     case 0:
5580         ret = read_ldt(ptr, bytecount);
5581         break;
5582     case 1:
5583         ret = write_ldt(env, ptr, bytecount, 1);
5584         break;
5585     case 0x11:
5586         ret = write_ldt(env, ptr, bytecount, 0);
5587         break;
5588     default:
5589         ret = -TARGET_ENOSYS;
5590         break;
5591     }
5592     return ret;
5593 }
5594 
5595 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5596 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5597 {
5598     uint64_t *gdt_table = g2h(env->gdt.base);
5599     struct target_modify_ldt_ldt_s ldt_info;
5600     struct target_modify_ldt_ldt_s *target_ldt_info;
5601     int seg_32bit, contents, read_exec_only, limit_in_pages;
5602     int seg_not_present, useable, lm;
5603     uint32_t *lp, entry_1, entry_2;
5604     int i;
5605 
5606     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5607     if (!target_ldt_info)
5608         return -TARGET_EFAULT;
5609     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5610     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5611     ldt_info.limit = tswap32(target_ldt_info->limit);
5612     ldt_info.flags = tswap32(target_ldt_info->flags);
5613     if (ldt_info.entry_number == -1) {
5614         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5615             if (gdt_table[i] == 0) {
5616                 ldt_info.entry_number = i;
5617                 target_ldt_info->entry_number = tswap32(i);
5618                 break;
5619             }
5620         }
5621     }
5622     unlock_user_struct(target_ldt_info, ptr, 1);
5623 
5624     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5625         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5626            return -TARGET_EINVAL;
5627     seg_32bit = ldt_info.flags & 1;
5628     contents = (ldt_info.flags >> 1) & 3;
5629     read_exec_only = (ldt_info.flags >> 3) & 1;
5630     limit_in_pages = (ldt_info.flags >> 4) & 1;
5631     seg_not_present = (ldt_info.flags >> 5) & 1;
5632     useable = (ldt_info.flags >> 6) & 1;
5633 #ifdef TARGET_ABI32
5634     lm = 0;
5635 #else
5636     lm = (ldt_info.flags >> 7) & 1;
5637 #endif
5638 
5639     if (contents == 3) {
5640         if (seg_not_present == 0)
5641             return -TARGET_EINVAL;
5642     }
5643 
5644     /* NOTE: same code as Linux kernel */
5645     /* Allow LDTs to be cleared by the user. */
5646     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5647         if ((contents == 0             &&
5648              read_exec_only == 1       &&
5649              seg_32bit == 0            &&
5650              limit_in_pages == 0       &&
5651              seg_not_present == 1      &&
5652              useable == 0 )) {
5653             entry_1 = 0;
5654             entry_2 = 0;
5655             goto install;
5656         }
5657     }
5658 
5659     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5660         (ldt_info.limit & 0x0ffff);
5661     entry_2 = (ldt_info.base_addr & 0xff000000) |
5662         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5663         (ldt_info.limit & 0xf0000) |
5664         ((read_exec_only ^ 1) << 9) |
5665         (contents << 10) |
5666         ((seg_not_present ^ 1) << 15) |
5667         (seg_32bit << 22) |
5668         (limit_in_pages << 23) |
5669         (useable << 20) |
5670         (lm << 21) |
5671         0x7000;
5672 
5673     /* Install the new entry ...  */
5674 install:
5675     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5676     lp[0] = tswap32(entry_1);
5677     lp[1] = tswap32(entry_2);
5678     return 0;
5679 }
5680 
5681 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5682 {
5683     struct target_modify_ldt_ldt_s *target_ldt_info;
5684     uint64_t *gdt_table = g2h(env->gdt.base);
5685     uint32_t base_addr, limit, flags;
5686     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5687     int seg_not_present, useable, lm;
5688     uint32_t *lp, entry_1, entry_2;
5689 
5690     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5691     if (!target_ldt_info)
5692         return -TARGET_EFAULT;
5693     idx = tswap32(target_ldt_info->entry_number);
5694     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5695         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5696         unlock_user_struct(target_ldt_info, ptr, 1);
5697         return -TARGET_EINVAL;
5698     }
5699     lp = (uint32_t *)(gdt_table + idx);
5700     entry_1 = tswap32(lp[0]);
5701     entry_2 = tswap32(lp[1]);
5702 
5703     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5704     contents = (entry_2 >> 10) & 3;
5705     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5706     seg_32bit = (entry_2 >> 22) & 1;
5707     limit_in_pages = (entry_2 >> 23) & 1;
5708     useable = (entry_2 >> 20) & 1;
5709 #ifdef TARGET_ABI32
5710     lm = 0;
5711 #else
5712     lm = (entry_2 >> 21) & 1;
5713 #endif
5714     flags = (seg_32bit << 0) | (contents << 1) |
5715         (read_exec_only << 3) | (limit_in_pages << 4) |
5716         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5717     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5718     base_addr = (entry_1 >> 16) |
5719         (entry_2 & 0xff000000) |
5720         ((entry_2 & 0xff) << 16);
5721     target_ldt_info->base_addr = tswapal(base_addr);
5722     target_ldt_info->limit = tswap32(limit);
5723     target_ldt_info->flags = tswap32(flags);
5724     unlock_user_struct(target_ldt_info, ptr, 1);
5725     return 0;
5726 }
5727 #endif /* TARGET_I386 && TARGET_ABI32 */
5728 
5729 #ifndef TARGET_ABI32
5730 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5731 {
5732     abi_long ret = 0;
5733     abi_ulong val;
5734     int idx;
5735 
5736     switch(code) {
5737     case TARGET_ARCH_SET_GS:
5738     case TARGET_ARCH_SET_FS:
5739         if (code == TARGET_ARCH_SET_GS)
5740             idx = R_GS;
5741         else
5742             idx = R_FS;
5743         cpu_x86_load_seg(env, idx, 0);
5744         env->segs[idx].base = addr;
5745         break;
5746     case TARGET_ARCH_GET_GS:
5747     case TARGET_ARCH_GET_FS:
5748         if (code == TARGET_ARCH_GET_GS)
5749             idx = R_GS;
5750         else
5751             idx = R_FS;
5752         val = env->segs[idx].base;
5753         if (put_user(val, addr, abi_ulong))
5754             ret = -TARGET_EFAULT;
5755         break;
5756     default:
5757         ret = -TARGET_EINVAL;
5758         break;
5759     }
5760     return ret;
5761 }
5762 #endif
5763 
5764 #endif /* defined(TARGET_I386) */
5765 
5766 #define NEW_STACK_SIZE 0x40000
5767 
5768 
5769 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5770 typedef struct {
5771     CPUArchState *env;
5772     pthread_mutex_t mutex;
5773     pthread_cond_t cond;
5774     pthread_t thread;
5775     uint32_t tid;
5776     abi_ulong child_tidptr;
5777     abi_ulong parent_tidptr;
5778     sigset_t sigmask;
5779 } new_thread_info;
5780 
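/*
 * Thread entry point for CLONE_VM clones created by do_fork(): register
 * with RCU and TCG, publish the new TID through the child/parent tid
 * pointers if requested, restore the signal mask, wake the parent via
 * the condition variable, wait for the parent to finish setup (by
 * taking and releasing clone_lock), then enter cpu_loop().
 */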
5781 static void *clone_func(void *arg)
5782 {
5783     new_thread_info *info = arg;
5784     CPUArchState *env;
5785     CPUState *cpu;
5786     TaskState *ts;
5787 
5788     rcu_register_thread();
5789     tcg_register_thread();
5790     env = info->env;
5791     cpu = env_cpu(env);
5792     thread_cpu = cpu;
5793     ts = (TaskState *)cpu->opaque;
5794     info->tid = sys_gettid();
5795     task_settid(ts);
5796     if (info->child_tidptr)
5797         put_user_u32(info->tid, info->child_tidptr);
5798     if (info->parent_tidptr)
5799         put_user_u32(info->tid, info->parent_tidptr);
5800     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5801     /* Enable signals.  */
5802     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5803     /* Signal to the parent that we're ready.  */
5804     pthread_mutex_lock(&info->mutex);
5805     pthread_cond_broadcast(&info->cond);
5806     pthread_mutex_unlock(&info->mutex);
5807     /* Wait until the parent has finished initializing the tls state.  */
5808     pthread_mutex_lock(&clone_lock);
5809     pthread_mutex_unlock(&clone_lock);
5810     cpu_loop(env);
5811     /* never exits */
5812     return NULL;
5813 }
5814 
5815 /* do_fork() must return host values and target errnos (unlike most
5816    do_*() functions). */
5817 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5818                    abi_ulong parent_tidptr, target_ulong newtls,
5819                    abi_ulong child_tidptr)
5820 {
5821     CPUState *cpu = env_cpu(env);
5822     int ret;
5823     TaskState *ts;
5824     CPUState *new_cpu;
5825     CPUArchState *new_env;
5826     sigset_t sigmask;
5827 
5828     flags &= ~CLONE_IGNORED_FLAGS;
5829 
5830     /* Emulate vfork() with fork() */
5831     if (flags & CLONE_VFORK)
5832         flags &= ~(CLONE_VFORK | CLONE_VM);
5833 
5834     if (flags & CLONE_VM) {
5835         TaskState *parent_ts = (TaskState *)cpu->opaque;
5836         new_thread_info info;
5837         pthread_attr_t attr;
5838 
5839         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5840             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5841             return -TARGET_EINVAL;
5842         }
5843 
5844         ts = g_new0(TaskState, 1);
5845         init_task_state(ts);
5846 
5847         /* Grab a mutex so that thread setup appears atomic.  */
5848         pthread_mutex_lock(&clone_lock);
5849 
5850         /* we create a new CPU instance. */
5851         new_env = cpu_copy(env);
5852         /* Init regs that differ from the parent.  */
5853         cpu_clone_regs_child(new_env, newsp, flags);
5854         cpu_clone_regs_parent(env, flags);
5855         new_cpu = env_cpu(new_env);
5856         new_cpu->opaque = ts;
5857         ts->bprm = parent_ts->bprm;
5858         ts->info = parent_ts->info;
5859         ts->signal_mask = parent_ts->signal_mask;
5860 
5861         if (flags & CLONE_CHILD_CLEARTID) {
5862             ts->child_tidptr = child_tidptr;
5863         }
5864 
5865         if (flags & CLONE_SETTLS) {
5866             cpu_set_tls (new_env, newtls);
5867         }
5868 
5869         memset(&info, 0, sizeof(info));
5870         pthread_mutex_init(&info.mutex, NULL);
5871         pthread_mutex_lock(&info.mutex);
5872         pthread_cond_init(&info.cond, NULL);
5873         info.env = new_env;
5874         if (flags & CLONE_CHILD_SETTID) {
5875             info.child_tidptr = child_tidptr;
5876         }
5877         if (flags & CLONE_PARENT_SETTID) {
5878             info.parent_tidptr = parent_tidptr;
5879         }
5880 
5881         ret = pthread_attr_init(&attr);
5882         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5883         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5884         /* It is not safe to deliver signals until the child has finished
5885            initializing, so temporarily block all signals.  */
5886         sigfillset(&sigmask);
5887         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5888         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5889 
5890         /* If this is our first additional thread, we need to ensure we
5891          * generate code for parallel execution and flush old translations.
5892          */
5893         if (!parallel_cpus) {
5894             parallel_cpus = true;
5895             tb_flush(cpu);
5896         }
5897 
5898         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5899         /* TODO: Free new CPU state if thread creation failed.  */
5900 
5901         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5902         pthread_attr_destroy(&attr);
5903         if (ret == 0) {
5904             /* Wait for the child to initialize.  */
5905             pthread_cond_wait(&info.cond, &info.mutex);
5906             ret = info.tid;
5907         } else {
5908             ret = -1;
5909         }
5910         pthread_mutex_unlock(&info.mutex);
5911         pthread_cond_destroy(&info.cond);
5912         pthread_mutex_destroy(&info.mutex);
5913         pthread_mutex_unlock(&clone_lock);
5914     } else {
5915         /* if there is no CLONE_VM, we consider it a fork */
5916         if (flags & CLONE_INVALID_FORK_FLAGS) {
5917             return -TARGET_EINVAL;
5918         }
5919 
5920         /* We can't support custom termination signals */
5921         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5922             return -TARGET_EINVAL;
5923         }
5924 
5925         if (block_signals()) {
5926             return -TARGET_ERESTARTSYS;
5927         }
5928 
5929         fork_start();
5930         ret = fork();
5931         if (ret == 0) {
5932             /* Child Process.  */
5933             cpu_clone_regs_child(env, newsp, flags);
5934             fork_end(1);
5935             /* There is a race condition here.  The parent process could
5936                theoretically read the TID in the child process before the child
5937                tid is set.  This would require using either ptrace
5938                (not implemented) or having *_tidptr to point at a shared memory
5939                mapping.  We can't repeat the spinlock hack used above because
5940                the child process gets its own copy of the lock.  */
5941             if (flags & CLONE_CHILD_SETTID)
5942                 put_user_u32(sys_gettid(), child_tidptr);
5943             if (flags & CLONE_PARENT_SETTID)
5944                 put_user_u32(sys_gettid(), parent_tidptr);
5945             ts = (TaskState *)cpu->opaque;
5946             if (flags & CLONE_SETTLS)
5947                 cpu_set_tls (env, newtls);
5948             if (flags & CLONE_CHILD_CLEARTID)
5949                 ts->child_tidptr = child_tidptr;
5950         } else {
5951             cpu_clone_regs_parent(env, flags);
5952             fork_end(0);
5953         }
5954     }
5955     return ret;
5956 }
5957 
5958 /* warning: doesn't handle Linux-specific flags... */
5959 static int target_to_host_fcntl_cmd(int cmd)
5960 {
5961     int ret;
5962 
5963     switch(cmd) {
5964     case TARGET_F_DUPFD:
5965     case TARGET_F_GETFD:
5966     case TARGET_F_SETFD:
5967     case TARGET_F_GETFL:
5968     case TARGET_F_SETFL:
5969         ret = cmd;
5970         break;
5971     case TARGET_F_GETLK:
5972         ret = F_GETLK64;
5973         break;
5974     case TARGET_F_SETLK:
5975         ret = F_SETLK64;
5976         break;
5977     case TARGET_F_SETLKW:
5978         ret = F_SETLKW64;
5979         break;
5980     case TARGET_F_GETOWN:
5981         ret = F_GETOWN;
5982         break;
5983     case TARGET_F_SETOWN:
5984         ret = F_SETOWN;
5985         break;
5986     case TARGET_F_GETSIG:
5987         ret = F_GETSIG;
5988         break;
5989     case TARGET_F_SETSIG:
5990         ret = F_SETSIG;
5991         break;
5992 #if TARGET_ABI_BITS == 32
5993     case TARGET_F_GETLK64:
5994         ret = F_GETLK64;
5995         break;
5996     case TARGET_F_SETLK64:
5997         ret = F_SETLK64;
5998         break;
5999     case TARGET_F_SETLKW64:
6000         ret = F_SETLKW64;
6001         break;
6002 #endif
6003     case TARGET_F_SETLEASE:
6004         ret = F_SETLEASE;
6005         break;
6006     case TARGET_F_GETLEASE:
6007         ret = F_GETLEASE;
6008         break;
6009 #ifdef F_DUPFD_CLOEXEC
6010     case TARGET_F_DUPFD_CLOEXEC:
6011         ret = F_DUPFD_CLOEXEC;
6012         break;
6013 #endif
6014     case TARGET_F_NOTIFY:
6015         ret = F_NOTIFY;
6016         break;
6017 #ifdef F_GETOWN_EX
6018     case TARGET_F_GETOWN_EX:
6019         ret = F_GETOWN_EX;
6020         break;
6021 #endif
6022 #ifdef F_SETOWN_EX
6023     case TARGET_F_SETOWN_EX:
6024         ret = F_SETOWN_EX;
6025         break;
6026 #endif
6027 #ifdef F_SETPIPE_SZ
6028     case TARGET_F_SETPIPE_SZ:
6029         ret = F_SETPIPE_SZ;
6030         break;
6031     case TARGET_F_GETPIPE_SZ:
6032         ret = F_GETPIPE_SZ;
6033         break;
6034 #endif
6035     default:
6036         ret = -TARGET_EINVAL;
6037         break;
6038     }
6039 
6040 #if defined(__powerpc64__)
6041     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6042      * are not supported by the kernel. The glibc fcntl wrapper adjusts
6043      * them to 5, 6 and 7 before making the syscall(). Since we make the
6044      * syscall directly, adjust to what is supported by the kernel.
6045      */
6046     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6047         ret -= F_GETLK64 - 5;
6048     }
6049 #endif
6050 
6051     return ret;
6052 }
6053 
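/*
 * Translate flock lock types between target and host.  The macro
 * expands to a switch over the known types; target_to_host_flock()
 * rejects unknown guest values, while host_to_target_flock() passes
 * unknown host values through unchanged.
 */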
6054 #define FLOCK_TRANSTBL \
6055     switch (type) { \
6056     TRANSTBL_CONVERT(F_RDLCK); \
6057     TRANSTBL_CONVERT(F_WRLCK); \
6058     TRANSTBL_CONVERT(F_UNLCK); \
6059     TRANSTBL_CONVERT(F_EXLCK); \
6060     TRANSTBL_CONVERT(F_SHLCK); \
6061     }
6062 
6063 static int target_to_host_flock(int type)
6064 {
6065 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6066     FLOCK_TRANSTBL
6067 #undef  TRANSTBL_CONVERT
6068     return -TARGET_EINVAL;
6069 }
6070 
6071 static int host_to_target_flock(int type)
6072 {
6073 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6074     FLOCK_TRANSTBL
6075 #undef  TRANSTBL_CONVERT
6076     /* If we don't know how to convert the value coming
6077      * from the host, we copy it to the target field as-is.
6078      */
6079     return type;
6080 }
6081 
6082 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6083                                             abi_ulong target_flock_addr)
6084 {
6085     struct target_flock *target_fl;
6086     int l_type;
6087 
6088     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6089         return -TARGET_EFAULT;
6090     }
6091 
6092     __get_user(l_type, &target_fl->l_type);
6093     l_type = target_to_host_flock(l_type);
6094     if (l_type < 0) {
6095         return l_type;
6096     }
6097     fl->l_type = l_type;
6098     __get_user(fl->l_whence, &target_fl->l_whence);
6099     __get_user(fl->l_start, &target_fl->l_start);
6100     __get_user(fl->l_len, &target_fl->l_len);
6101     __get_user(fl->l_pid, &target_fl->l_pid);
6102     unlock_user_struct(target_fl, target_flock_addr, 0);
6103     return 0;
6104 }
6105 
6106 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6107                                           const struct flock64 *fl)
6108 {
6109     struct target_flock *target_fl;
6110     short l_type;
6111 
6112     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6113         return -TARGET_EFAULT;
6114     }
6115 
6116     l_type = host_to_target_flock(fl->l_type);
6117     __put_user(l_type, &target_fl->l_type);
6118     __put_user(fl->l_whence, &target_fl->l_whence);
6119     __put_user(fl->l_start, &target_fl->l_start);
6120     __put_user(fl->l_len, &target_fl->l_len);
6121     __put_user(fl->l_pid, &target_fl->l_pid);
6122     unlock_user_struct(target_fl, target_flock_addr, 1);
6123     return 0;
6124 }
6125 
6126 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6127 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6128 
6129 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6130 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6131                                                    abi_ulong target_flock_addr)
6132 {
6133     struct target_oabi_flock64 *target_fl;
6134     int l_type;
6135 
6136     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6137         return -TARGET_EFAULT;
6138     }
6139 
6140     __get_user(l_type, &target_fl->l_type);
6141     l_type = target_to_host_flock(l_type);
6142     if (l_type < 0) {
6143         return l_type;
6144     }
6145     fl->l_type = l_type;
6146     __get_user(fl->l_whence, &target_fl->l_whence);
6147     __get_user(fl->l_start, &target_fl->l_start);
6148     __get_user(fl->l_len, &target_fl->l_len);
6149     __get_user(fl->l_pid, &target_fl->l_pid);
6150     unlock_user_struct(target_fl, target_flock_addr, 0);
6151     return 0;
6152 }
6153 
6154 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6155                                                  const struct flock64 *fl)
6156 {
6157     struct target_oabi_flock64 *target_fl;
6158     short l_type;
6159 
6160     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6161         return -TARGET_EFAULT;
6162     }
6163 
6164     l_type = host_to_target_flock(fl->l_type);
6165     __put_user(l_type, &target_fl->l_type);
6166     __put_user(fl->l_whence, &target_fl->l_whence);
6167     __put_user(fl->l_start, &target_fl->l_start);
6168     __put_user(fl->l_len, &target_fl->l_len);
6169     __put_user(fl->l_pid, &target_fl->l_pid);
6170     unlock_user_struct(target_fl, target_flock_addr, 1);
6171     return 0;
6172 }
6173 #endif
6174 
6175 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6176                                               abi_ulong target_flock_addr)
6177 {
6178     struct target_flock64 *target_fl;
6179     int l_type;
6180 
6181     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6182         return -TARGET_EFAULT;
6183     }
6184 
6185     __get_user(l_type, &target_fl->l_type);
6186     l_type = target_to_host_flock(l_type);
6187     if (l_type < 0) {
6188         return l_type;
6189     }
6190     fl->l_type = l_type;
6191     __get_user(fl->l_whence, &target_fl->l_whence);
6192     __get_user(fl->l_start, &target_fl->l_start);
6193     __get_user(fl->l_len, &target_fl->l_len);
6194     __get_user(fl->l_pid, &target_fl->l_pid);
6195     unlock_user_struct(target_fl, target_flock_addr, 0);
6196     return 0;
6197 }
6198 
6199 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6200                                             const struct flock64 *fl)
6201 {
6202     struct target_flock64 *target_fl;
6203     short l_type;
6204 
6205     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6206         return -TARGET_EFAULT;
6207     }
6208 
6209     l_type = host_to_target_flock(fl->l_type);
6210     __put_user(l_type, &target_fl->l_type);
6211     __put_user(fl->l_whence, &target_fl->l_whence);
6212     __put_user(fl->l_start, &target_fl->l_start);
6213     __put_user(fl->l_len, &target_fl->l_len);
6214     __put_user(fl->l_pid, &target_fl->l_pid);
6215     unlock_user_struct(target_fl, target_flock_addr, 1);
6216     return 0;
6217 }
6218 
6219 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6220 {
6221     struct flock64 fl64;
6222 #ifdef F_GETOWN_EX
6223     struct f_owner_ex fox;
6224     struct target_f_owner_ex *target_fox;
6225 #endif
6226     abi_long ret;
6227     int host_cmd = target_to_host_fcntl_cmd(cmd);
6228 
6229     if (host_cmd == -TARGET_EINVAL)
6230         return host_cmd;
6231 
6232     switch(cmd) {
6233     case TARGET_F_GETLK:
6234         ret = copy_from_user_flock(&fl64, arg);
6235         if (ret) {
6236             return ret;
6237         }
6238         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6239         if (ret == 0) {
6240             ret = copy_to_user_flock(arg, &fl64);
6241         }
6242         break;
6243 
6244     case TARGET_F_SETLK:
6245     case TARGET_F_SETLKW:
6246         ret = copy_from_user_flock(&fl64, arg);
6247         if (ret) {
6248             return ret;
6249         }
6250         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6251         break;
6252 
6253     case TARGET_F_GETLK64:
6254         ret = copy_from_user_flock64(&fl64, arg);
6255         if (ret) {
6256             return ret;
6257         }
6258         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6259         if (ret == 0) {
6260             ret = copy_to_user_flock64(arg, &fl64);
6261         }
6262         break;
6263     case TARGET_F_SETLK64:
6264     case TARGET_F_SETLKW64:
6265         ret = copy_from_user_flock64(&fl64, arg);
6266         if (ret) {
6267             return ret;
6268         }
6269         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6270         break;
6271 
6272     case TARGET_F_GETFL:
6273         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6274         if (ret >= 0) {
6275             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6276         }
6277         break;
6278 
6279     case TARGET_F_SETFL:
6280         ret = get_errno(safe_fcntl(fd, host_cmd,
6281                                    target_to_host_bitmask(arg,
6282                                                           fcntl_flags_tbl)));
6283         break;
6284 
6285 #ifdef F_GETOWN_EX
6286     case TARGET_F_GETOWN_EX:
6287         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6288         if (ret >= 0) {
6289             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6290                 return -TARGET_EFAULT;
6291             target_fox->type = tswap32(fox.type);
6292             target_fox->pid = tswap32(fox.pid);
6293             unlock_user_struct(target_fox, arg, 1);
6294         }
6295         break;
6296 #endif
6297 
6298 #ifdef F_SETOWN_EX
6299     case TARGET_F_SETOWN_EX:
6300         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6301             return -TARGET_EFAULT;
6302         fox.type = tswap32(target_fox->type);
6303         fox.pid = tswap32(target_fox->pid);
6304         unlock_user_struct(target_fox, arg, 0);
6305         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6306         break;
6307 #endif
6308 
6309     case TARGET_F_SETOWN:
6310     case TARGET_F_GETOWN:
6311     case TARGET_F_SETSIG:
6312     case TARGET_F_GETSIG:
6313     case TARGET_F_SETLEASE:
6314     case TARGET_F_GETLEASE:
6315     case TARGET_F_SETPIPE_SZ:
6316     case TARGET_F_GETPIPE_SZ:
6317         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6318         break;
6319 
6320     default:
6321         ret = get_errno(safe_fcntl(fd, cmd, arg));
6322         break;
6323     }
6324     return ret;
6325 }
6326 
6327 #ifdef USE_UID16
6328 
6329 static inline int high2lowuid(int uid)
6330 {
6331     if (uid > 65535)
6332         return 65534;
6333     else
6334         return uid;
6335 }
6336 
6337 static inline int high2lowgid(int gid)
6338 {
6339     if (gid > 65535)
6340         return 65534;
6341     else
6342         return gid;
6343 }
6344 
6345 static inline int low2highuid(int uid)
6346 {
6347     if ((int16_t)uid == -1)
6348         return -1;
6349     else
6350         return uid;
6351 }
6352 
6353 static inline int low2highgid(int gid)
6354 {
6355     if ((int16_t)gid == -1)
6356         return -1;
6357     else
6358         return gid;
6359 }
6360 static inline int tswapid(int id)
6361 {
6362     return tswap16(id);
6363 }
6364 
6365 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6366 
6367 #else /* !USE_UID16 */
6368 static inline int high2lowuid(int uid)
6369 {
6370     return uid;
6371 }
6372 static inline int high2lowgid(int gid)
6373 {
6374     return gid;
6375 }
6376 static inline int low2highuid(int uid)
6377 {
6378     return uid;
6379 }
6380 static inline int low2highgid(int gid)
6381 {
6382     return gid;
6383 }
6384 static inline int tswapid(int id)
6385 {
6386     return tswap32(id);
6387 }
6388 
6389 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6390 
6391 #endif /* USE_UID16 */
6392 
6393 /* We must do direct syscalls for setting UID/GID, because we want to
6394  * implement the Linux system call semantics of "change only for this thread",
6395  * not the libc/POSIX semantics of "change for all threads in process".
6396  * (See http://ewontfix.com/17/ for more details.)
6397  * We use the 32-bit version of the syscalls if present; if it is not
6398  * then either the host architecture supports 32-bit UIDs natively with
6399  * the standard syscall, or the 16-bit UID is the best we can do.
6400  */
6401 #ifdef __NR_setuid32
6402 #define __NR_sys_setuid __NR_setuid32
6403 #else
6404 #define __NR_sys_setuid __NR_setuid
6405 #endif
6406 #ifdef __NR_setgid32
6407 #define __NR_sys_setgid __NR_setgid32
6408 #else
6409 #define __NR_sys_setgid __NR_setgid
6410 #endif
6411 #ifdef __NR_setresuid32
6412 #define __NR_sys_setresuid __NR_setresuid32
6413 #else
6414 #define __NR_sys_setresuid __NR_setresuid
6415 #endif
6416 #ifdef __NR_setresgid32
6417 #define __NR_sys_setresgid __NR_setresgid32
6418 #else
6419 #define __NR_sys_setresgid __NR_setresgid
6420 #endif
6421 
6422 _syscall1(int, sys_setuid, uid_t, uid)
6423 _syscall1(int, sys_setgid, gid_t, gid)
6424 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6425 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
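/*
 * Minimal usage sketch (illustrative only and compiled out; the helper
 * name and its argument are hypothetical): this is the pattern the setuid
 * emulation is expected to follow so that only the calling guest thread
 * changes credentials.  Calling libc's setuid() here instead would
 * broadcast the change to every thread of the QEMU process via glibc's
 * setxid signalling, i.e. POSIX semantics rather than Linux syscall
 * semantics.
 */
#if 0
static abi_long example_do_setuid(abi_ulong target_uid)
{
    /* raw syscall: affects only the calling thread */
    return get_errno(sys_setuid(low2highuid(target_uid)));
}
#endif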
6426 
6427 void syscall_init(void)
6428 {
6429     IOCTLEntry *ie;
6430     const argtype *arg_type;
6431     int size;
6432     int i;
6433 
6434     thunk_init(STRUCT_MAX);
6435 
6436 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6437 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6438 #include "syscall_types.h"
6439 #undef STRUCT
6440 #undef STRUCT_SPECIAL
6441 
6442     /* Build the target_to_host_errno_table[] from
6443      * host_to_target_errno_table[]. */
6444     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6445         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6446     }
6447 
6448     /* We patch the ioctl size if necessary. We rely on the fact that
6449        no ioctl has all bits set to '1' in the size field. */
6450     ie = ioctl_entries;
6451     while (ie->target_cmd != 0) {
6452         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6453             TARGET_IOC_SIZEMASK) {
6454             arg_type = ie->arg_type;
6455             if (arg_type[0] != TYPE_PTR) {
6456                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6457                         ie->target_cmd);
6458                 exit(1);
6459             }
6460             arg_type++;
6461             size = thunk_type_size(arg_type, 0);
6462             ie->target_cmd = (ie->target_cmd &
6463                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6464                 (size << TARGET_IOC_SIZESHIFT);
6465         }
6466 
6467         /* automatic consistency check if same arch */
6468 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6469     (defined(__x86_64__) && defined(TARGET_X86_64))
6470         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6471             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6472                     ie->name, ie->target_cmd, ie->host_cmd);
6473         }
6474 #endif
6475         ie++;
6476     }
6477 }
6478 
6479 #if TARGET_ABI_BITS == 32
6480 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6481 {
6482 #ifdef TARGET_WORDS_BIGENDIAN
6483     return ((uint64_t)word0 << 32) | word1;
6484 #else
6485     return ((uint64_t)word1 << 32) | word0;
6486 #endif
6487 }
6488 #else /* TARGET_ABI_BITS == 32 */
6489 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6490 {
6491     return word0;
6492 }
6493 #endif /* TARGET_ABI_BITS != 32 */
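/*
 * Worked example: a 32-bit little-endian guest passing the 64-bit offset
 * 0x0000000180000000 splits it across two registers as word0 = 0x80000000
 * (low half) and word1 = 0x00000001 (high half); target_offset64()
 * reassembles ((uint64_t)word1 << 32) | word0.  On a big-endian guest the
 * halves arrive in the opposite registers, hence the #ifdef above.
 */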
6494 
6495 #ifdef TARGET_NR_truncate64
6496 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6497                                          abi_long arg2,
6498                                          abi_long arg3,
6499                                          abi_long arg4)
6500 {
6501     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6502         arg2 = arg3;
6503         arg3 = arg4;
6504     }
6505     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6506 }
6507 #endif
6508 
6509 #ifdef TARGET_NR_ftruncate64
6510 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6511                                           abi_long arg2,
6512                                           abi_long arg3,
6513                                           abi_long arg4)
6514 {
6515     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6516         arg2 = arg3;
6517         arg3 = arg4;
6518     }
6519     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6520 }
6521 #endif
6522 
6523 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6524                                                  abi_ulong target_addr)
6525 {
6526     struct target_itimerspec *target_itspec;
6527 
6528     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6529         return -TARGET_EFAULT;
6530     }
6531 
6532     host_itspec->it_interval.tv_sec =
6533                             tswapal(target_itspec->it_interval.tv_sec);
6534     host_itspec->it_interval.tv_nsec =
6535                             tswapal(target_itspec->it_interval.tv_nsec);
6536     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6537     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6538 
6539     unlock_user_struct(target_itspec, target_addr, 1);
6540     return 0;
6541 }
6542 
6543 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6544                                                struct itimerspec *host_its)
6545 {
6546     struct target_itimerspec *target_itspec;
6547 
6548     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6549         return -TARGET_EFAULT;
6550     }
6551 
6552     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6553     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6554 
6555     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6556     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6557 
6558     unlock_user_struct(target_itspec, target_addr, 0);
6559     return 0;
6560 }
6561 
6562 static inline abi_long target_to_host_timex(struct timex *host_tx,
6563                                             abi_long target_addr)
6564 {
6565     struct target_timex *target_tx;
6566 
6567     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6568         return -TARGET_EFAULT;
6569     }
6570 
6571     __get_user(host_tx->modes, &target_tx->modes);
6572     __get_user(host_tx->offset, &target_tx->offset);
6573     __get_user(host_tx->freq, &target_tx->freq);
6574     __get_user(host_tx->maxerror, &target_tx->maxerror);
6575     __get_user(host_tx->esterror, &target_tx->esterror);
6576     __get_user(host_tx->status, &target_tx->status);
6577     __get_user(host_tx->constant, &target_tx->constant);
6578     __get_user(host_tx->precision, &target_tx->precision);
6579     __get_user(host_tx->tolerance, &target_tx->tolerance);
6580     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6581     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6582     __get_user(host_tx->tick, &target_tx->tick);
6583     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6584     __get_user(host_tx->jitter, &target_tx->jitter);
6585     __get_user(host_tx->shift, &target_tx->shift);
6586     __get_user(host_tx->stabil, &target_tx->stabil);
6587     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6588     __get_user(host_tx->calcnt, &target_tx->calcnt);
6589     __get_user(host_tx->errcnt, &target_tx->errcnt);
6590     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6591     __get_user(host_tx->tai, &target_tx->tai);
6592 
6593     unlock_user_struct(target_tx, target_addr, 0);
6594     return 0;
6595 }
6596 
6597 static inline abi_long host_to_target_timex(abi_long target_addr,
6598                                             struct timex *host_tx)
6599 {
6600     struct target_timex *target_tx;
6601 
6602     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6603         return -TARGET_EFAULT;
6604     }
6605 
6606     __put_user(host_tx->modes, &target_tx->modes);
6607     __put_user(host_tx->offset, &target_tx->offset);
6608     __put_user(host_tx->freq, &target_tx->freq);
6609     __put_user(host_tx->maxerror, &target_tx->maxerror);
6610     __put_user(host_tx->esterror, &target_tx->esterror);
6611     __put_user(host_tx->status, &target_tx->status);
6612     __put_user(host_tx->constant, &target_tx->constant);
6613     __put_user(host_tx->precision, &target_tx->precision);
6614     __put_user(host_tx->tolerance, &target_tx->tolerance);
6615     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6616     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6617     __put_user(host_tx->tick, &target_tx->tick);
6618     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6619     __put_user(host_tx->jitter, &target_tx->jitter);
6620     __put_user(host_tx->shift, &target_tx->shift);
6621     __put_user(host_tx->stabil, &target_tx->stabil);
6622     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6623     __put_user(host_tx->calcnt, &target_tx->calcnt);
6624     __put_user(host_tx->errcnt, &target_tx->errcnt);
6625     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6626     __put_user(host_tx->tai, &target_tx->tai);
6627 
6628     unlock_user_struct(target_tx, target_addr, 1);
6629     return 0;
6630 }
6631 
6632 
6633 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6634                                                abi_ulong target_addr)
6635 {
6636     struct target_sigevent *target_sevp;
6637 
6638     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6639         return -TARGET_EFAULT;
6640     }
6641 
6642     /* This union is awkward on 64 bit systems because it has a 32 bit
6643      * integer and a pointer in it; we follow the conversion approach
6644      * used for handling sigval types in signal.c so the guest should get
6645      * the correct value back even if we did a 64 bit byteswap and it's
6646      * using the 32 bit integer.
6647      */
6648     host_sevp->sigev_value.sival_ptr =
6649         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6650     host_sevp->sigev_signo =
6651         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6652     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6653     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6654 
6655     unlock_user_struct(target_sevp, target_addr, 1);
6656     return 0;
6657 }
6658 
6659 #if defined(TARGET_NR_mlockall)
6660 static inline int target_to_host_mlockall_arg(int arg)
6661 {
6662     int result = 0;
6663 
6664     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6665         result |= MCL_CURRENT;
6666     }
6667     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6668         result |= MCL_FUTURE;
6669     }
6670     return result;
6671 }
6672 #endif
6673 
6674 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6675      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6676      defined(TARGET_NR_newfstatat))
6677 static inline abi_long host_to_target_stat64(void *cpu_env,
6678                                              abi_ulong target_addr,
6679                                              struct stat *host_st)
6680 {
6681 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6682     if (((CPUARMState *)cpu_env)->eabi) {
6683         struct target_eabi_stat64 *target_st;
6684 
6685         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6686             return -TARGET_EFAULT;
6687         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6688         __put_user(host_st->st_dev, &target_st->st_dev);
6689         __put_user(host_st->st_ino, &target_st->st_ino);
6690 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6691         __put_user(host_st->st_ino, &target_st->__st_ino);
6692 #endif
6693         __put_user(host_st->st_mode, &target_st->st_mode);
6694         __put_user(host_st->st_nlink, &target_st->st_nlink);
6695         __put_user(host_st->st_uid, &target_st->st_uid);
6696         __put_user(host_st->st_gid, &target_st->st_gid);
6697         __put_user(host_st->st_rdev, &target_st->st_rdev);
6698         __put_user(host_st->st_size, &target_st->st_size);
6699         __put_user(host_st->st_blksize, &target_st->st_blksize);
6700         __put_user(host_st->st_blocks, &target_st->st_blocks);
6701         __put_user(host_st->st_atime, &target_st->target_st_atime);
6702         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6703         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6704 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6705         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6706         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6707         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6708 #endif
6709         unlock_user_struct(target_st, target_addr, 1);
6710     } else
6711 #endif
6712     {
6713 #if defined(TARGET_HAS_STRUCT_STAT64)
6714         struct target_stat64 *target_st;
6715 #else
6716         struct target_stat *target_st;
6717 #endif
6718 
6719         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6720             return -TARGET_EFAULT;
6721         memset(target_st, 0, sizeof(*target_st));
6722         __put_user(host_st->st_dev, &target_st->st_dev);
6723         __put_user(host_st->st_ino, &target_st->st_ino);
6724 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6725         __put_user(host_st->st_ino, &target_st->__st_ino);
6726 #endif
6727         __put_user(host_st->st_mode, &target_st->st_mode);
6728         __put_user(host_st->st_nlink, &target_st->st_nlink);
6729         __put_user(host_st->st_uid, &target_st->st_uid);
6730         __put_user(host_st->st_gid, &target_st->st_gid);
6731         __put_user(host_st->st_rdev, &target_st->st_rdev);
6732         /* XXX: better use of kernel struct */
6733         __put_user(host_st->st_size, &target_st->st_size);
6734         __put_user(host_st->st_blksize, &target_st->st_blksize);
6735         __put_user(host_st->st_blocks, &target_st->st_blocks);
6736         __put_user(host_st->st_atime, &target_st->target_st_atime);
6737         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6738         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6739 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6740         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6741         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6742         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6743 #endif
6744         unlock_user_struct(target_st, target_addr, 1);
6745     }
6746 
6747     return 0;
6748 }
6749 #endif
6750 
6751 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6752 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6753                                             abi_ulong target_addr)
6754 {
6755     struct target_statx *target_stx;
6756 
6757     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6758         return -TARGET_EFAULT;
6759     }
6760     memset(target_stx, 0, sizeof(*target_stx));
6761 
6762     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6763     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6764     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6765     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6766     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6767     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6768     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6769     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6770     __put_user(host_stx->stx_size, &target_stx->stx_size);
6771     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6772     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6773     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6774     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6775     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6776     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6777     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6778     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6779     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6780     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6781     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6782     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6783     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6784     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6785 
6786     unlock_user_struct(target_stx, target_addr, 1);
6787 
6788     return 0;
6789 }
6790 #endif
6791 
6792 
6793 /* ??? Using host futex calls even when target atomic operations
6794    are not really atomic probably breaks things.  However, implementing
6795    futexes locally would make futexes shared between multiple processes
6796    tricky.  Such shared futexes are probably useless anyway, because
6797    guest atomic operations won't work either.  */
6798 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6799                     target_ulong uaddr2, int val3)
6800 {
6801     struct timespec ts, *pts;
6802     int base_op;
6803 
6804     /* ??? We assume FUTEX_* constants are the same on both host
6805        and target.  */
6806 #ifdef FUTEX_CMD_MASK
6807     base_op = op & FUTEX_CMD_MASK;
6808 #else
6809     base_op = op;
6810 #endif
6811     switch (base_op) {
6812     case FUTEX_WAIT:
6813     case FUTEX_WAIT_BITSET:
6814         if (timeout) {
6815             pts = &ts;
6816             if (target_to_host_timespec(pts, timeout)) {
                      return -TARGET_EFAULT;
                  }
6817         } else {
6818             pts = NULL;
6819         }
6820         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6821                          pts, NULL, val3));
6822     case FUTEX_WAKE:
6823         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6824     case FUTEX_FD:
6825         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6826     case FUTEX_REQUEUE:
6827     case FUTEX_CMP_REQUEUE:
6828     case FUTEX_WAKE_OP:
6829         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6830            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6831            But the prototype takes a `struct timespec *'; insert casts
6832            to satisfy the compiler.  We do not need to tswap TIMEOUT
6833            since it's not compared to guest memory.  */
6834         pts = (struct timespec *)(uintptr_t) timeout;
6835         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6836                                     g2h(uaddr2),
6837                                     (base_op == FUTEX_CMP_REQUEUE
6838                                      ? tswap32(val3)
6839                                      : val3)));
6840     default:
6841         return -TARGET_ENOSYS;
6842     }
6843 }
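/*
 * Note (based on the kernel's futex flag layout): FUTEX_CMD_MASK strips
 * modifier bits such as FUTEX_PRIVATE_FLAG and FUTEX_CLOCK_REALTIME, so
 * e.g. a guest FUTEX_WAIT_PRIVATE is dispatched through the FUTEX_WAIT
 * case above while the unmodified op value, flags included, is what gets
 * passed on to the host safe_futex() call.
 */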
6844 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6845 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6846                                      abi_long handle, abi_long mount_id,
6847                                      abi_long flags)
6848 {
6849     struct file_handle *target_fh;
6850     struct file_handle *fh;
6851     int mid = 0;
6852     abi_long ret;
6853     char *name;
6854     unsigned int size, total_size;
6855 
6856     if (get_user_s32(size, handle)) {
6857         return -TARGET_EFAULT;
6858     }
6859 
6860     name = lock_user_string(pathname);
6861     if (!name) {
6862         return -TARGET_EFAULT;
6863     }
6864 
6865     total_size = sizeof(struct file_handle) + size;
6866     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6867     if (!target_fh) {
6868         unlock_user(name, pathname, 0);
6869         return -TARGET_EFAULT;
6870     }
6871 
6872     fh = g_malloc0(total_size);
6873     fh->handle_bytes = size;
6874 
6875     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6876     unlock_user(name, pathname, 0);
6877 
6878     /* man name_to_handle_at(2):
6879      * Other than the use of the handle_bytes field, the caller should treat
6880      * the file_handle structure as an opaque data type
6881      */
6882 
6883     memcpy(target_fh, fh, total_size);
6884     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6885     target_fh->handle_type = tswap32(fh->handle_type);
6886     g_free(fh);
6887     unlock_user(target_fh, handle, total_size);
6888 
6889     if (put_user_s32(mid, mount_id)) {
6890         return -TARGET_EFAULT;
6891     }
6892 
6893     return ret;
6894 
6895 }
6896 #endif
6897 
6898 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6899 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6900                                      abi_long flags)
6901 {
6902     struct file_handle *target_fh;
6903     struct file_handle *fh;
6904     unsigned int size, total_size;
6905     abi_long ret;
6906 
6907     if (get_user_s32(size, handle)) {
6908         return -TARGET_EFAULT;
6909     }
6910 
6911     total_size = sizeof(struct file_handle) + size;
6912     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6913     if (!target_fh) {
6914         return -TARGET_EFAULT;
6915     }
6916 
6917     fh = g_memdup(target_fh, total_size);
6918     fh->handle_bytes = size;
6919     fh->handle_type = tswap32(target_fh->handle_type);
6920 
6921     ret = get_errno(open_by_handle_at(mount_fd, fh,
6922                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6923 
6924     g_free(fh);
6925 
6926     unlock_user(target_fh, handle, total_size);
6927 
6928     return ret;
6929 }
6930 #endif
6931 
6932 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6933 
6934 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6935 {
6936     int host_flags;
6937     target_sigset_t *target_mask;
6938     sigset_t host_mask;
6939     abi_long ret;
6940 
6941     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6942         return -TARGET_EINVAL;
6943     }
6944     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6945         return -TARGET_EFAULT;
6946     }
6947 
6948     target_to_host_sigset(&host_mask, target_mask);
6949 
6950     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6951 
6952     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6953     if (ret >= 0) {
6954         fd_trans_register(ret, &target_signalfd_trans);
6955     }
6956 
6957     unlock_user_struct(target_mask, mask, 0);
6958 
6959     return ret;
6960 }
6961 #endif
6962 
6963 /* Map host to target signal numbers for the wait family of syscalls.
6964    Assume all other status bits are the same.  */
6965 int host_to_target_waitstatus(int status)
6966 {
6967     if (WIFSIGNALED(status)) {
6968         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6969     }
6970     if (WIFSTOPPED(status)) {
6971         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6972                | (status & 0xff);
6973     }
6974     return status;
6975 }
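/*
 * Worked example: for WIFSTOPPED the status is laid out as
 * (stop_signal << 8) | 0x7f, so a child stopped by SIGSTOP (19 on most
 * hosts) reports 0x137f and only bits 8..15 need translating.  For
 * WIFSIGNALED the terminating signal sits in the low 7 bits, and the
 * core-dump flag (0x80) plus any higher bits pass through unchanged.
 */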
6976 
6977 static int open_self_cmdline(void *cpu_env, int fd)
6978 {
6979     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6980     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6981     int i;
6982 
6983     for (i = 0; i < bprm->argc; i++) {
6984         size_t len = strlen(bprm->argv[i]) + 1;
6985 
6986         if (write(fd, bprm->argv[i], len) != len) {
6987             return -1;
6988         }
6989     }
6990 
6991     return 0;
6992 }
6993 
6994 static int open_self_maps(void *cpu_env, int fd)
6995 {
6996     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6997     TaskState *ts = cpu->opaque;
6998     FILE *fp;
6999     char *line = NULL;
7000     size_t len = 0;
7001     ssize_t read;
7002 
7003     fp = fopen("/proc/self/maps", "r");
7004     if (fp == NULL) {
7005         return -1;
7006     }
7007 
7008     while ((read = getline(&line, &len, fp)) != -1) {
7009         int fields, dev_maj, dev_min, inode;
7010         uint64_t min, max, offset;
7011         char flag_r, flag_w, flag_x, flag_p;
7012         char path[513] = "";
7013         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7014                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7015                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7016 
7017         if ((fields < 10) || (fields > 11)) {
7018             continue;
7019         }
7020         if (h2g_valid(min)) {
7021             int flags = page_get_flags(h2g(min));
7022             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7023             if (page_check_range(h2g(min), max - min, flags) == -1) {
7024                 continue;
7025             }
7026             if (h2g(min) == ts->info->stack_limit) {
7027                 pstrcpy(path, sizeof(path), "      [stack]");
7028             }
7029             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7030                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7031                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7032                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7033                     path[0] ? "         " : "", path);
7034         }
7035     }
7036 
7037     free(line);
7038     fclose(fp);
7039 
7040     return 0;
7041 }
7042 
7043 static int open_self_stat(void *cpu_env, int fd)
7044 {
7045     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7046     TaskState *ts = cpu->opaque;
7047     abi_ulong start_stack = ts->info->start_stack;
7048     int i;
7049 
7050     for (i = 0; i < 44; i++) {
7051       char buf[128];
7052       int len;
7053       uint64_t val = 0;
7054 
7055       if (i == 0) {
7056         /* pid */
7057         val = getpid();
7058         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7059       } else if (i == 1) {
7060         /* app name */
7061         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7062       } else if (i == 27) {
7063         /* stack bottom */
7064         val = start_stack;
7065         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7066       } else {
7067         /* for the rest, there is MasterCard */
7068         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7069       }
7070 
7071       len = strlen(buf);
7072       if (write(fd, buf, len) != len) {
7073           return -1;
7074       }
7075     }
7076 
7077     return 0;
7078 }
7079 
7080 static int open_self_auxv(void *cpu_env, int fd)
7081 {
7082     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7083     TaskState *ts = cpu->opaque;
7084     abi_ulong auxv = ts->info->saved_auxv;
7085     abi_ulong len = ts->info->auxv_len;
7086     char *ptr;
7087 
7088     /*
7089      * The auxiliary vector is stored on the target process's stack;
7090      * read the whole auxv vector and copy it to the file.
7091      */
7092     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7093     if (ptr != NULL) {
7094         while (len > 0) {
7095             ssize_t r;
7096             r = write(fd, ptr, len);
7097             if (r <= 0) {
7098                 break;
7099             }
7100             len -= r;
7101             ptr += r;
7102         }
7103         lseek(fd, 0, SEEK_SET);
7104         unlock_user(ptr, auxv, len);
7105     }
7106 
7107     return 0;
7108 }
7109 
7110 static int is_proc_myself(const char *filename, const char *entry)
7111 {
7112     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7113         filename += strlen("/proc/");
7114         if (!strncmp(filename, "self/", strlen("self/"))) {
7115             filename += strlen("self/");
7116         } else if (*filename >= '1' && *filename <= '9') {
7117             char myself[80];
7118             snprintf(myself, sizeof(myself), "%d/", getpid());
7119             if (!strncmp(filename, myself, strlen(myself))) {
7120                 filename += strlen(myself);
7121             } else {
7122                 return 0;
7123             }
7124         } else {
7125             return 0;
7126         }
7127         if (!strcmp(filename, entry)) {
7128             return 1;
7129         }
7130     }
7131     return 0;
7132 }
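/*
 * Usage sketch: is_proc_myself("/proc/self/maps", "maps") returns 1, as
 * does the same path spelled with this process's numeric PID; a path
 * naming any other PID, or one outside /proc, returns 0 and is handled
 * as an ordinary host open by do_openat() below.
 */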
7133 
7134 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7135     defined(TARGET_SPARC) || defined(TARGET_M68K)
7136 static int is_proc(const char *filename, const char *entry)
7137 {
7138     return strcmp(filename, entry) == 0;
7139 }
7140 #endif
7141 
7142 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7143 static int open_net_route(void *cpu_env, int fd)
7144 {
7145     FILE *fp;
7146     char *line = NULL;
7147     size_t len = 0;
7148     ssize_t read;
7149 
7150     fp = fopen("/proc/net/route", "r");
7151     if (fp == NULL) {
7152         return -1;
7153     }
7154 
7155     /* read header */
7156 
7157     read = getline(&line, &len, fp);
7158     dprintf(fd, "%s", line);
7159 
7160     /* read routes */
7161 
7162     while ((read = getline(&line, &len, fp)) != -1) {
7163         char iface[16];
7164         uint32_t dest, gw, mask;
7165         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7166         int fields;
7167 
7168         fields = sscanf(line,
7169                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7170                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7171                         &mask, &mtu, &window, &irtt);
7172         if (fields != 11) {
7173             continue;
7174         }
7175         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7176                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7177                 metric, tswap32(mask), mtu, window, irtt);
7178     }
7179 
7180     free(line);
7181     fclose(fp);
7182 
7183     return 0;
7184 }
7185 #endif
7186 
7187 #if defined(TARGET_SPARC)
7188 static int open_cpuinfo(void *cpu_env, int fd)
7189 {
7190     dprintf(fd, "type\t\t: sun4u\n");
7191     return 0;
7192 }
7193 #endif
7194 
7195 #if defined(TARGET_M68K)
7196 static int open_hardware(void *cpu_env, int fd)
7197 {
7198     dprintf(fd, "Model:\t\tqemu-m68k\n");
7199     return 0;
7200 }
7201 #endif
7202 
7203 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7204 {
7205     struct fake_open {
7206         const char *filename;
7207         int (*fill)(void *cpu_env, int fd);
7208         int (*cmp)(const char *s1, const char *s2);
7209     };
7210     const struct fake_open *fake_open;
7211     static const struct fake_open fakes[] = {
7212         { "maps", open_self_maps, is_proc_myself },
7213         { "stat", open_self_stat, is_proc_myself },
7214         { "auxv", open_self_auxv, is_proc_myself },
7215         { "cmdline", open_self_cmdline, is_proc_myself },
7216 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7217         { "/proc/net/route", open_net_route, is_proc },
7218 #endif
7219 #if defined(TARGET_SPARC)
7220         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7221 #endif
7222 #if defined(TARGET_M68K)
7223         { "/proc/hardware", open_hardware, is_proc },
7224 #endif
7225         { NULL, NULL, NULL }
7226     };
7227 
7228     if (is_proc_myself(pathname, "exe")) {
7229         int execfd = qemu_getauxval(AT_EXECFD);
7230         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7231     }
7232 
7233     for (fake_open = fakes; fake_open->filename; fake_open++) {
7234         if (fake_open->cmp(pathname, fake_open->filename)) {
7235             break;
7236         }
7237     }
7238 
7239     if (fake_open->filename) {
7240         const char *tmpdir;
7241         char filename[PATH_MAX];
7242         int fd, r;
7243 
7244         /* create a temporary file to hold the faked /proc contents */
7245         tmpdir = getenv("TMPDIR");
7246         if (!tmpdir)
7247             tmpdir = "/tmp";
7248         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7249         fd = mkstemp(filename);
7250         if (fd < 0) {
7251             return fd;
7252         }
7253         unlink(filename);
7254 
7255         if ((r = fake_open->fill(cpu_env, fd))) {
7256             int e = errno;
7257             close(fd);
7258             errno = e;
7259             return r;
7260         }
7261         lseek(fd, 0, SEEK_SET);
7262 
7263         return fd;
7264     }
7265 
7266     return safe_openat(dirfd, path(pathname), flags, mode);
7267 }
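/*
 * Behaviour sketch: a guest open("/proc/self/maps", O_RDONLY) never
 * reaches the host's /proc.  do_openat() creates an anonymous temporary
 * file (mkstemp() followed by unlink()), lets open_self_maps() write a
 * guest-address-translated copy of the mappings into it, rewinds the
 * descriptor and returns it, so the guest sees plausible contents for
 * its own address space.
 */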
7268 
7269 #define TIMER_MAGIC 0x0caf0000
7270 #define TIMER_MAGIC_MASK 0xffff0000
7271 
7272 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7273 static target_timer_t get_timer_id(abi_long arg)
7274 {
7275     target_timer_t timerid = arg;
7276 
7277     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7278         return -TARGET_EINVAL;
7279     }
7280 
7281     timerid &= 0xffff;
7282 
7283     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7284         return -TARGET_EINVAL;
7285     }
7286 
7287     return timerid;
7288 }
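/*
 * Worked example (assuming the timer_create path later in this file hands
 * out IDs as TIMER_MAGIC | index): the timer stored at g_posix_timers[3]
 * is exposed to the guest as 0x0caf0003, and get_timer_id(0x0caf0003)
 * recovers index 3.  Any value whose top 16 bits are not 0x0caf is
 * rejected with -TARGET_EINVAL.
 */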
7289 
7290 static int target_to_host_cpu_mask(unsigned long *host_mask,
7291                                    size_t host_size,
7292                                    abi_ulong target_addr,
7293                                    size_t target_size)
7294 {
7295     unsigned target_bits = sizeof(abi_ulong) * 8;
7296     unsigned host_bits = sizeof(*host_mask) * 8;
7297     abi_ulong *target_mask;
7298     unsigned i, j;
7299 
7300     assert(host_size >= target_size);
7301 
7302     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7303     if (!target_mask) {
7304         return -TARGET_EFAULT;
7305     }
7306     memset(host_mask, 0, host_size);
7307 
7308     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7309         unsigned bit = i * target_bits;
7310         abi_ulong val;
7311 
7312         __get_user(val, &target_mask[i]);
7313         for (j = 0; j < target_bits; j++, bit++) {
7314             if (val & (1UL << j)) {
7315                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7316             }
7317         }
7318     }
7319 
7320     unlock_user(target_mask, target_addr, 0);
7321     return 0;
7322 }
7323 
7324 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7325                                    size_t host_size,
7326                                    abi_ulong target_addr,
7327                                    size_t target_size)
7328 {
7329     unsigned target_bits = sizeof(abi_ulong) * 8;
7330     unsigned host_bits = sizeof(*host_mask) * 8;
7331     abi_ulong *target_mask;
7332     unsigned i, j;
7333 
7334     assert(host_size >= target_size);
7335 
7336     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7337     if (!target_mask) {
7338         return -TARGET_EFAULT;
7339     }
7340 
7341     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7342         unsigned bit = i * target_bits;
7343         abi_ulong val = 0;
7344 
7345         for (j = 0; j < target_bits; j++, bit++) {
7346             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7347                 val |= 1UL << j;
7348             }
7349         }
7350         __put_user(val, &target_mask[i]);
7351     }
7352 
7353     unlock_user(target_mask, target_addr, target_size);
7354     return 0;
7355 }
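/*
 * Worked example: a 32-bit guest passing an 8-byte affinity mask of
 * { 0x0000000f, 0x00000001 } (CPUs 0-3 plus CPU 32) is converted by
 * target_to_host_cpu_mask() above into the single 64-bit host word
 * 0x000000010000000f; host_to_target_cpu_mask() performs the inverse
 * split back into abi_ulong-sized words in the same bit order.
 */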
7356 
7357 /* This is an internal helper for do_syscall so that it is easier
7358  * to have a single return point, which in turn makes it possible to
7359  * perform actions such as logging of syscall results.
7360  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7361  */
7362 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7363                             abi_long arg2, abi_long arg3, abi_long arg4,
7364                             abi_long arg5, abi_long arg6, abi_long arg7,
7365                             abi_long arg8)
7366 {
7367     CPUState *cpu = env_cpu(cpu_env);
7368     abi_long ret;
7369 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7370     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7371     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7372     || defined(TARGET_NR_statx)
7373     struct stat st;
7374 #endif
7375 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7376     || defined(TARGET_NR_fstatfs)
7377     struct statfs stfs;
7378 #endif
7379     void *p;
7380 
7381     switch(num) {
7382     case TARGET_NR_exit:
7383         /* In old applications this may be used to implement _exit(2).
7384            However in threaded applications it is used for thread termination,
7385            and _exit_group is used for application termination.
7386            Do thread termination if we have more than one thread.  */
7387 
7388         if (block_signals()) {
7389             return -TARGET_ERESTARTSYS;
7390         }
7391 
7392         cpu_list_lock();
7393 
7394         if (CPU_NEXT(first_cpu)) {
7395             TaskState *ts;
7396 
7397             /* Remove the CPU from the list.  */
7398             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7399 
7400             cpu_list_unlock();
7401 
7402             ts = cpu->opaque;
7403             if (ts->child_tidptr) {
7404                 put_user_u32(0, ts->child_tidptr);
7405                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7406                           NULL, NULL, 0);
7407             }
7408             thread_cpu = NULL;
7409             object_unref(OBJECT(cpu));
7410             g_free(ts);
7411             rcu_unregister_thread();
7412             pthread_exit(NULL);
7413         }
7414 
7415         cpu_list_unlock();
7416         preexit_cleanup(cpu_env, arg1);
7417         _exit(arg1);
7418         return 0; /* avoid warning */
7419     case TARGET_NR_read:
7420         if (arg2 == 0 && arg3 == 0) {
7421             return get_errno(safe_read(arg1, 0, 0));
7422         } else {
7423             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7424                 return -TARGET_EFAULT;
7425             ret = get_errno(safe_read(arg1, p, arg3));
7426             if (ret >= 0 &&
7427                 fd_trans_host_to_target_data(arg1)) {
7428                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7429             }
7430             unlock_user(p, arg2, ret);
7431         }
7432         return ret;
7433     case TARGET_NR_write:
7434         if (arg2 == 0 && arg3 == 0) {
7435             return get_errno(safe_write(arg1, 0, 0));
7436         }
7437         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7438             return -TARGET_EFAULT;
7439         if (fd_trans_target_to_host_data(arg1)) {
7440             void *copy = g_malloc(arg3);
7441             memcpy(copy, p, arg3);
7442             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7443             if (ret >= 0) {
7444                 ret = get_errno(safe_write(arg1, copy, ret));
7445             }
7446             g_free(copy);
7447         } else {
7448             ret = get_errno(safe_write(arg1, p, arg3));
7449         }
7450         unlock_user(p, arg2, 0);
7451         return ret;
7452 
7453 #ifdef TARGET_NR_open
7454     case TARGET_NR_open:
7455         if (!(p = lock_user_string(arg1)))
7456             return -TARGET_EFAULT;
7457         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7458                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7459                                   arg3));
7460         fd_trans_unregister(ret);
7461         unlock_user(p, arg1, 0);
7462         return ret;
7463 #endif
7464     case TARGET_NR_openat:
7465         if (!(p = lock_user_string(arg2)))
7466             return -TARGET_EFAULT;
7467         ret = get_errno(do_openat(cpu_env, arg1, p,
7468                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7469                                   arg4));
7470         fd_trans_unregister(ret);
7471         unlock_user(p, arg2, 0);
7472         return ret;
7473 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7474     case TARGET_NR_name_to_handle_at:
7475         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7476         return ret;
7477 #endif
7478 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7479     case TARGET_NR_open_by_handle_at:
7480         ret = do_open_by_handle_at(arg1, arg2, arg3);
7481         fd_trans_unregister(ret);
7482         return ret;
7483 #endif
7484     case TARGET_NR_close:
7485         fd_trans_unregister(arg1);
7486         return get_errno(close(arg1));
7487 
7488     case TARGET_NR_brk:
7489         return do_brk(arg1);
7490 #ifdef TARGET_NR_fork
7491     case TARGET_NR_fork:
7492         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7493 #endif
7494 #ifdef TARGET_NR_waitpid
7495     case TARGET_NR_waitpid:
7496         {
7497             int status;
7498             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7499             if (!is_error(ret) && arg2 && ret
7500                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7501                 return -TARGET_EFAULT;
7502         }
7503         return ret;
7504 #endif
7505 #ifdef TARGET_NR_waitid
7506     case TARGET_NR_waitid:
7507         {
7508             siginfo_t info;
7509             info.si_pid = 0;
7510             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7511             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7512                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7513                     return -TARGET_EFAULT;
7514                 host_to_target_siginfo(p, &info);
7515                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7516             }
7517         }
7518         return ret;
7519 #endif
7520 #ifdef TARGET_NR_creat /* not on alpha */
7521     case TARGET_NR_creat:
7522         if (!(p = lock_user_string(arg1)))
7523             return -TARGET_EFAULT;
7524         ret = get_errno(creat(p, arg2));
7525         fd_trans_unregister(ret);
7526         unlock_user(p, arg1, 0);
7527         return ret;
7528 #endif
7529 #ifdef TARGET_NR_link
7530     case TARGET_NR_link:
7531         {
7532             void * p2;
7533             p = lock_user_string(arg1);
7534             p2 = lock_user_string(arg2);
7535             if (!p || !p2)
7536                 ret = -TARGET_EFAULT;
7537             else
7538                 ret = get_errno(link(p, p2));
7539             unlock_user(p2, arg2, 0);
7540             unlock_user(p, arg1, 0);
7541         }
7542         return ret;
7543 #endif
7544 #if defined(TARGET_NR_linkat)
7545     case TARGET_NR_linkat:
7546         {
7547             void * p2 = NULL;
7548             if (!arg2 || !arg4)
7549                 return -TARGET_EFAULT;
7550             p  = lock_user_string(arg2);
7551             p2 = lock_user_string(arg4);
7552             if (!p || !p2)
7553                 ret = -TARGET_EFAULT;
7554             else
7555                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7556             unlock_user(p, arg2, 0);
7557             unlock_user(p2, arg4, 0);
7558         }
7559         return ret;
7560 #endif
7561 #ifdef TARGET_NR_unlink
7562     case TARGET_NR_unlink:
7563         if (!(p = lock_user_string(arg1)))
7564             return -TARGET_EFAULT;
7565         ret = get_errno(unlink(p));
7566         unlock_user(p, arg1, 0);
7567         return ret;
7568 #endif
7569 #if defined(TARGET_NR_unlinkat)
7570     case TARGET_NR_unlinkat:
7571         if (!(p = lock_user_string(arg2)))
7572             return -TARGET_EFAULT;
7573         ret = get_errno(unlinkat(arg1, p, arg3));
7574         unlock_user(p, arg2, 0);
7575         return ret;
7576 #endif
7577     case TARGET_NR_execve:
7578         {
7579             char **argp, **envp;
7580             int argc, envc;
7581             abi_ulong gp;
7582             abi_ulong guest_argp;
7583             abi_ulong guest_envp;
7584             abi_ulong addr;
7585             char **q;
7586             int total_size = 0;
7587 
7588             argc = 0;
7589             guest_argp = arg2;
7590             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7591                 if (get_user_ual(addr, gp))
7592                     return -TARGET_EFAULT;
7593                 if (!addr)
7594                     break;
7595                 argc++;
7596             }
7597             envc = 0;
7598             guest_envp = arg3;
7599             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7600                 if (get_user_ual(addr, gp))
7601                     return -TARGET_EFAULT;
7602                 if (!addr)
7603                     break;
7604                 envc++;
7605             }
7606 
7607             argp = g_new0(char *, argc + 1);
7608             envp = g_new0(char *, envc + 1);
7609 
7610             for (gp = guest_argp, q = argp; gp;
7611                   gp += sizeof(abi_ulong), q++) {
7612                 if (get_user_ual(addr, gp))
7613                     goto execve_efault;
7614                 if (!addr)
7615                     break;
7616                 if (!(*q = lock_user_string(addr)))
7617                     goto execve_efault;
7618                 total_size += strlen(*q) + 1;
7619             }
7620             *q = NULL;
7621 
7622             for (gp = guest_envp, q = envp; gp;
7623                   gp += sizeof(abi_ulong), q++) {
7624                 if (get_user_ual(addr, gp))
7625                     goto execve_efault;
7626                 if (!addr)
7627                     break;
7628                 if (!(*q = lock_user_string(addr)))
7629                     goto execve_efault;
7630                 total_size += strlen(*q) + 1;
7631             }
7632             *q = NULL;
7633 
7634             if (!(p = lock_user_string(arg1)))
7635                 goto execve_efault;
7636             /* Although execve() is not an interruptible syscall it is
7637              * a special case where we must use the safe_syscall wrapper:
7638              * if we allow a signal to happen before we make the host
7639              * syscall then we will 'lose' it, because at the point of
7640              * execve the process leaves QEMU's control. So we use the
7641              * safe syscall wrapper to ensure that we either take the
7642              * signal as a guest signal, or else it does not happen
7643              * before the execve completes and makes it the other
7644              * program's problem.
7645              */
7646             ret = get_errno(safe_execve(p, argp, envp));
7647             unlock_user(p, arg1, 0);
7648 
7649             goto execve_end;
7650 
7651         execve_efault:
7652             ret = -TARGET_EFAULT;
7653 
7654         execve_end:
7655             for (gp = guest_argp, q = argp; *q;
7656                   gp += sizeof(abi_ulong), q++) {
7657                 if (get_user_ual(addr, gp)
7658                     || !addr)
7659                     break;
7660                 unlock_user(*q, addr, 0);
7661             }
7662             for (gp = guest_envp, q = envp; *q;
7663                   gp += sizeof(abi_ulong), q++) {
7664                 if (get_user_ual(addr, gp)
7665                     || !addr)
7666                     break;
7667                 unlock_user(*q, addr, 0);
7668             }
7669 
7670             g_free(argp);
7671             g_free(envp);
7672         }
7673         return ret;
7674     case TARGET_NR_chdir:
7675         if (!(p = lock_user_string(arg1)))
7676             return -TARGET_EFAULT;
7677         ret = get_errno(chdir(p));
7678         unlock_user(p, arg1, 0);
7679         return ret;
7680 #ifdef TARGET_NR_time
7681     case TARGET_NR_time:
7682         {
7683             time_t host_time;
7684             ret = get_errno(time(&host_time));
7685             if (!is_error(ret)
7686                 && arg1
7687                 && put_user_sal(host_time, arg1))
7688                 return -TARGET_EFAULT;
7689         }
7690         return ret;
7691 #endif
7692 #ifdef TARGET_NR_mknod
7693     case TARGET_NR_mknod:
7694         if (!(p = lock_user_string(arg1)))
7695             return -TARGET_EFAULT;
7696         ret = get_errno(mknod(p, arg2, arg3));
7697         unlock_user(p, arg1, 0);
7698         return ret;
7699 #endif
7700 #if defined(TARGET_NR_mknodat)
7701     case TARGET_NR_mknodat:
7702         if (!(p = lock_user_string(arg2)))
7703             return -TARGET_EFAULT;
7704         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7705         unlock_user(p, arg2, 0);
7706         return ret;
7707 #endif
7708 #ifdef TARGET_NR_chmod
7709     case TARGET_NR_chmod:
7710         if (!(p = lock_user_string(arg1)))
7711             return -TARGET_EFAULT;
7712         ret = get_errno(chmod(p, arg2));
7713         unlock_user(p, arg1, 0);
7714         return ret;
7715 #endif
7716 #ifdef TARGET_NR_lseek
7717     case TARGET_NR_lseek:
7718         return get_errno(lseek(arg1, arg2, arg3));
7719 #endif
7720 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7721     /* Alpha specific */
7722     case TARGET_NR_getxpid:
7723         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7724         return get_errno(getpid());
7725 #endif
7726 #ifdef TARGET_NR_getpid
7727     case TARGET_NR_getpid:
7728         return get_errno(getpid());
7729 #endif
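    /*
     * mount(2): arg1 = source, arg2 = target, arg3 = filesystem type,
     * arg4 = mountflags, arg5 = data.  Only the target path is mandatory;
     * source, fstype and data may legitimately be NULL, which is why each
     * of them is only locked when non-zero below.
     */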
7730     case TARGET_NR_mount:
7731         {
7732             /* need to look at the data field */
7733             void *p2, *p3;
7734 
7735             if (arg1) {
7736                 p = lock_user_string(arg1);
7737                 if (!p) {
7738                     return -TARGET_EFAULT;
7739                 }
7740             } else {
7741                 p = NULL;
7742             }
7743 
7744             p2 = lock_user_string(arg2);
7745             if (!p2) {
7746                 if (arg1) {
7747                     unlock_user(p, arg1, 0);
7748                 }
7749                 return -TARGET_EFAULT;
7750             }
7751 
7752             if (arg3) {
7753                 p3 = lock_user_string(arg3);
7754                 if (!p3) {
7755                     if (arg1) {
7756                         unlock_user(p, arg1, 0);
7757                     }
7758                     unlock_user(p2, arg2, 0);
7759                     return -TARGET_EFAULT;
7760                 }
7761             } else {
7762                 p3 = NULL;
7763             }
7764 
7765             /* FIXME - arg5 should be locked, but it isn't clear how to
7766              * do that since it's not guaranteed to be a NULL-terminated
7767              * string.
7768              */
7769             if (!arg5) {
7770                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7771             } else {
7772                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7773             }
7774             ret = get_errno(ret);
7775 
7776             if (arg1) {
7777                 unlock_user(p, arg1, 0);
7778             }
7779             unlock_user(p2, arg2, 0);
7780             if (arg3) {
7781                 unlock_user(p3, arg3, 0);
7782             }
7783         }
7784         return ret;
7785 #ifdef TARGET_NR_umount
7786     case TARGET_NR_umount:
7787         if (!(p = lock_user_string(arg1)))
7788             return -TARGET_EFAULT;
7789         ret = get_errno(umount(p));
7790         unlock_user(p, arg1, 0);
7791         return ret;
7792 #endif
7793 #ifdef TARGET_NR_stime /* not on alpha */
7794     case TARGET_NR_stime:
7795         {
7796             struct timespec ts;
7797             ts.tv_nsec = 0;
7798             if (get_user_sal(ts.tv_sec, arg1)) {
7799                 return -TARGET_EFAULT;
7800             }
7801             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
7802         }
7803 #endif
7804 #ifdef TARGET_NR_alarm /* not on alpha */
7805     case TARGET_NR_alarm:
7806         return alarm(arg1);
7807 #endif
7808 #ifdef TARGET_NR_pause /* not on alpha */
7809     case TARGET_NR_pause:
7810         if (!block_signals()) {
7811             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7812         }
7813         return -TARGET_EINTR;
7814 #endif
7815 #ifdef TARGET_NR_utime
7816     case TARGET_NR_utime:
7817         {
7818             struct utimbuf tbuf, *host_tbuf;
7819             struct target_utimbuf *target_tbuf;
7820             if (arg2) {
7821                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7822                     return -TARGET_EFAULT;
7823                 tbuf.actime = tswapal(target_tbuf->actime);
7824                 tbuf.modtime = tswapal(target_tbuf->modtime);
7825                 unlock_user_struct(target_tbuf, arg2, 0);
7826                 host_tbuf = &tbuf;
7827             } else {
7828                 host_tbuf = NULL;
7829             }
7830             if (!(p = lock_user_string(arg1)))
7831                 return -TARGET_EFAULT;
7832             ret = get_errno(utime(p, host_tbuf));
7833             unlock_user(p, arg1, 0);
7834         }
7835         return ret;
7836 #endif
7837 #ifdef TARGET_NR_utimes
7838     case TARGET_NR_utimes:
7839         {
7840             struct timeval *tvp, tv[2];
7841             if (arg2) {
7842                 if (copy_from_user_timeval(&tv[0], arg2)
7843                     || copy_from_user_timeval(&tv[1],
7844                                               arg2 + sizeof(struct target_timeval)))
7845                     return -TARGET_EFAULT;
7846                 tvp = tv;
7847             } else {
7848                 tvp = NULL;
7849             }
7850             if (!(p = lock_user_string(arg1)))
7851                 return -TARGET_EFAULT;
7852             ret = get_errno(utimes(p, tvp));
7853             unlock_user(p, arg1, 0);
7854         }
7855         return ret;
7856 #endif
7857 #if defined(TARGET_NR_futimesat)
7858     case TARGET_NR_futimesat:
7859         {
7860             struct timeval *tvp, tv[2];
7861             if (arg3) {
7862                 if (copy_from_user_timeval(&tv[0], arg3)
7863                     || copy_from_user_timeval(&tv[1],
7864                                               arg3 + sizeof(struct target_timeval)))
7865                     return -TARGET_EFAULT;
7866                 tvp = tv;
7867             } else {
7868                 tvp = NULL;
7869             }
7870             if (!(p = lock_user_string(arg2))) {
7871                 return -TARGET_EFAULT;
7872             }
7873             ret = get_errno(futimesat(arg1, path(p), tvp));
7874             unlock_user(p, arg2, 0);
7875         }
7876         return ret;
7877 #endif
7878 #ifdef TARGET_NR_access
7879     case TARGET_NR_access:
7880         if (!(p = lock_user_string(arg1))) {
7881             return -TARGET_EFAULT;
7882         }
7883         ret = get_errno(access(path(p), arg2));
7884         unlock_user(p, arg1, 0);
7885         return ret;
7886 #endif
7887 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7888     case TARGET_NR_faccessat:
7889         if (!(p = lock_user_string(arg2))) {
7890             return -TARGET_EFAULT;
7891         }
7892         ret = get_errno(faccessat(arg1, p, arg3, 0));
7893         unlock_user(p, arg2, 0);
7894         return ret;
7895 #endif
7896 #ifdef TARGET_NR_nice /* not on alpha */
7897     case TARGET_NR_nice:
7898         return get_errno(nice(arg1));
7899 #endif
7900     case TARGET_NR_sync:
7901         sync();
7902         return 0;
7903 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7904     case TARGET_NR_syncfs:
7905         return get_errno(syncfs(arg1));
7906 #endif
7907     case TARGET_NR_kill:
7908         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7909 #ifdef TARGET_NR_rename
7910     case TARGET_NR_rename:
7911         {
7912             void *p2;
7913             p = lock_user_string(arg1);
7914             p2 = lock_user_string(arg2);
7915             if (!p || !p2)
7916                 ret = -TARGET_EFAULT;
7917             else
7918                 ret = get_errno(rename(p, p2));
7919             unlock_user(p2, arg2, 0);
7920             unlock_user(p, arg1, 0);
7921         }
7922         return ret;
7923 #endif
7924 #if defined(TARGET_NR_renameat)
7925     case TARGET_NR_renameat:
7926         {
7927             void *p2;
7928             p  = lock_user_string(arg2);
7929             p2 = lock_user_string(arg4);
7930             if (!p || !p2)
7931                 ret = -TARGET_EFAULT;
7932             else
7933                 ret = get_errno(renameat(arg1, p, arg3, p2));
7934             unlock_user(p2, arg4, 0);
7935             unlock_user(p, arg2, 0);
7936         }
7937         return ret;
7938 #endif
7939 #if defined(TARGET_NR_renameat2)
7940     case TARGET_NR_renameat2:
7941         {
7942             void *p2;
7943             p  = lock_user_string(arg2);
7944             p2 = lock_user_string(arg4);
7945             if (!p || !p2) {
7946                 ret = -TARGET_EFAULT;
7947             } else {
7948                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7949             }
7950             unlock_user(p2, arg4, 0);
7951             unlock_user(p, arg2, 0);
7952         }
7953         return ret;
7954 #endif
7955 #ifdef TARGET_NR_mkdir
7956     case TARGET_NR_mkdir:
7957         if (!(p = lock_user_string(arg1)))
7958             return -TARGET_EFAULT;
7959         ret = get_errno(mkdir(p, arg2));
7960         unlock_user(p, arg1, 0);
7961         return ret;
7962 #endif
7963 #if defined(TARGET_NR_mkdirat)
7964     case TARGET_NR_mkdirat:
7965         if (!(p = lock_user_string(arg2)))
7966             return -TARGET_EFAULT;
7967         ret = get_errno(mkdirat(arg1, p, arg3));
7968         unlock_user(p, arg2, 0);
7969         return ret;
7970 #endif
7971 #ifdef TARGET_NR_rmdir
7972     case TARGET_NR_rmdir:
7973         if (!(p = lock_user_string(arg1)))
7974             return -TARGET_EFAULT;
7975         ret = get_errno(rmdir(p));
7976         unlock_user(p, arg1, 0);
7977         return ret;
7978 #endif
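    /*
     * dup/dup2/dup3 also propagate any fd translator registered for the
     * original descriptor to the new one via fd_trans_dup(), so translated
     * file descriptors keep working after duplication.
     */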
7979     case TARGET_NR_dup:
7980         ret = get_errno(dup(arg1));
7981         if (ret >= 0) {
7982             fd_trans_dup(arg1, ret);
7983         }
7984         return ret;
7985 #ifdef TARGET_NR_pipe
7986     case TARGET_NR_pipe:
7987         return do_pipe(cpu_env, arg1, 0, 0);
7988 #endif
7989 #ifdef TARGET_NR_pipe2
7990     case TARGET_NR_pipe2:
7991         return do_pipe(cpu_env, arg1,
7992                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7993 #endif
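    /*
     * times() both fills a struct tms and returns a clock_t, so on success
     * the buffer fields and the return value are each converted with
     * host_to_target_clock_t().
     */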
7994     case TARGET_NR_times:
7995         {
7996             struct target_tms *tmsp;
7997             struct tms tms;
7998             ret = get_errno(times(&tms));
7999             if (arg1) {
8000                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8001                 if (!tmsp)
8002                     return -TARGET_EFAULT;
8003                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8004                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8005                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8006                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8007             }
8008             if (!is_error(ret))
8009                 ret = host_to_target_clock_t(ret);
8010         }
8011         return ret;
8012     case TARGET_NR_acct:
8013         if (arg1 == 0) {
8014             ret = get_errno(acct(NULL));
8015         } else {
8016             if (!(p = lock_user_string(arg1))) {
8017                 return -TARGET_EFAULT;
8018             }
8019             ret = get_errno(acct(path(p)));
8020             unlock_user(p, arg1, 0);
8021         }
8022         return ret;
8023 #ifdef TARGET_NR_umount2
8024     case TARGET_NR_umount2:
8025         if (!(p = lock_user_string(arg1)))
8026             return -TARGET_EFAULT;
8027         ret = get_errno(umount2(p, arg2));
8028         unlock_user(p, arg1, 0);
8029         return ret;
8030 #endif
8031     case TARGET_NR_ioctl:
8032         return do_ioctl(arg1, arg2, arg3);
8033 #ifdef TARGET_NR_fcntl
8034     case TARGET_NR_fcntl:
8035         return do_fcntl(arg1, arg2, arg3);
8036 #endif
8037     case TARGET_NR_setpgid:
8038         return get_errno(setpgid(arg1, arg2));
8039     case TARGET_NR_umask:
8040         return get_errno(umask(arg1));
8041     case TARGET_NR_chroot:
8042         if (!(p = lock_user_string(arg1)))
8043             return -TARGET_EFAULT;
8044         ret = get_errno(chroot(p));
8045         unlock_user(p, arg1, 0);
8046         return ret;
8047 #ifdef TARGET_NR_dup2
8048     case TARGET_NR_dup2:
8049         ret = get_errno(dup2(arg1, arg2));
8050         if (ret >= 0) {
8051             fd_trans_dup(arg1, arg2);
8052         }
8053         return ret;
8054 #endif
8055 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8056     case TARGET_NR_dup3:
8057     {
8058         int host_flags;
8059 
8060         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8061             return -TARGET_EINVAL;
8062         }
8063         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8064         ret = get_errno(dup3(arg1, arg2, host_flags));
8065         if (ret >= 0) {
8066             fd_trans_dup(arg1, arg2);
8067         }
8068         return ret;
8069     }
8070 #endif
8071 #ifdef TARGET_NR_getppid /* not on alpha */
8072     case TARGET_NR_getppid:
8073         return get_errno(getppid());
8074 #endif
8075 #ifdef TARGET_NR_getpgrp
8076     case TARGET_NR_getpgrp:
8077         return get_errno(getpgrp());
8078 #endif
8079     case TARGET_NR_setsid:
8080         return get_errno(setsid());
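    /*
     * The old sigaction syscall has per-architecture ABI quirks, so three
     * variants follow: Alpha (old_sigaction with a single-word mask and no
     * restorer), MIPS (its own sigaction layout with a multi-word mask),
     * and the generic old_sigaction layout with an sa_restorer field.
     */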
8081 #ifdef TARGET_NR_sigaction
8082     case TARGET_NR_sigaction:
8083         {
8084 #if defined(TARGET_ALPHA)
8085             struct target_sigaction act, oact, *pact = 0;
8086             struct target_old_sigaction *old_act;
8087             if (arg2) {
8088                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8089                     return -TARGET_EFAULT;
8090                 act._sa_handler = old_act->_sa_handler;
8091                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8092                 act.sa_flags = old_act->sa_flags;
8093                 act.sa_restorer = 0;
8094                 unlock_user_struct(old_act, arg2, 0);
8095                 pact = &act;
8096             }
8097             ret = get_errno(do_sigaction(arg1, pact, &oact));
8098             if (!is_error(ret) && arg3) {
8099                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8100                     return -TARGET_EFAULT;
8101                 old_act->_sa_handler = oact._sa_handler;
8102                 old_act->sa_mask = oact.sa_mask.sig[0];
8103                 old_act->sa_flags = oact.sa_flags;
8104                 unlock_user_struct(old_act, arg3, 1);
8105             }
8106 #elif defined(TARGET_MIPS)
8107             struct target_sigaction act, oact, *pact, *old_act;
8108
8109             if (arg2) {
8110                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8111                     return -TARGET_EFAULT;
8112                 act._sa_handler = old_act->_sa_handler;
8113                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8114                 act.sa_flags = old_act->sa_flags;
8115                 unlock_user_struct(old_act, arg2, 0);
8116                 pact = &act;
8117             } else {
8118                 pact = NULL;
8119             }
8120
8121             ret = get_errno(do_sigaction(arg1, pact, &oact));
8122
8123             if (!is_error(ret) && arg3) {
8124                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8125                     return -TARGET_EFAULT;
8126                 old_act->_sa_handler = oact._sa_handler;
8127                 old_act->sa_flags = oact.sa_flags;
8128                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8129                 old_act->sa_mask.sig[1] = 0;
8130                 old_act->sa_mask.sig[2] = 0;
8131                 old_act->sa_mask.sig[3] = 0;
8132                 unlock_user_struct(old_act, arg3, 1);
8133             }
8134 #else
8135             struct target_old_sigaction *old_act;
8136             struct target_sigaction act, oact, *pact;
8137             if (arg2) {
8138                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8139                     return -TARGET_EFAULT;
8140                 act._sa_handler = old_act->_sa_handler;
8141                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8142                 act.sa_flags = old_act->sa_flags;
8143                 act.sa_restorer = old_act->sa_restorer;
8144 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8145                 act.ka_restorer = 0;
8146 #endif
8147                 unlock_user_struct(old_act, arg2, 0);
8148                 pact = &act;
8149             } else {
8150                 pact = NULL;
8151             }
8152             ret = get_errno(do_sigaction(arg1, pact, &oact));
8153             if (!is_error(ret) && arg3) {
8154                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8155                     return -TARGET_EFAULT;
8156                 old_act->_sa_handler = oact._sa_handler;
8157                 old_act->sa_mask = oact.sa_mask.sig[0];
8158                 old_act->sa_flags = oact.sa_flags;
8159                 old_act->sa_restorer = oact.sa_restorer;
8160                 unlock_user_struct(old_act, arg3, 1);
8161             }
8162 #endif
8163         }
8164         return ret;
8165 #endif
8166     case TARGET_NR_rt_sigaction:
8167         {
8168 #if defined(TARGET_ALPHA)
8169             /* For Alpha and SPARC this is a 5 argument syscall, with
8170              * a 'restorer' parameter which must be copied into the
8171              * sa_restorer field of the sigaction struct.
8172              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8173              * and arg5 is the sigsetsize.
8174              * Alpha also has a separate rt_sigaction struct that it uses
8175              * here; SPARC uses the usual sigaction struct.
8176              */
8177             struct target_rt_sigaction *rt_act;
8178             struct target_sigaction act, oact, *pact = 0;
8179 
8180             if (arg4 != sizeof(target_sigset_t)) {
8181                 return -TARGET_EINVAL;
8182             }
8183             if (arg2) {
8184                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8185                     return -TARGET_EFAULT;
8186                 act._sa_handler = rt_act->_sa_handler;
8187                 act.sa_mask = rt_act->sa_mask;
8188                 act.sa_flags = rt_act->sa_flags;
8189                 act.sa_restorer = arg5;
8190                 unlock_user_struct(rt_act, arg2, 0);
8191                 pact = &act;
8192             }
8193             ret = get_errno(do_sigaction(arg1, pact, &oact));
8194             if (!is_error(ret) && arg3) {
8195                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8196                     return -TARGET_EFAULT;
8197                 rt_act->_sa_handler = oact._sa_handler;
8198                 rt_act->sa_mask = oact.sa_mask;
8199                 rt_act->sa_flags = oact.sa_flags;
8200                 unlock_user_struct(rt_act, arg3, 1);
8201             }
8202 #else
8203 #ifdef TARGET_SPARC
8204             target_ulong restorer = arg4;
8205             target_ulong sigsetsize = arg5;
8206 #else
8207             target_ulong sigsetsize = arg4;
8208 #endif
8209             struct target_sigaction *act;
8210             struct target_sigaction *oact;
8211 
8212             if (sigsetsize != sizeof(target_sigset_t)) {
8213                 return -TARGET_EINVAL;
8214             }
8215             if (arg2) {
8216                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8217                     return -TARGET_EFAULT;
8218                 }
8219 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8220                 act->ka_restorer = restorer;
8221 #endif
8222             } else {
8223                 act = NULL;
8224             }
8225             if (arg3) {
8226                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8227                     ret = -TARGET_EFAULT;
8228                     goto rt_sigaction_fail;
8229                 }
8230             } else
8231                 oact = NULL;
8232             ret = get_errno(do_sigaction(arg1, act, oact));
8233         rt_sigaction_fail:
8234             if (act)
8235                 unlock_user_struct(act, arg2, 0);
8236             if (oact)
8237                 unlock_user_struct(oact, arg3, 1);
8238 #endif
8239         }
8240         return ret;
8241 #ifdef TARGET_NR_sgetmask /* not on alpha */
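    /*
     * sgetmask/ssetmask operate on the historical single-word signal mask;
     * both are emulated on top of do_sigprocmask() with old_sigset
     * conversions in each direction.
     */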
8242     case TARGET_NR_sgetmask:
8243         {
8244             sigset_t cur_set;
8245             abi_ulong target_set;
8246             ret = do_sigprocmask(0, NULL, &cur_set);
8247             if (!ret) {
8248                 host_to_target_old_sigset(&target_set, &cur_set);
8249                 ret = target_set;
8250             }
8251         }
8252         return ret;
8253 #endif
8254 #ifdef TARGET_NR_ssetmask /* not on alpha */
8255     case TARGET_NR_ssetmask:
8256         {
8257             sigset_t set, oset;
8258             abi_ulong target_set = arg1;
8259             target_to_host_old_sigset(&set, &target_set);
8260             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8261             if (!ret) {
8262                 host_to_target_old_sigset(&target_set, &oset);
8263                 ret = target_set;
8264             }
8265         }
8266         return ret;
8267 #endif
8268 #ifdef TARGET_NR_sigprocmask
8269     case TARGET_NR_sigprocmask:
8270         {
8271 #if defined(TARGET_ALPHA)
8272             sigset_t set, oldset;
8273             abi_ulong mask;
8274             int how;
8275 
8276             switch (arg1) {
8277             case TARGET_SIG_BLOCK:
8278                 how = SIG_BLOCK;
8279                 break;
8280             case TARGET_SIG_UNBLOCK:
8281                 how = SIG_UNBLOCK;
8282                 break;
8283             case TARGET_SIG_SETMASK:
8284                 how = SIG_SETMASK;
8285                 break;
8286             default:
8287                 return -TARGET_EINVAL;
8288             }
8289             mask = arg2;
8290             target_to_host_old_sigset(&set, &mask);
8291 
8292             ret = do_sigprocmask(how, &set, &oldset);
8293             if (!is_error(ret)) {
8294                 host_to_target_old_sigset(&mask, &oldset);
8295                 ret = mask;
8296                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8297             }
8298 #else
8299             sigset_t set, oldset, *set_ptr;
8300             int how;
8301 
8302             if (arg2) {
8303                 switch (arg1) {
8304                 case TARGET_SIG_BLOCK:
8305                     how = SIG_BLOCK;
8306                     break;
8307                 case TARGET_SIG_UNBLOCK:
8308                     how = SIG_UNBLOCK;
8309                     break;
8310                 case TARGET_SIG_SETMASK:
8311                     how = SIG_SETMASK;
8312                     break;
8313                 default:
8314                     return -TARGET_EINVAL;
8315                 }
8316                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8317                     return -TARGET_EFAULT;
8318                 target_to_host_old_sigset(&set, p);
8319                 unlock_user(p, arg2, 0);
8320                 set_ptr = &set;
8321             } else {
8322                 how = 0;
8323                 set_ptr = NULL;
8324             }
8325             ret = do_sigprocmask(how, set_ptr, &oldset);
8326             if (!is_error(ret) && arg3) {
8327                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8328                     return -TARGET_EFAULT;
8329                 host_to_target_old_sigset(p, &oldset);
8330                 unlock_user(p, arg3, sizeof(target_sigset_t));
8331             }
8332 #endif
8333         }
8334         return ret;
8335 #endif
8336     case TARGET_NR_rt_sigprocmask:
8337         {
8338             int how = arg1;
8339             sigset_t set, oldset, *set_ptr;
8340 
8341             if (arg4 != sizeof(target_sigset_t)) {
8342                 return -TARGET_EINVAL;
8343             }
8344 
8345             if (arg2) {
8346                 switch(how) {
8347                 case TARGET_SIG_BLOCK:
8348                     how = SIG_BLOCK;
8349                     break;
8350                 case TARGET_SIG_UNBLOCK:
8351                     how = SIG_UNBLOCK;
8352                     break;
8353                 case TARGET_SIG_SETMASK:
8354                     how = SIG_SETMASK;
8355                     break;
8356                 default:
8357                     return -TARGET_EINVAL;
8358                 }
8359                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8360                     return -TARGET_EFAULT;
8361                 target_to_host_sigset(&set, p);
8362                 unlock_user(p, arg2, 0);
8363                 set_ptr = &set;
8364             } else {
8365                 how = 0;
8366                 set_ptr = NULL;
8367             }
8368             ret = do_sigprocmask(how, set_ptr, &oldset);
8369             if (!is_error(ret) && arg3) {
8370                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8371                     return -TARGET_EFAULT;
8372                 host_to_target_sigset(p, &oldset);
8373                 unlock_user(p, arg3, sizeof(target_sigset_t));
8374             }
8375         }
8376         return ret;
8377 #ifdef TARGET_NR_sigpending
8378     case TARGET_NR_sigpending:
8379         {
8380             sigset_t set;
8381             ret = get_errno(sigpending(&set));
8382             if (!is_error(ret)) {
8383                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8384                     return -TARGET_EFAULT;
8385                 host_to_target_old_sigset(p, &set);
8386                 unlock_user(p, arg1, sizeof(target_sigset_t));
8387             }
8388         }
8389         return ret;
8390 #endif
8391     case TARGET_NR_rt_sigpending:
8392         {
8393             sigset_t set;
8394 
8395             /* Yes, this check is >, not != like most. We follow the kernel's
8396              * logic and it does it like this because it implements
8397              * NR_sigpending through the same code path, and in that case
8398              * the old_sigset_t is smaller in size.
8399              */
8400             if (arg2 > sizeof(target_sigset_t)) {
8401                 return -TARGET_EINVAL;
8402             }
8403 
8404             ret = get_errno(sigpending(&set));
8405             if (!is_error(ret)) {
8406                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8407                     return -TARGET_EFAULT;
8408                 host_to_target_sigset(p, &set);
8409                 unlock_user(p, arg1, sizeof(target_sigset_t));
8410             }
8411         }
8412         return ret;
8413 #ifdef TARGET_NR_sigsuspend
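    /*
     * For sigsuspend/rt_sigsuspend the requested mask is stored in the
     * TaskState and in_sigsuspend is set (unless the call was restarted),
     * so the signal handling code knows the caller's original mask has to
     * be restored once a signal has actually been delivered.
     */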
8414     case TARGET_NR_sigsuspend:
8415         {
8416             TaskState *ts = cpu->opaque;
8417 #if defined(TARGET_ALPHA)
8418             abi_ulong mask = arg1;
8419             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8420 #else
8421             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8422                 return -TARGET_EFAULT;
8423             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8424             unlock_user(p, arg1, 0);
8425 #endif
8426             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8427                                                SIGSET_T_SIZE));
8428             if (ret != -TARGET_ERESTARTSYS) {
8429                 ts->in_sigsuspend = 1;
8430             }
8431         }
8432         return ret;
8433 #endif
8434     case TARGET_NR_rt_sigsuspend:
8435         {
8436             TaskState *ts = cpu->opaque;
8437 
8438             if (arg2 != sizeof(target_sigset_t)) {
8439                 return -TARGET_EINVAL;
8440             }
8441             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8442                 return -TARGET_EFAULT;
8443             target_to_host_sigset(&ts->sigsuspend_mask, p);
8444             unlock_user(p, arg1, 0);
8445             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8446                                                SIGSET_T_SIZE));
8447             if (ret != -TARGET_ERESTARTSYS) {
8448                 ts->in_sigsuspend = 1;
8449             }
8450         }
8451         return ret;
8452     case TARGET_NR_rt_sigtimedwait:
8453         {
8454             sigset_t set;
8455             struct timespec uts, *puts;
8456             siginfo_t uinfo;
8457 
8458             if (arg4 != sizeof(target_sigset_t)) {
8459                 return -TARGET_EINVAL;
8460             }
8461 
8462             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8463                 return -TARGET_EFAULT;
8464             target_to_host_sigset(&set, p);
8465             unlock_user(p, arg1, 0);
8466             if (arg3) {
8467                 puts = &uts;
8468                 if (target_to_host_timespec(puts, arg3)) {
                         return -TARGET_EFAULT;
                     }
8469             } else {
8470                 puts = NULL;
8471             }
8472             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8473                                                  SIGSET_T_SIZE));
8474             if (!is_error(ret)) {
8475                 if (arg2) {
8476                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8477                                   0);
8478                     if (!p) {
8479                         return -TARGET_EFAULT;
8480                     }
8481                     host_to_target_siginfo(p, &uinfo);
8482                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8483                 }
8484                 ret = host_to_target_signal(ret);
8485             }
8486         }
8487         return ret;
8488     case TARGET_NR_rt_sigqueueinfo:
8489         {
8490             siginfo_t uinfo;
8491 
8492             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8493             if (!p) {
8494                 return -TARGET_EFAULT;
8495             }
8496             target_to_host_siginfo(&uinfo, p);
8497             unlock_user(p, arg3, 0);
8498             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8499         }
8500         return ret;
8501     case TARGET_NR_rt_tgsigqueueinfo:
8502         {
8503             siginfo_t uinfo;
8504 
8505             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8506             if (!p) {
8507                 return -TARGET_EFAULT;
8508             }
8509             target_to_host_siginfo(&uinfo, p);
8510             unlock_user(p, arg4, 0);
8511             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8512         }
8513         return ret;
8514 #ifdef TARGET_NR_sigreturn
8515     case TARGET_NR_sigreturn:
8516         if (block_signals()) {
8517             return -TARGET_ERESTARTSYS;
8518         }
8519         return do_sigreturn(cpu_env);
8520 #endif
8521     case TARGET_NR_rt_sigreturn:
8522         if (block_signals()) {
8523             return -TARGET_ERESTARTSYS;
8524         }
8525         return do_rt_sigreturn(cpu_env);
8526     case TARGET_NR_sethostname:
8527         if (!(p = lock_user_string(arg1)))
8528             return -TARGET_EFAULT;
8529         ret = get_errno(sethostname(p, arg2));
8530         unlock_user(p, arg1, 0);
8531         return ret;
8532 #ifdef TARGET_NR_setrlimit
8533     case TARGET_NR_setrlimit:
8534         {
8535             int resource = target_to_host_resource(arg1);
8536             struct target_rlimit *target_rlim;
8537             struct rlimit rlim;
8538             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8539                 return -TARGET_EFAULT;
8540             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8541             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8542             unlock_user_struct(target_rlim, arg2, 0);
8543             /*
8544              * If we just passed through resource limit settings for memory then
8545              * they would also apply to QEMU's own allocations, and QEMU will
8546              * crash or hang or die if its allocations fail. Ideally we would
8547              * track the guest allocations in QEMU and apply the limits ourselves.
8548              * For now, just tell the guest the call succeeded but don't actually
8549              * limit anything.
8550              */
8551             if (resource != RLIMIT_AS &&
8552                 resource != RLIMIT_DATA &&
8553                 resource != RLIMIT_STACK) {
8554                 return get_errno(setrlimit(resource, &rlim));
8555             } else {
8556                 return 0;
8557             }
8558         }
8559 #endif
8560 #ifdef TARGET_NR_getrlimit
8561     case TARGET_NR_getrlimit:
8562         {
8563             int resource = target_to_host_resource(arg1);
8564             struct target_rlimit *target_rlim;
8565             struct rlimit rlim;
8566 
8567             ret = get_errno(getrlimit(resource, &rlim));
8568             if (!is_error(ret)) {
8569                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8570                     return -TARGET_EFAULT;
8571                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8572                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8573                 unlock_user_struct(target_rlim, arg2, 1);
8574             }
8575         }
8576         return ret;
8577 #endif
8578     case TARGET_NR_getrusage:
8579         {
8580             struct rusage rusage;
8581             ret = get_errno(getrusage(arg1, &rusage));
8582             if (!is_error(ret)) {
8583                 ret = host_to_target_rusage(arg2, &rusage);
8584             }
8585         }
8586         return ret;
8587     case TARGET_NR_gettimeofday:
8588         {
8589             struct timeval tv;
8590             ret = get_errno(gettimeofday(&tv, NULL));
8591             if (!is_error(ret)) {
8592                 if (copy_to_user_timeval(arg1, &tv))
8593                     return -TARGET_EFAULT;
8594             }
8595         }
8596         return ret;
8597     case TARGET_NR_settimeofday:
8598         {
8599             struct timeval tv, *ptv = NULL;
8600             struct timezone tz, *ptz = NULL;
8601 
8602             if (arg1) {
8603                 if (copy_from_user_timeval(&tv, arg1)) {
8604                     return -TARGET_EFAULT;
8605                 }
8606                 ptv = &tv;
8607             }
8608 
8609             if (arg2) {
8610                 if (copy_from_user_timezone(&tz, arg2)) {
8611                     return -TARGET_EFAULT;
8612                 }
8613                 ptz = &tz;
8614             }
8615 
8616             return get_errno(settimeofday(ptv, ptz));
8617         }
8618 #if defined(TARGET_NR_select)
8619     case TARGET_NR_select:
8620 #if defined(TARGET_WANT_NI_OLD_SELECT)
8621         /* Some architectures used to have old_select here
8622          * but now return ENOSYS for it.
8623          */
8624         ret = -TARGET_ENOSYS;
8625 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8626         ret = do_old_select(arg1);
8627 #else
8628         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8629 #endif
8630         return ret;
8631 #endif
8632 #ifdef TARGET_NR_pselect6
8633     case TARGET_NR_pselect6:
8634         {
8635             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8636             fd_set rfds, wfds, efds;
8637             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8638             struct timespec ts, *ts_ptr;
8639 
8640             /*
8641              * The 6th arg is actually two args smashed together,
8642              * so we cannot use the C library.
8643              */
8644             sigset_t set;
8645             struct {
8646                 sigset_t *set;
8647                 size_t size;
8648             } sig, *sig_ptr;
8649 
8650             abi_ulong arg_sigset, arg_sigsize, *arg7;
8651             target_sigset_t *target_sigset;
8652 
8653             n = arg1;
8654             rfd_addr = arg2;
8655             wfd_addr = arg3;
8656             efd_addr = arg4;
8657             ts_addr = arg5;
8658 
8659             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8660             if (ret) {
8661                 return ret;
8662             }
8663             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8664             if (ret) {
8665                 return ret;
8666             }
8667             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8668             if (ret) {
8669                 return ret;
8670             }
8671 
8672             /*
8673              * This takes a timespec, and not a timeval, so we cannot
8674              * use the do_select() helper ...
8675              */
8676             if (ts_addr) {
8677                 if (target_to_host_timespec(&ts, ts_addr)) {
8678                     return -TARGET_EFAULT;
8679                 }
8680                 ts_ptr = &ts;
8681             } else {
8682                 ts_ptr = NULL;
8683             }
8684 
8685             /* Extract the two packed args for the sigset */
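            /*
             * arg6 points at two consecutive abi_ulongs in guest memory:
             *   [0] guest address of the sigset (may be 0)
             *   [1] size of that sigset, which must equal
             *       sizeof(target_sigset_t)
             */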
8686             if (arg6) {
8687                 sig_ptr = &sig;
8688                 sig.size = SIGSET_T_SIZE;
8689 
8690                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8691                 if (!arg7) {
8692                     return -TARGET_EFAULT;
8693                 }
8694                 arg_sigset = tswapal(arg7[0]);
8695                 arg_sigsize = tswapal(arg7[1]);
8696                 unlock_user(arg7, arg6, 0);
8697 
8698                 if (arg_sigset) {
8699                     sig.set = &set;
8700                     if (arg_sigsize != sizeof(*target_sigset)) {
8701                         /* Like the kernel, we enforce correct size sigsets */
8702                         return -TARGET_EINVAL;
8703                     }
8704                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8705                                               sizeof(*target_sigset), 1);
8706                     if (!target_sigset) {
8707                         return -TARGET_EFAULT;
8708                     }
8709                     target_to_host_sigset(&set, target_sigset);
8710                     unlock_user(target_sigset, arg_sigset, 0);
8711                 } else {
8712                     sig.set = NULL;
8713                 }
8714             } else {
8715                 sig_ptr = NULL;
8716             }
8717 
8718             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8719                                           ts_ptr, sig_ptr));
8720 
8721             if (!is_error(ret)) {
8722                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8723                     return -TARGET_EFAULT;
8724                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8725                     return -TARGET_EFAULT;
8726                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8727                     return -TARGET_EFAULT;
8728 
8729                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8730                     return -TARGET_EFAULT;
8731             }
8732         }
8733         return ret;
8734 #endif
8735 #ifdef TARGET_NR_symlink
8736     case TARGET_NR_symlink:
8737         {
8738             void *p2;
8739             p = lock_user_string(arg1);
8740             p2 = lock_user_string(arg2);
8741             if (!p || !p2)
8742                 ret = -TARGET_EFAULT;
8743             else
8744                 ret = get_errno(symlink(p, p2));
8745             unlock_user(p2, arg2, 0);
8746             unlock_user(p, arg1, 0);
8747         }
8748         return ret;
8749 #endif
8750 #if defined(TARGET_NR_symlinkat)
8751     case TARGET_NR_symlinkat:
8752         {
8753             void *p2;
8754             p  = lock_user_string(arg1);
8755             p2 = lock_user_string(arg3);
8756             if (!p || !p2)
8757                 ret = -TARGET_EFAULT;
8758             else
8759                 ret = get_errno(symlinkat(p, arg2, p2));
8760             unlock_user(p2, arg3, 0);
8761             unlock_user(p, arg1, 0);
8762         }
8763         return ret;
8764 #endif
8765 #ifdef TARGET_NR_readlink
8766     case TARGET_NR_readlink:
8767         {
8768             void *p2;
8769             p = lock_user_string(arg1);
8770             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8771             if (!p || !p2) {
8772                 ret = -TARGET_EFAULT;
8773             } else if (!arg3) {
8774                 /* Short circuit this for the magic exe check. */
8775                 ret = -TARGET_EINVAL;
8776             } else if (is_proc_myself((const char *)p, "exe")) {
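                /*
                 * A readlink of /proc/self/exe (or /proc/<our pid>/exe) is
                 * intercepted so the guest sees the path of the emulated
                 * binary (exec_path) rather than that of QEMU itself.
                 */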
8777                 char real[PATH_MAX], *temp;
8778                 temp = realpath(exec_path, real);
8779                 /* Return value is # of bytes that we wrote to the buffer. */
8780                 if (temp == NULL) {
8781                     ret = get_errno(-1);
8782                 } else {
8783                     /* Don't worry about sign mismatch as earlier mapping
8784                      * logic would have thrown a bad address error. */
8785                     ret = MIN(strlen(real), arg3);
8786                     /* We cannot NUL terminate the string. */
8787                     memcpy(p2, real, ret);
8788                 }
8789             } else {
8790                 ret = get_errno(readlink(path(p), p2, arg3));
8791             }
8792             unlock_user(p2, arg2, ret);
8793             unlock_user(p, arg1, 0);
8794         }
8795         return ret;
8796 #endif
8797 #if defined(TARGET_NR_readlinkat)
8798     case TARGET_NR_readlinkat:
8799         {
8800             void *p2;
8801             p  = lock_user_string(arg2);
8802             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8803             if (!p || !p2) {
8804                 ret = -TARGET_EFAULT;
8805             } else if (is_proc_myself((const char *)p, "exe")) {
8806                 char real[PATH_MAX], *temp;
8807                 temp = realpath(exec_path, real);
8808                 if (temp == NULL) {
                         ret = get_errno(-1);
                     } else {
                         snprintf((char *)p2, arg4, "%s", real);
                         ret = strlen(real);
                     }
8810             } else {
8811                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8812             }
8813             unlock_user(p2, arg3, ret);
8814             unlock_user(p, arg2, 0);
8815         }
8816         return ret;
8817 #endif
8818 #ifdef TARGET_NR_swapon
8819     case TARGET_NR_swapon:
8820         if (!(p = lock_user_string(arg1)))
8821             return -TARGET_EFAULT;
8822         ret = get_errno(swapon(p, arg2));
8823         unlock_user(p, arg1, 0);
8824         return ret;
8825 #endif
8826     case TARGET_NR_reboot:
8827         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8828             /* arg4 is only meaningful with LINUX_REBOOT_CMD_RESTART2 */
8829             p = lock_user_string(arg4);
8830             if (!p) {
8831                 return -TARGET_EFAULT;
8832             }
8833             ret = get_errno(reboot(arg1, arg2, arg3, p));
8834             unlock_user(p, arg4, 0);
8835         } else {
8836             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8837         }
8838         return ret;
8839 #ifdef TARGET_NR_mmap
8840     case TARGET_NR_mmap:
8841 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8842     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8843     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8844     || defined(TARGET_S390X)
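        /*
         * On these targets the old mmap syscall passes a single pointer to a
         * block of six abi_ulong arguments in guest memory instead of using
         * registers, so that block is unpacked here first.
         */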
8845         {
8846             abi_ulong *v;
8847             abi_ulong v1, v2, v3, v4, v5, v6;
8848             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8849                 return -TARGET_EFAULT;
8850             v1 = tswapal(v[0]);
8851             v2 = tswapal(v[1]);
8852             v3 = tswapal(v[2]);
8853             v4 = tswapal(v[3]);
8854             v5 = tswapal(v[4]);
8855             v6 = tswapal(v[5]);
8856             unlock_user(v, arg1, 0);
8857             ret = get_errno(target_mmap(v1, v2, v3,
8858                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8859                                         v5, v6));
8860         }
8861 #else
8862         ret = get_errno(target_mmap(arg1, arg2, arg3,
8863                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8864                                     arg5,
8865                                     arg6));
8866 #endif
8867         return ret;
8868 #endif
8869 #ifdef TARGET_NR_mmap2
8870     case TARGET_NR_mmap2:
8871 #ifndef MMAP_SHIFT
8872 #define MMAP_SHIFT 12
8873 #endif
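        /*
         * mmap2 takes its file offset in units of pages of size
         * 1 << MMAP_SHIFT (4096 bytes unless the target overrides it),
         * so shift it back up into a byte offset for target_mmap().
         */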
8874         ret = target_mmap(arg1, arg2, arg3,
8875                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8876                           arg5, arg6 << MMAP_SHIFT);
8877         return get_errno(ret);
8878 #endif
8879     case TARGET_NR_munmap:
8880         return get_errno(target_munmap(arg1, arg2));
8881     case TARGET_NR_mprotect:
8882         {
8883             TaskState *ts = cpu->opaque;
8884             /* Special hack to detect libc making the stack executable.  */
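            /*
             * If the request covers part of the guest stack and asks for
             * PROT_GROWSDOWN, drop that flag and explicitly extend the range
             * down to the recorded stack limit instead, since the host
             * mapping backing the guest stack need not be a grows-down
             * mapping.
             */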
8885             if ((arg3 & PROT_GROWSDOWN)
8886                 && arg1 >= ts->info->stack_limit
8887                 && arg1 <= ts->info->start_stack) {
8888                 arg3 &= ~PROT_GROWSDOWN;
8889                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8890                 arg1 = ts->info->stack_limit;
8891             }
8892         }
8893         return get_errno(target_mprotect(arg1, arg2, arg3));
8894 #ifdef TARGET_NR_mremap
8895     case TARGET_NR_mremap:
8896         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8897 #endif
8898         /* ??? msync/mlock/munlock are broken for softmmu.  */
8899 #ifdef TARGET_NR_msync
8900     case TARGET_NR_msync:
8901         return get_errno(msync(g2h(arg1), arg2, arg3));
8902 #endif
8903 #ifdef TARGET_NR_mlock
8904     case TARGET_NR_mlock:
8905         return get_errno(mlock(g2h(arg1), arg2));
8906 #endif
8907 #ifdef TARGET_NR_munlock
8908     case TARGET_NR_munlock:
8909         return get_errno(munlock(g2h(arg1), arg2));
8910 #endif
8911 #ifdef TARGET_NR_mlockall
8912     case TARGET_NR_mlockall:
8913         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8914 #endif
8915 #ifdef TARGET_NR_munlockall
8916     case TARGET_NR_munlockall:
8917         return get_errno(munlockall());
8918 #endif
8919 #ifdef TARGET_NR_truncate
8920     case TARGET_NR_truncate:
8921         if (!(p = lock_user_string(arg1)))
8922             return -TARGET_EFAULT;
8923         ret = get_errno(truncate(p, arg2));
8924         unlock_user(p, arg1, 0);
8925         return ret;
8926 #endif
8927 #ifdef TARGET_NR_ftruncate
8928     case TARGET_NR_ftruncate:
8929         return get_errno(ftruncate(arg1, arg2));
8930 #endif
8931     case TARGET_NR_fchmod:
8932         return get_errno(fchmod(arg1, arg2));
8933 #if defined(TARGET_NR_fchmodat)
8934     case TARGET_NR_fchmodat:
8935         if (!(p = lock_user_string(arg2)))
8936             return -TARGET_EFAULT;
8937         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8938         unlock_user(p, arg2, 0);
8939         return ret;
8940 #endif
8941     case TARGET_NR_getpriority:
8942         /* Note that negative values are valid for getpriority, so we must
8943            differentiate based on errno settings.  */
8944         errno = 0;
8945         ret = getpriority(arg1, arg2);
8946         if (ret == -1 && errno != 0) {
8947             return -host_to_target_errno(errno);
8948         }
8949 #ifdef TARGET_ALPHA
8950         /* Return value is the unbiased priority.  Signal no error.  */
8951         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8952 #else
8953         /* Return value is a biased priority to avoid negative numbers.  */
8954         ret = 20 - ret;
8955 #endif
8956         return ret;
8957     case TARGET_NR_setpriority:
8958         return get_errno(setpriority(arg1, arg2, arg3));
8959 #ifdef TARGET_NR_statfs
8960     case TARGET_NR_statfs:
8961         if (!(p = lock_user_string(arg1))) {
8962             return -TARGET_EFAULT;
8963         }
8964         ret = get_errno(statfs(path(p), &stfs));
8965         unlock_user(p, arg1, 0);
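    /*
     * TARGET_NR_fstatfs jumps here so that both syscalls share the
     * conversion into the guest's struct target_statfs.
     */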
8966     convert_statfs:
8967         if (!is_error(ret)) {
8968             struct target_statfs *target_stfs;
8969 
8970             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8971                 return -TARGET_EFAULT;
8972             __put_user(stfs.f_type, &target_stfs->f_type);
8973             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8974             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8975             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8976             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8977             __put_user(stfs.f_files, &target_stfs->f_files);
8978             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8979             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8980             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8981             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8982             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8983 #ifdef _STATFS_F_FLAGS
8984             __put_user(stfs.f_flags, &target_stfs->f_flags);
8985 #else
8986             __put_user(0, &target_stfs->f_flags);
8987 #endif
8988             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8989             unlock_user_struct(target_stfs, arg2, 1);
8990         }
8991         return ret;
8992 #endif
8993 #ifdef TARGET_NR_fstatfs
8994     case TARGET_NR_fstatfs:
8995         ret = get_errno(fstatfs(arg1, &stfs));
8996         goto convert_statfs;
8997 #endif
8998 #ifdef TARGET_NR_statfs64
8999     case TARGET_NR_statfs64:
9000         if (!(p = lock_user_string(arg1))) {
9001             return -TARGET_EFAULT;
9002         }
9003         ret = get_errno(statfs(path(p), &stfs));
9004         unlock_user(p, arg1, 0);
9005     convert_statfs64:
9006         if (!is_error(ret)) {
9007             struct target_statfs64 *target_stfs;
9008 
9009             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9010                 return -TARGET_EFAULT;
9011             __put_user(stfs.f_type, &target_stfs->f_type);
9012             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9013             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9014             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9015             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9016             __put_user(stfs.f_files, &target_stfs->f_files);
9017             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9018             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9019             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9020             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9021             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9022             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9023             unlock_user_struct(target_stfs, arg3, 1);
9024         }
9025         return ret;
9026     case TARGET_NR_fstatfs64:
9027         ret = get_errno(fstatfs(arg1, &stfs));
9028         goto convert_statfs64;
9029 #endif
9030 #ifdef TARGET_NR_socketcall
9031     case TARGET_NR_socketcall:
9032         return do_socketcall(arg1, arg2);
9033 #endif
9034 #ifdef TARGET_NR_accept
9035     case TARGET_NR_accept:
9036         return do_accept4(arg1, arg2, arg3, 0);
9037 #endif
9038 #ifdef TARGET_NR_accept4
9039     case TARGET_NR_accept4:
9040         return do_accept4(arg1, arg2, arg3, arg4);
9041 #endif
9042 #ifdef TARGET_NR_bind
9043     case TARGET_NR_bind:
9044         return do_bind(arg1, arg2, arg3);
9045 #endif
9046 #ifdef TARGET_NR_connect
9047     case TARGET_NR_connect:
9048         return do_connect(arg1, arg2, arg3);
9049 #endif
9050 #ifdef TARGET_NR_getpeername
9051     case TARGET_NR_getpeername:
9052         return do_getpeername(arg1, arg2, arg3);
9053 #endif
9054 #ifdef TARGET_NR_getsockname
9055     case TARGET_NR_getsockname:
9056         return do_getsockname(arg1, arg2, arg3);
9057 #endif
9058 #ifdef TARGET_NR_getsockopt
9059     case TARGET_NR_getsockopt:
9060         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9061 #endif
9062 #ifdef TARGET_NR_listen
9063     case TARGET_NR_listen:
9064         return get_errno(listen(arg1, arg2));
9065 #endif
9066 #ifdef TARGET_NR_recv
9067     case TARGET_NR_recv:
9068         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9069 #endif
9070 #ifdef TARGET_NR_recvfrom
9071     case TARGET_NR_recvfrom:
9072         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9073 #endif
9074 #ifdef TARGET_NR_recvmsg
9075     case TARGET_NR_recvmsg:
9076         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9077 #endif
9078 #ifdef TARGET_NR_send
9079     case TARGET_NR_send:
9080         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9081 #endif
9082 #ifdef TARGET_NR_sendmsg
9083     case TARGET_NR_sendmsg:
9084         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9085 #endif
9086 #ifdef TARGET_NR_sendmmsg
9087     case TARGET_NR_sendmmsg:
9088         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9089     case TARGET_NR_recvmmsg:
9090         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9091 #endif
9092 #ifdef TARGET_NR_sendto
9093     case TARGET_NR_sendto:
9094         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9095 #endif
9096 #ifdef TARGET_NR_shutdown
9097     case TARGET_NR_shutdown:
9098         return get_errno(shutdown(arg1, arg2));
9099 #endif
9100 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9101     case TARGET_NR_getrandom:
9102         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9103         if (!p) {
9104             return -TARGET_EFAULT;
9105         }
9106         ret = get_errno(getrandom(p, arg2, arg3));
9107         unlock_user(p, arg1, ret);
9108         return ret;
9109 #endif
9110 #ifdef TARGET_NR_socket
9111     case TARGET_NR_socket:
9112         return do_socket(arg1, arg2, arg3);
9113 #endif
9114 #ifdef TARGET_NR_socketpair
9115     case TARGET_NR_socketpair:
9116         return do_socketpair(arg1, arg2, arg3, arg4);
9117 #endif
9118 #ifdef TARGET_NR_setsockopt
9119     case TARGET_NR_setsockopt:
9120         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9121 #endif
9122 #if defined(TARGET_NR_syslog)
9123     case TARGET_NR_syslog:
9124         {
9125             int len = arg3;
9126 
9127             switch (arg1) {
9128             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9129             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9130             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9131             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9132             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9133             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9134             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9135             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9136                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9137             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9138             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9139             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9140                 {
9141                     if (len < 0) {
9142                         return -TARGET_EINVAL;
9143                     }
9144                     if (len == 0) {
9145                         return 0;
9146                     }
9147                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9148                     if (!p) {
9149                         return -TARGET_EFAULT;
9150                     }
9151                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9152                     unlock_user(p, arg2, arg3);
9153                 }
9154                 return ret;
9155             default:
9156                 return -TARGET_EINVAL;
9157             }
9158         }
9159         break;
9160 #endif
9161     case TARGET_NR_setitimer:
9162         {
9163             struct itimerval value, ovalue, *pvalue;
9164 
9165             if (arg2) {
9166                 pvalue = &value;
9167                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9168                     || copy_from_user_timeval(&pvalue->it_value,
9169                                               arg2 + sizeof(struct target_timeval)))
9170                     return -TARGET_EFAULT;
9171             } else {
9172                 pvalue = NULL;
9173             }
9174             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9175             if (!is_error(ret) && arg3) {
9176                 if (copy_to_user_timeval(arg3,
9177                                          &ovalue.it_interval)
9178                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9179                                             &ovalue.it_value))
9180                     return -TARGET_EFAULT;
9181             }
9182         }
9183         return ret;
9184     case TARGET_NR_getitimer:
9185         {
9186             struct itimerval value;
9187 
9188             ret = get_errno(getitimer(arg1, &value));
9189             if (!is_error(ret) && arg2) {
9190                 if (copy_to_user_timeval(arg2,
9191                                          &value.it_interval)
9192                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9193                                             &value.it_value))
9194                     return -TARGET_EFAULT;
9195             }
9196         }
9197         return ret;
9198 #ifdef TARGET_NR_stat
9199     case TARGET_NR_stat:
9200         if (!(p = lock_user_string(arg1))) {
9201             return -TARGET_EFAULT;
9202         }
9203         ret = get_errno(stat(path(p), &st));
9204         unlock_user(p, arg1, 0);
9205         goto do_stat;
9206 #endif
9207 #ifdef TARGET_NR_lstat
9208     case TARGET_NR_lstat:
9209         if (!(p = lock_user_string(arg1))) {
9210             return -TARGET_EFAULT;
9211         }
9212         ret = get_errno(lstat(path(p), &st));
9213         unlock_user(p, arg1, 0);
9214         goto do_stat;
9215 #endif
9216 #ifdef TARGET_NR_fstat
9217     case TARGET_NR_fstat:
9218         {
9219             ret = get_errno(fstat(arg1, &st));
9220 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9221         do_stat:
9222 #endif
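            /*
             * Common conversion of the host struct stat into the guest's
             * struct target_stat; the stat and lstat cases above jump here.
             */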
9223             if (!is_error(ret)) {
9224                 struct target_stat *target_st;
9225 
9226                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9227                     return -TARGET_EFAULT;
9228                 memset(target_st, 0, sizeof(*target_st));
9229                 __put_user(st.st_dev, &target_st->st_dev);
9230                 __put_user(st.st_ino, &target_st->st_ino);
9231                 __put_user(st.st_mode, &target_st->st_mode);
9232                 __put_user(st.st_uid, &target_st->st_uid);
9233                 __put_user(st.st_gid, &target_st->st_gid);
9234                 __put_user(st.st_nlink, &target_st->st_nlink);
9235                 __put_user(st.st_rdev, &target_st->st_rdev);
9236                 __put_user(st.st_size, &target_st->st_size);
9237                 __put_user(st.st_blksize, &target_st->st_blksize);
9238                 __put_user(st.st_blocks, &target_st->st_blocks);
9239                 __put_user(st.st_atime, &target_st->target_st_atime);
9240                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9241                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9242 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9243     defined(TARGET_STAT_HAVE_NSEC)
9244                 __put_user(st.st_atim.tv_nsec,
9245                            &target_st->target_st_atime_nsec);
9246                 __put_user(st.st_mtim.tv_nsec,
9247                            &target_st->target_st_mtime_nsec);
9248                 __put_user(st.st_ctim.tv_nsec,
9249                            &target_st->target_st_ctime_nsec);
9250 #endif
9251                 unlock_user_struct(target_st, arg2, 1);
9252             }
9253         }
9254         return ret;
9255 #endif
9256     case TARGET_NR_vhangup:
9257         return get_errno(vhangup());
9258 #ifdef TARGET_NR_syscall
9259     case TARGET_NR_syscall:
9260         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9261                           arg6, arg7, arg8, 0);
9262 #endif
9263     case TARGET_NR_wait4:
9264         {
9265             int status;
9266             abi_long status_ptr = arg2;
9267             struct rusage rusage, *rusage_ptr;
9268             abi_ulong target_rusage = arg4;
9269             abi_long rusage_err;
9270             if (target_rusage)
9271                 rusage_ptr = &rusage;
9272             else
9273                 rusage_ptr = NULL;
9274             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9275             if (!is_error(ret)) {
9276                 if (status_ptr && ret) {
9277                     status = host_to_target_waitstatus(status);
9278                     if (put_user_s32(status, status_ptr))
9279                         return -TARGET_EFAULT;
9280                 }
9281                 if (target_rusage) {
9282                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9283                     if (rusage_err) {
9284                         ret = rusage_err;
9285                     }
9286                 }
9287             }
9288         }
9289         return ret;
9290 #ifdef TARGET_NR_swapoff
9291     case TARGET_NR_swapoff:
9292         if (!(p = lock_user_string(arg1)))
9293             return -TARGET_EFAULT;
9294         ret = get_errno(swapoff(p));
9295         unlock_user(p, arg1, 0);
9296         return ret;
9297 #endif
9298     case TARGET_NR_sysinfo:
9299         {
9300             struct target_sysinfo *target_value;
9301             struct sysinfo value;
9302             ret = get_errno(sysinfo(&value));
9303             if (!is_error(ret) && arg1)
9304             {
9305                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9306                     return -TARGET_EFAULT;
9307                 __put_user(value.uptime, &target_value->uptime);
9308                 __put_user(value.loads[0], &target_value->loads[0]);
9309                 __put_user(value.loads[1], &target_value->loads[1]);
9310                 __put_user(value.loads[2], &target_value->loads[2]);
9311                 __put_user(value.totalram, &target_value->totalram);
9312                 __put_user(value.freeram, &target_value->freeram);
9313                 __put_user(value.sharedram, &target_value->sharedram);
9314                 __put_user(value.bufferram, &target_value->bufferram);
9315                 __put_user(value.totalswap, &target_value->totalswap);
9316                 __put_user(value.freeswap, &target_value->freeswap);
9317                 __put_user(value.procs, &target_value->procs);
9318                 __put_user(value.totalhigh, &target_value->totalhigh);
9319                 __put_user(value.freehigh, &target_value->freehigh);
9320                 __put_user(value.mem_unit, &target_value->mem_unit);
9321                 unlock_user_struct(target_value, arg1, 1);
9322             }
9323         }
9324         return ret;
9325 #ifdef TARGET_NR_ipc
9326     case TARGET_NR_ipc:
9327         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9328 #endif
9329 #ifdef TARGET_NR_semget
9330     case TARGET_NR_semget:
9331         return get_errno(semget(arg1, arg2, arg3));
9332 #endif
9333 #ifdef TARGET_NR_semop
9334     case TARGET_NR_semop:
9335         return do_semop(arg1, arg2, arg3);
9336 #endif
9337 #ifdef TARGET_NR_semctl
9338     case TARGET_NR_semctl:
9339         return do_semctl(arg1, arg2, arg3, arg4);
9340 #endif
9341 #ifdef TARGET_NR_msgctl
9342     case TARGET_NR_msgctl:
9343         return do_msgctl(arg1, arg2, arg3);
9344 #endif
9345 #ifdef TARGET_NR_msgget
9346     case TARGET_NR_msgget:
9347         return get_errno(msgget(arg1, arg2));
9348 #endif
9349 #ifdef TARGET_NR_msgrcv
9350     case TARGET_NR_msgrcv:
9351         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9352 #endif
9353 #ifdef TARGET_NR_msgsnd
9354     case TARGET_NR_msgsnd:
9355         return do_msgsnd(arg1, arg2, arg3, arg4);
9356 #endif
9357 #ifdef TARGET_NR_shmget
9358     case TARGET_NR_shmget:
9359         return get_errno(shmget(arg1, arg2, arg3));
9360 #endif
9361 #ifdef TARGET_NR_shmctl
9362     case TARGET_NR_shmctl:
9363         return do_shmctl(arg1, arg2, arg3);
9364 #endif
9365 #ifdef TARGET_NR_shmat
9366     case TARGET_NR_shmat:
9367         return do_shmat(cpu_env, arg1, arg2, arg3);
9368 #endif
9369 #ifdef TARGET_NR_shmdt
9370     case TARGET_NR_shmdt:
9371         return do_shmdt(arg1);
9372 #endif
9373     case TARGET_NR_fsync:
9374         return get_errno(fsync(arg1));
9375     case TARGET_NR_clone:
9376         /* Linux manages to have three different orderings for its
9377          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9378          * match the kernel's CONFIG_CLONE_* settings.
9379          * Microblaze is further special in that it uses a sixth
9380          * implicit argument to clone for the TLS pointer.
9381          */
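        /*
         * A sketch of the guest-visible argument orders, assuming
         * do_fork() takes (env, flags, newsp, parent_tidptr, tls,
         * child_tidptr):
         *   default:                 flags, newsp, parent_tidptr,
         *                            child_tidptr, tls
         *   TARGET_CLONE_BACKWARDS:  flags, newsp, parent_tidptr, tls,
         *                            child_tidptr
         *   TARGET_CLONE_BACKWARDS2: newsp, flags, parent_tidptr,
         *                            child_tidptr, tls
         *   Microblaze:              flags, newsp, arg3 (unused here),
         *                            parent_tidptr, child_tidptr, tls
         */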
9382 #if defined(TARGET_MICROBLAZE)
9383         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9384 #elif defined(TARGET_CLONE_BACKWARDS)
9385         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9386 #elif defined(TARGET_CLONE_BACKWARDS2)
9387         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9388 #else
9389         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9390 #endif
9391         return ret;
9392 #ifdef __NR_exit_group
9393         /* new thread calls */
9394     case TARGET_NR_exit_group:
9395         preexit_cleanup(cpu_env, arg1);
9396         return get_errno(exit_group(arg1));
9397 #endif
9398     case TARGET_NR_setdomainname:
9399         if (!(p = lock_user_string(arg1)))
9400             return -TARGET_EFAULT;
9401         ret = get_errno(setdomainname(p, arg2));
9402         unlock_user(p, arg1, 0);
9403         return ret;
9404     case TARGET_NR_uname:
9405         /* no need to transcode because we use the linux syscall */
9406         {
9407             struct new_utsname * buf;
9408 
9409             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9410                 return -TARGET_EFAULT;
9411             ret = get_errno(sys_uname(buf));
9412             if (!is_error(ret)) {
9413                 /* Overwrite the native machine name with whatever is being
9414                    emulated. */
9415                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9416                           sizeof(buf->machine));
9417                 /* Allow the user to override the reported release.  */
9418                 if (qemu_uname_release && *qemu_uname_release) {
9419                     g_strlcpy(buf->release, qemu_uname_release,
9420                               sizeof(buf->release));
9421                 }
9422             }
9423             unlock_user_struct(buf, arg1, 1);
9424         }
9425         return ret;
9426 #ifdef TARGET_I386
9427     case TARGET_NR_modify_ldt:
9428         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9429 #if !defined(TARGET_X86_64)
9430     case TARGET_NR_vm86:
9431         return do_vm86(cpu_env, arg1, arg2);
9432 #endif
9433 #endif
9434     case TARGET_NR_adjtimex:
9435         {
9436             struct timex host_buf;
9437 
9438             if (target_to_host_timex(&host_buf, arg1) != 0) {
9439                 return -TARGET_EFAULT;
9440             }
9441             ret = get_errno(adjtimex(&host_buf));
9442             if (!is_error(ret)) {
9443                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9444                     return -TARGET_EFAULT;
9445                 }
9446             }
9447         }
9448         return ret;
9449 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9450     case TARGET_NR_clock_adjtime:
9451         {
9452             struct timex htx, *phtx = &htx;
9453 
9454             if (target_to_host_timex(phtx, arg2) != 0) {
9455                 return -TARGET_EFAULT;
9456             }
9457             ret = get_errno(clock_adjtime(arg1, phtx));
9458             if (!is_error(ret) && phtx) {
9459                 if (host_to_target_timex(arg2, phtx) != 0) {
9460                     return -TARGET_EFAULT;
9461                 }
9462             }
9463         }
9464         return ret;
9465 #endif
9466     case TARGET_NR_getpgid:
9467         return get_errno(getpgid(arg1));
9468     case TARGET_NR_fchdir:
9469         return get_errno(fchdir(arg1));
9470     case TARGET_NR_personality:
9471         return get_errno(personality(arg1));
9472 #ifdef TARGET_NR__llseek /* Not on alpha */
9473     case TARGET_NR__llseek:
9474         {
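            /*
             * arg2 and arg3 are the high and low halves of the 64-bit
             * offset, arg4 points at the guest's 64-bit result slot and
             * arg5 is the whence value; hosts without __NR_llseek fall
             * back to a plain lseek().
             */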
9475             int64_t res;
9476 #if !defined(__NR_llseek)
9477             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9478             if (res == -1) {
9479                 ret = get_errno(res);
9480             } else {
9481                 ret = 0;
9482             }
9483 #else
9484             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9485 #endif
9486             if ((ret == 0) && put_user_s64(res, arg4)) {
9487                 return -TARGET_EFAULT;
9488             }
9489         }
9490         return ret;
9491 #endif
9492 #ifdef TARGET_NR_getdents
9493     case TARGET_NR_getdents:
9494 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9495 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
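        /*
         * 32-bit guest on a 64-bit host: the host linux_dirent records
         * (64-bit d_ino/d_off) are wider than the guest's target_dirent,
         * so convert record by record into a separately locked guest buffer.
         */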
9496         {
9497             struct target_dirent *target_dirp;
9498             struct linux_dirent *dirp;
9499             abi_long count = arg3;
9500 
9501             dirp = g_try_malloc(count);
9502             if (!dirp) {
9503                 return -TARGET_ENOMEM;
9504             }
9505 
9506             ret = get_errno(sys_getdents(arg1, dirp, count));
9507             if (!is_error(ret)) {
9508                 struct linux_dirent *de;
9509                 struct target_dirent *tde;
9510                 int len = ret;
9511                 int reclen, treclen;
9512                 int count1, tnamelen;
9513 
9514                 count1 = 0;
9515                 de = dirp;
9516                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9517                     return -TARGET_EFAULT;
9518                 tde = target_dirp;
9519                 while (len > 0) {
9520                     reclen = de->d_reclen;
9521                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9522                     assert(tnamelen >= 0);
9523                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9524                     assert(count1 + treclen <= count);
9525                     tde->d_reclen = tswap16(treclen);
9526                     tde->d_ino = tswapal(de->d_ino);
9527                     tde->d_off = tswapal(de->d_off);
9528                     memcpy(tde->d_name, de->d_name, tnamelen);
9529                     de = (struct linux_dirent *)((char *)de + reclen);
9530                     len -= reclen;
9531                     tde = (struct target_dirent *)((char *)tde + treclen);
9532                     count1 += treclen;
9533                 }
9534                 ret = count1;
9535                 unlock_user(target_dirp, arg2, ret);
9536             }
9537             g_free(dirp);
9538         }
9539 #else
9540         {
9541             struct linux_dirent *dirp;
9542             abi_long count = arg3;
9543 
9544             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9545                 return -TARGET_EFAULT;
9546             ret = get_errno(sys_getdents(arg1, dirp, count));
9547             if (!is_error(ret)) {
9548                 struct linux_dirent *de;
9549                 int len = ret;
9550                 int reclen;
9551                 de = dirp;
9552                 while (len > 0) {
9553                     reclen = de->d_reclen;
9554                     if (reclen > len)
9555                         break;
9556                     de->d_reclen = tswap16(reclen);
9557                     tswapls(&de->d_ino);
9558                     tswapls(&de->d_off);
9559                     de = (struct linux_dirent *)((char *)de + reclen);
9560                     len -= reclen;
9561                 }
9562             }
9563             unlock_user(dirp, arg2, ret);
9564         }
9565 #endif
9566 #else
9567         /* Implement getdents in terms of getdents64 */
9568         {
9569             struct linux_dirent64 *dirp;
9570             abi_long count = arg3;
9571 
9572             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9573             if (!dirp) {
9574                 return -TARGET_EFAULT;
9575             }
9576             ret = get_errno(sys_getdents64(arg1, dirp, count));
9577             if (!is_error(ret)) {
9578                 /* Convert the dirent64 structs to target dirent.  We do this
9579                  * in-place, since we can guarantee that a target_dirent is no
9580                  * larger than a dirent64; however this means we have to be
9581                  * careful to read everything before writing in the new format.
9582                  */
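                /*
                 * Sketch of the two layouts being converted between
                 * (exact types are the host's and the guest ABI's):
                 *   linux_dirent64: u64 d_ino; s64 d_off; u16 d_reclen;
                 *                   u8 d_type; char d_name[];
                 *   target_dirent:  abi_long d_ino; abi_long d_off;
                 *                   u16 d_reclen; char d_name[];
                 * The "+ 2" below covers the name's NUL plus the byte that
                 * carries d_type at the end of each target record.
                 */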
9583                 struct linux_dirent64 *de;
9584                 struct target_dirent *tde;
9585                 int len = ret;
9586                 int tlen = 0;
9587 
9588                 de = dirp;
9589                 tde = (struct target_dirent *)dirp;
9590                 while (len > 0) {
9591                     int namelen, treclen;
9592                     int reclen = de->d_reclen;
9593                     uint64_t ino = de->d_ino;
9594                     int64_t off = de->d_off;
9595                     uint8_t type = de->d_type;
9596 
9597                     namelen = strlen(de->d_name);
9598                     treclen = offsetof(struct target_dirent, d_name)
9599                         + namelen + 2;
9600                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9601 
9602                     memmove(tde->d_name, de->d_name, namelen + 1);
9603                     tde->d_ino = tswapal(ino);
9604                     tde->d_off = tswapal(off);
9605                     tde->d_reclen = tswap16(treclen);
9606                     /* The target_dirent d_type is stored in what was formerly a
9607                      * padding byte at the end of the structure:
9608                      */
9609                     *(((char *)tde) + treclen - 1) = type;
9610 
9611                     de = (struct linux_dirent64 *)((char *)de + reclen);
9612                     tde = (struct target_dirent *)((char *)tde + treclen);
9613                     len -= reclen;
9614                     tlen += treclen;
9615                 }
9616                 ret = tlen;
9617             }
9618             unlock_user(dirp, arg2, ret);
9619         }
9620 #endif
9621         return ret;
9622 #endif /* TARGET_NR_getdents */
9623 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9624     case TARGET_NR_getdents64:
9625         {
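            /*
             * The guest's dirent64 layout matches the host's, so the
             * records only need their multi-byte fields byte-swapped in
             * place.
             */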
9626             struct linux_dirent64 *dirp;
9627             abi_long count = arg3;
9628             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9629                 return -TARGET_EFAULT;
9630             ret = get_errno(sys_getdents64(arg1, dirp, count));
9631             if (!is_error(ret)) {
9632                 struct linux_dirent64 *de;
9633                 int len = ret;
9634                 int reclen;
9635                 de = dirp;
9636                 while (len > 0) {
9637                     reclen = de->d_reclen;
9638                     if (reclen > len)
9639                         break;
9640                     de->d_reclen = tswap16(reclen);
9641                     tswap64s((uint64_t *)&de->d_ino);
9642                     tswap64s((uint64_t *)&de->d_off);
9643                     de = (struct linux_dirent64 *)((char *)de + reclen);
9644                     len -= reclen;
9645                 }
9646             }
9647             unlock_user(dirp, arg2, ret);
9648         }
9649         return ret;
9650 #endif /* TARGET_NR_getdents64 */
9651 #if defined(TARGET_NR__newselect)
9652     case TARGET_NR__newselect:
9653         return do_select(arg1, arg2, arg3, arg4, arg5);
9654 #endif
9655 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9656 # ifdef TARGET_NR_poll
9657     case TARGET_NR_poll:
9658 # endif
9659 # ifdef TARGET_NR_ppoll
9660     case TARGET_NR_ppoll:
9661 # endif
9662         {
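            /*
             * poll() and ppoll() share this code and both end up in
             * safe_ppoll(); for plain poll() the millisecond timeout is
             * converted into a timespec below.
             */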
9663             struct target_pollfd *target_pfd;
9664             unsigned int nfds = arg2;
9665             struct pollfd *pfd;
9666             unsigned int i;
9667 
9668             pfd = NULL;
9669             target_pfd = NULL;
9670             if (nfds) {
9671                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9672                     return -TARGET_EINVAL;
9673                 }
9674 
9675                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9676                                        sizeof(struct target_pollfd) * nfds, 1);
9677                 if (!target_pfd) {
9678                     return -TARGET_EFAULT;
9679                 }
9680 
9681                 pfd = alloca(sizeof(struct pollfd) * nfds);
9682                 for (i = 0; i < nfds; i++) {
9683                     pfd[i].fd = tswap32(target_pfd[i].fd);
9684                     pfd[i].events = tswap16(target_pfd[i].events);
9685                 }
9686             }
9687 
9688             switch (num) {
9689 # ifdef TARGET_NR_ppoll
9690             case TARGET_NR_ppoll:
9691             {
9692                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9693                 target_sigset_t *target_set;
9694                 sigset_t _set, *set = &_set;
9695 
9696                 if (arg3) {
9697                     if (target_to_host_timespec(timeout_ts, arg3)) {
9698                         unlock_user(target_pfd, arg1, 0);
9699                         return -TARGET_EFAULT;
9700                     }
9701                 } else {
9702                     timeout_ts = NULL;
9703                 }
9704 
9705                 if (arg4) {
9706                     if (arg5 != sizeof(target_sigset_t)) {
9707                         unlock_user(target_pfd, arg1, 0);
9708                         return -TARGET_EINVAL;
9709                     }
9710 
9711                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9712                     if (!target_set) {
9713                         unlock_user(target_pfd, arg1, 0);
9714                         return -TARGET_EFAULT;
9715                     }
9716                     target_to_host_sigset(set, target_set);
9717                 } else {
9718                     set = NULL;
9719                 }
9720 
9721                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9722                                            set, SIGSET_T_SIZE));
9723 
9724                 if (!is_error(ret) && arg3) {
9725                     host_to_target_timespec(arg3, timeout_ts);
9726                 }
9727                 if (arg4) {
9728                     unlock_user(target_set, arg4, 0);
9729                 }
9730                 break;
9731             }
9732 # endif
9733 # ifdef TARGET_NR_poll
9734             case TARGET_NR_poll:
9735             {
9736                 struct timespec ts, *pts;
9737 
9738                 if (arg3 >= 0) {
9739                     /* Convert ms to secs, ns */
9740                     ts.tv_sec = arg3 / 1000;
9741                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9742                     pts = &ts;
9743                 } else {
9744                     /* A negative poll() timeout means "infinite" */
9745                     pts = NULL;
9746                 }
9747                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9748                 break;
9749             }
9750 # endif
9751             default:
9752                 g_assert_not_reached();
9753             }
9754 
9755             if (!is_error(ret)) {
9756                 for(i = 0; i < nfds; i++) {
9757                     target_pfd[i].revents = tswap16(pfd[i].revents);
9758                 }
9759             }
9760             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9761         }
9762         return ret;
9763 #endif
9764     case TARGET_NR_flock:
9765         /* NOTE: the flock constant seems to be the same for every
9766            Linux platform */
9767         return get_errno(safe_flock(arg1, arg2));
9768     case TARGET_NR_readv:
9769         {
9770             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9771             if (vec != NULL) {
9772                 ret = get_errno(safe_readv(arg1, vec, arg3));
9773                 unlock_iovec(vec, arg2, arg3, 1);
9774             } else {
9775                 ret = -host_to_target_errno(errno);
9776             }
9777         }
9778         return ret;
9779     case TARGET_NR_writev:
9780         {
9781             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9782             if (vec != NULL) {
9783                 ret = get_errno(safe_writev(arg1, vec, arg3));
9784                 unlock_iovec(vec, arg2, arg3, 0);
9785             } else {
9786                 ret = -host_to_target_errno(errno);
9787             }
9788         }
9789         return ret;
9790 #if defined(TARGET_NR_preadv)
9791     case TARGET_NR_preadv:
9792         {
9793             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9794             if (vec != NULL) {
9795                 unsigned long low, high;
9796 
9797                 target_to_host_low_high(arg4, arg5, &low, &high);
9798                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9799                 unlock_iovec(vec, arg2, arg3, 1);
9800             } else {
9801                 ret = -host_to_target_errno(errno);
9802            }
9803         }
9804         return ret;
9805 #endif
9806 #if defined(TARGET_NR_pwritev)
9807     case TARGET_NR_pwritev:
9808         {
9809             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9810             if (vec != NULL) {
9811                 unsigned long low, high;
9812 
9813                 target_to_host_low_high(arg4, arg5, &low, &high);
9814                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9815                 unlock_iovec(vec, arg2, arg3, 0);
9816             } else {
9817                 ret = -host_to_target_errno(errno);
9818            }
9819         }
9820         return ret;
9821 #endif
9822     case TARGET_NR_getsid:
9823         return get_errno(getsid(arg1));
9824 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9825     case TARGET_NR_fdatasync:
9826         return get_errno(fdatasync(arg1));
9827 #endif
9828 #ifdef TARGET_NR__sysctl
9829     case TARGET_NR__sysctl:
9830         /* We don't implement this, but ENOTDIR is always a safe
9831            return value. */
9832         return -TARGET_ENOTDIR;
9833 #endif
9834     case TARGET_NR_sched_getaffinity:
9835         {
9836             unsigned int mask_size;
9837             unsigned long *mask;
9838 
9839             /*
9840              * sched_getaffinity needs multiples of ulong, so need to take
9841              * care of mismatches between target ulong and host ulong sizes.
9842              */
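            /*
             * For example, a 32-bit guest on a 64-bit host passing arg2 = 4
             * gets mask_size rounded up to 8 so the host call sees whole
             * unsigned longs.
             */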
9843             if (arg2 & (sizeof(abi_ulong) - 1)) {
9844                 return -TARGET_EINVAL;
9845             }
9846             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9847 
9848             mask = alloca(mask_size);
9849             memset(mask, 0, mask_size);
9850             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9851 
9852             if (!is_error(ret)) {
9853                 if (ret > arg2) {
9854                     /* More data returned than the caller's buffer will fit.
9855                      * This only happens if sizeof(abi_long) < sizeof(long)
9856                      * and the caller passed us a buffer holding an odd number
9857                      * of abi_longs. If the host kernel is actually using the
9858                      * extra 4 bytes then fail EINVAL; otherwise we can just
9859                      * ignore them and only copy the interesting part.
9860                      */
9861                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9862                     if (numcpus > arg2 * 8) {
9863                         return -TARGET_EINVAL;
9864                     }
9865                     ret = arg2;
9866                 }
9867 
9868                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9869                     return -TARGET_EFAULT;
9870                 }
9871             }
9872         }
9873         return ret;
9874     case TARGET_NR_sched_setaffinity:
9875         {
9876             unsigned int mask_size;
9877             unsigned long *mask;
9878 
9879             /*
9880              * sched_setaffinity needs multiples of ulong, so need to take
9881              * care of mismatches between target ulong and host ulong sizes.
9882              */
9883             if (arg2 & (sizeof(abi_ulong) - 1)) {
9884                 return -TARGET_EINVAL;
9885             }
9886             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9887             mask = alloca(mask_size);
9888 
9889             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9890             if (ret) {
9891                 return ret;
9892             }
9893 
9894             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9895         }
9896     case TARGET_NR_getcpu:
9897         {
9898             unsigned cpu, node;
9899             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9900                                        arg2 ? &node : NULL,
9901                                        NULL));
9902             if (is_error(ret)) {
9903                 return ret;
9904             }
9905             if (arg1 && put_user_u32(cpu, arg1)) {
9906                 return -TARGET_EFAULT;
9907             }
9908             if (arg2 && put_user_u32(node, arg2)) {
9909                 return -TARGET_EFAULT;
9910             }
9911         }
9912         return ret;
9913     case TARGET_NR_sched_setparam:
9914         {
9915             struct sched_param *target_schp;
9916             struct sched_param schp;
9917 
9918             if (arg2 == 0) {
9919                 return -TARGET_EINVAL;
9920             }
9921             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9922                 return -TARGET_EFAULT;
9923             schp.sched_priority = tswap32(target_schp->sched_priority);
9924             unlock_user_struct(target_schp, arg2, 0);
9925             return get_errno(sched_setparam(arg1, &schp));
9926         }
9927     case TARGET_NR_sched_getparam:
9928         {
9929             struct sched_param *target_schp;
9930             struct sched_param schp;
9931 
9932             if (arg2 == 0) {
9933                 return -TARGET_EINVAL;
9934             }
9935             ret = get_errno(sched_getparam(arg1, &schp));
9936             if (!is_error(ret)) {
9937                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9938                     return -TARGET_EFAULT;
9939                 target_schp->sched_priority = tswap32(schp.sched_priority);
9940                 unlock_user_struct(target_schp, arg2, 1);
9941             }
9942         }
9943         return ret;
9944     case TARGET_NR_sched_setscheduler:
9945         {
9946             struct sched_param *target_schp;
9947             struct sched_param schp;
9948             if (arg3 == 0) {
9949                 return -TARGET_EINVAL;
9950             }
9951             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9952                 return -TARGET_EFAULT;
9953             schp.sched_priority = tswap32(target_schp->sched_priority);
9954             unlock_user_struct(target_schp, arg3, 0);
9955             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9956         }
9957     case TARGET_NR_sched_getscheduler:
9958         return get_errno(sched_getscheduler(arg1));
9959     case TARGET_NR_sched_yield:
9960         return get_errno(sched_yield());
9961     case TARGET_NR_sched_get_priority_max:
9962         return get_errno(sched_get_priority_max(arg1));
9963     case TARGET_NR_sched_get_priority_min:
9964         return get_errno(sched_get_priority_min(arg1));
9965     case TARGET_NR_sched_rr_get_interval:
9966         {
9967             struct timespec ts;
9968             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9969             if (!is_error(ret)) {
9970                 ret = host_to_target_timespec(arg2, &ts);
9971             }
9972         }
9973         return ret;
9974     case TARGET_NR_nanosleep:
9975         {
9976             struct timespec req, rem;
9977             target_to_host_timespec(&req, arg1);
9978             ret = get_errno(safe_nanosleep(&req, &rem));
9979             if (is_error(ret) && arg2) {
9980                 host_to_target_timespec(arg2, &rem);
9981             }
9982         }
9983         return ret;
9984     case TARGET_NR_prctl:
9985         switch (arg1) {
9986         case PR_GET_PDEATHSIG:
9987         {
9988             int deathsig;
9989             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9990             if (!is_error(ret) && arg2
9991                 && put_user_ual(deathsig, arg2)) {
9992                 return -TARGET_EFAULT;
9993             }
9994             return ret;
9995         }
9996 #ifdef PR_GET_NAME
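        /*
         * PR_GET_NAME/PR_SET_NAME operate on the kernel's fixed 16-byte
         * task comm field (TASK_COMM_LEN), hence the constant 16 below.
         */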
9997         case PR_GET_NAME:
9998         {
9999             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10000             if (!name) {
10001                 return -TARGET_EFAULT;
10002             }
10003             ret = get_errno(prctl(arg1, (unsigned long)name,
10004                                   arg3, arg4, arg5));
10005             unlock_user(name, arg2, 16);
10006             return ret;
10007         }
10008         case PR_SET_NAME:
10009         {
10010             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10011             if (!name) {
10012                 return -TARGET_EFAULT;
10013             }
10014             ret = get_errno(prctl(arg1, (unsigned long)name,
10015                                   arg3, arg4, arg5));
10016             unlock_user(name, arg2, 0);
10017             return ret;
10018         }
10019 #endif
10020 #ifdef TARGET_MIPS
10021         case TARGET_PR_GET_FP_MODE:
10022         {
10023             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10024             ret = 0;
10025             if (env->CP0_Status & (1 << CP0St_FR)) {
10026                 ret |= TARGET_PR_FP_MODE_FR;
10027             }
10028             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10029                 ret |= TARGET_PR_FP_MODE_FRE;
10030             }
10031             return ret;
10032         }
10033         case TARGET_PR_SET_FP_MODE:
10034         {
10035             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10036             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10037             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10038             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10039             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10040 
10041             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10042                                             TARGET_PR_FP_MODE_FRE;
10043 
10044             /* If nothing to change, return right away, successfully.  */
10045             if (old_fr == new_fr && old_fre == new_fre) {
10046                 return 0;
10047             }
10048             /* Check the value is valid */
10049             if (arg2 & ~known_bits) {
10050                 return -TARGET_EOPNOTSUPP;
10051             }
10052             /* Setting FRE without FR is not supported.  */
10053             if (new_fre && !new_fr) {
10054                 return -TARGET_EOPNOTSUPP;
10055             }
10056             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10057                 /* FR1 is not supported */
10058                 return -TARGET_EOPNOTSUPP;
10059             }
10060             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10061                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10062                 /* cannot set FR=0 */
10063                 return -TARGET_EOPNOTSUPP;
10064             }
10065             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10066                 /* Cannot set FRE=1 */
10067                 return -TARGET_EOPNOTSUPP;
10068             }
10069 
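            /*
             * Changing FR changes how the odd-numbered single-precision
             * registers map onto the 64-bit register storage; the loop
             * below migrates the values between the two layouts so the
             * guest-visible register contents survive the mode switch.
             */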
10070             int i;
10071             fpr_t *fpr = env->active_fpu.fpr;
10072             for (i = 0; i < 32 ; i += 2) {
10073                 if (!old_fr && new_fr) {
10074                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10075                 } else if (old_fr && !new_fr) {
10076                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10077                 }
10078             }
10079 
10080             if (new_fr) {
10081                 env->CP0_Status |= (1 << CP0St_FR);
10082                 env->hflags |= MIPS_HFLAG_F64;
10083             } else {
10084                 env->CP0_Status &= ~(1 << CP0St_FR);
10085                 env->hflags &= ~MIPS_HFLAG_F64;
10086             }
10087             if (new_fre) {
10088                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10089                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10090                     env->hflags |= MIPS_HFLAG_FRE;
10091                 }
10092             } else {
10093                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10094                 env->hflags &= ~MIPS_HFLAG_FRE;
10095             }
10096 
10097             return 0;
10098         }
10099 #endif /* MIPS */
10100 #ifdef TARGET_AARCH64
10101         case TARGET_PR_SVE_SET_VL:
10102             /*
10103              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10104              * PR_SVE_VL_INHERIT.  Note the kernel definition
10105              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10106              * even though the current architectural maximum is VQ=16.
10107              */
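            /*
             * arg2 is the requested vector length in bytes; e.g. arg2 = 32
             * maps to VQ = 2 below, clamped to the cpu's sve_max_vq, and
             * the syscall returns the vector length actually set.
             */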
10108             ret = -TARGET_EINVAL;
10109             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10110                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10111                 CPUARMState *env = cpu_env;
10112                 ARMCPU *cpu = env_archcpu(env);
10113                 uint32_t vq, old_vq;
10114 
10115                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10116                 vq = MAX(arg2 / 16, 1);
10117                 vq = MIN(vq, cpu->sve_max_vq);
10118 
10119                 if (vq < old_vq) {
10120                     aarch64_sve_narrow_vq(env, vq);
10121                 }
10122                 env->vfp.zcr_el[1] = vq - 1;
10123                 arm_rebuild_hflags(env);
10124                 ret = vq * 16;
10125             }
10126             return ret;
10127         case TARGET_PR_SVE_GET_VL:
10128             ret = -TARGET_EINVAL;
10129             {
10130                 ARMCPU *cpu = env_archcpu(cpu_env);
10131                 if (cpu_isar_feature(aa64_sve, cpu)) {
10132                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10133                 }
10134             }
10135             return ret;
10136         case TARGET_PR_PAC_RESET_KEYS:
10137             {
10138                 CPUARMState *env = cpu_env;
10139                 ARMCPU *cpu = env_archcpu(env);
10140 
10141                 if (arg3 || arg4 || arg5) {
10142                     return -TARGET_EINVAL;
10143                 }
10144                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10145                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10146                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10147                                TARGET_PR_PAC_APGAKEY);
10148                     int ret = 0;
10149                     Error *err = NULL;
10150 
10151                     if (arg2 == 0) {
10152                         arg2 = all;
10153                     } else if (arg2 & ~all) {
10154                         return -TARGET_EINVAL;
10155                     }
10156                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10157                         ret |= qemu_guest_getrandom(&env->keys.apia,
10158                                                     sizeof(ARMPACKey), &err);
10159                     }
10160                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10161                         ret |= qemu_guest_getrandom(&env->keys.apib,
10162                                                     sizeof(ARMPACKey), &err);
10163                     }
10164                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10165                         ret |= qemu_guest_getrandom(&env->keys.apda,
10166                                                     sizeof(ARMPACKey), &err);
10167                     }
10168                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10169                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10170                                                     sizeof(ARMPACKey), &err);
10171                     }
10172                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10173                         ret |= qemu_guest_getrandom(&env->keys.apga,
10174                                                     sizeof(ARMPACKey), &err);
10175                     }
10176                     if (ret != 0) {
10177                         /*
10178                          * Some unknown failure in the crypto.  The best
10179                          * we can do is log it and fail the syscall.
10180                          * The real syscall cannot fail this way.
10181                          */
10182                         qemu_log_mask(LOG_UNIMP,
10183                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10184                                       error_get_pretty(err));
10185                         error_free(err);
10186                         return -TARGET_EIO;
10187                     }
10188                     return 0;
10189                 }
10190             }
10191             return -TARGET_EINVAL;
10192 #endif /* AARCH64 */
10193         case PR_GET_SECCOMP:
10194         case PR_SET_SECCOMP:
10195             /* Disable seccomp to prevent the target disabling syscalls we
10196              * need. */
10197             return -TARGET_EINVAL;
10198         default:
10199             /* Most prctl options have no pointer arguments */
10200             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10201         }
10202         break;
10203 #ifdef TARGET_NR_arch_prctl
10204     case TARGET_NR_arch_prctl:
10205 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10206         return do_arch_prctl(cpu_env, arg1, arg2);
10207 #else
10208 #error unreachable
10209 #endif
10210 #endif
10211 #ifdef TARGET_NR_pread64
10212     case TARGET_NR_pread64:
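        /*
         * On ABIs that pass 64-bit syscall arguments in aligned register
         * pairs the offset halves arrive one slot later, hence the shuffle
         * below before target_offset64() reassembles them (pwrite64 does
         * the same).
         */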
10213         if (regpairs_aligned(cpu_env, num)) {
10214             arg4 = arg5;
10215             arg5 = arg6;
10216         }
10217         if (arg2 == 0 && arg3 == 0) {
10218             /* Special-case NULL buffer and zero length, which should succeed */
10219             p = 0;
10220         } else {
10221             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10222             if (!p) {
10223                 return -TARGET_EFAULT;
10224             }
10225         }
10226         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10227         unlock_user(p, arg2, ret);
10228         return ret;
10229     case TARGET_NR_pwrite64:
10230         if (regpairs_aligned(cpu_env, num)) {
10231             arg4 = arg5;
10232             arg5 = arg6;
10233         }
10234         if (arg2 == 0 && arg3 == 0) {
10235             /* Special-case NULL buffer and zero length, which should succeed */
10236             p = 0;
10237         } else {
10238             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10239             if (!p) {
10240                 return -TARGET_EFAULT;
10241             }
10242         }
10243         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10244         unlock_user(p, arg2, 0);
10245         return ret;
10246 #endif
10247     case TARGET_NR_getcwd:
10248         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10249             return -TARGET_EFAULT;
10250         ret = get_errno(sys_getcwd1(p, arg2));
10251         unlock_user(p, arg1, ret);
10252         return ret;
10253     case TARGET_NR_capget:
10254     case TARGET_NR_capset:
10255     {
10256         struct target_user_cap_header *target_header;
10257         struct target_user_cap_data *target_data = NULL;
10258         struct __user_cap_header_struct header;
10259         struct __user_cap_data_struct data[2];
10260         struct __user_cap_data_struct *dataptr = NULL;
10261         int i, target_datalen;
10262         int data_items = 1;
10263 
10264         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10265             return -TARGET_EFAULT;
10266         }
10267         header.version = tswap32(target_header->version);
10268         header.pid = tswap32(target_header->pid);
10269 
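        /*
         * _LINUX_CAPABILITY_VERSION is the v1 ABI with a single 32-bit
         * capability set; v2 and v3 describe 64 capabilities and therefore
         * need two data structs.
         */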
10270         if (header.version != _LINUX_CAPABILITY_VERSION) {
10271             /* Versions 2 and up take a pointer to two user_data structs */
10272             data_items = 2;
10273         }
10274 
10275         target_datalen = sizeof(*target_data) * data_items;
10276 
10277         if (arg2) {
10278             if (num == TARGET_NR_capget) {
10279                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10280             } else {
10281                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10282             }
10283             if (!target_data) {
10284                 unlock_user_struct(target_header, arg1, 0);
10285                 return -TARGET_EFAULT;
10286             }
10287 
10288             if (num == TARGET_NR_capset) {
10289                 for (i = 0; i < data_items; i++) {
10290                     data[i].effective = tswap32(target_data[i].effective);
10291                     data[i].permitted = tswap32(target_data[i].permitted);
10292                     data[i].inheritable = tswap32(target_data[i].inheritable);
10293                 }
10294             }
10295 
10296             dataptr = data;
10297         }
10298 
10299         if (num == TARGET_NR_capget) {
10300             ret = get_errno(capget(&header, dataptr));
10301         } else {
10302             ret = get_errno(capset(&header, dataptr));
10303         }
10304 
10305         /* The kernel always updates version for both capget and capset */
10306         target_header->version = tswap32(header.version);
10307         unlock_user_struct(target_header, arg1, 1);
10308 
10309         if (arg2) {
10310             if (num == TARGET_NR_capget) {
10311                 for (i = 0; i < data_items; i++) {
10312                     target_data[i].effective = tswap32(data[i].effective);
10313                     target_data[i].permitted = tswap32(data[i].permitted);
10314                     target_data[i].inheritable = tswap32(data[i].inheritable);
10315                 }
10316                 unlock_user(target_data, arg2, target_datalen);
10317             } else {
10318                 unlock_user(target_data, arg2, 0);
10319             }
10320         }
10321         return ret;
10322     }
10323     case TARGET_NR_sigaltstack:
10324         return do_sigaltstack(arg1, arg2,
10325                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10326 
10327 #ifdef CONFIG_SENDFILE
10328 #ifdef TARGET_NR_sendfile
10329     case TARGET_NR_sendfile:
10330     {
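        /*
         * sendfile() reads a long-sized offset from the guest (get_user_sal);
         * sendfile64 below is identical except that it reads a 64-bit offset.
         */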
10331         off_t *offp = NULL;
10332         off_t off;
10333         if (arg3) {
10334             ret = get_user_sal(off, arg3);
10335             if (is_error(ret)) {
10336                 return ret;
10337             }
10338             offp = &off;
10339         }
10340         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10341         if (!is_error(ret) && arg3) {
10342             abi_long ret2 = put_user_sal(off, arg3);
10343             if (is_error(ret2)) {
10344                 ret = ret2;
10345             }
10346         }
10347         return ret;
10348     }
10349 #endif
10350 #ifdef TARGET_NR_sendfile64
10351     case TARGET_NR_sendfile64:
10352     {
10353         off_t *offp = NULL;
10354         off_t off;
10355         if (arg3) {
10356             ret = get_user_s64(off, arg3);
10357             if (is_error(ret)) {
10358                 return ret;
10359             }
10360             offp = &off;
10361         }
10362         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10363         if (!is_error(ret) && arg3) {
10364             abi_long ret2 = put_user_s64(off, arg3);
10365             if (is_error(ret2)) {
10366                 ret = ret2;
10367             }
10368         }
10369         return ret;
10370     }
10371 #endif
10372 #endif
10373 #ifdef TARGET_NR_vfork
10374     case TARGET_NR_vfork:
10375         return get_errno(do_fork(cpu_env,
10376                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10377                          0, 0, 0, 0));
10378 #endif
10379 #ifdef TARGET_NR_ugetrlimit
10380     case TARGET_NR_ugetrlimit:
10381     {
10382         struct rlimit rlim;
10383         int resource = target_to_host_resource(arg1);
10384         ret = get_errno(getrlimit(resource, &rlim));
10385         if (!is_error(ret)) {
10386             struct target_rlimit *target_rlim;
10387             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10388                 return -TARGET_EFAULT;
10389             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10390             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10391             unlock_user_struct(target_rlim, arg2, 1);
10392         }
10393         return ret;
10394     }
10395 #endif
10396 #ifdef TARGET_NR_truncate64
10397     case TARGET_NR_truncate64:
10398         if (!(p = lock_user_string(arg1)))
10399             return -TARGET_EFAULT;
10400         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10401         unlock_user(p, arg1, 0);
10402         return ret;
10403 #endif
10404 #ifdef TARGET_NR_ftruncate64
10405     case TARGET_NR_ftruncate64:
10406         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10407 #endif
10408 #ifdef TARGET_NR_stat64
10409     case TARGET_NR_stat64:
10410         if (!(p = lock_user_string(arg1))) {
10411             return -TARGET_EFAULT;
10412         }
10413         ret = get_errno(stat(path(p), &st));
10414         unlock_user(p, arg1, 0);
10415         if (!is_error(ret))
10416             ret = host_to_target_stat64(cpu_env, arg2, &st);
10417         return ret;
10418 #endif
10419 #ifdef TARGET_NR_lstat64
10420     case TARGET_NR_lstat64:
10421         if (!(p = lock_user_string(arg1))) {
10422             return -TARGET_EFAULT;
10423         }
10424         ret = get_errno(lstat(path(p), &st));
10425         unlock_user(p, arg1, 0);
10426         if (!is_error(ret))
10427             ret = host_to_target_stat64(cpu_env, arg2, &st);
10428         return ret;
10429 #endif
10430 #ifdef TARGET_NR_fstat64
10431     case TARGET_NR_fstat64:
10432         ret = get_errno(fstat(arg1, &st));
10433         if (!is_error(ret))
10434             ret = host_to_target_stat64(cpu_env, arg2, &st);
10435         return ret;
10436 #endif
10437 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10438 #ifdef TARGET_NR_fstatat64
10439     case TARGET_NR_fstatat64:
10440 #endif
10441 #ifdef TARGET_NR_newfstatat
10442     case TARGET_NR_newfstatat:
10443 #endif
10444         if (!(p = lock_user_string(arg2))) {
10445             return -TARGET_EFAULT;
10446         }
10447         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10448         unlock_user(p, arg2, 0);
10449         if (!is_error(ret))
10450             ret = host_to_target_stat64(cpu_env, arg3, &st);
10451         return ret;
10452 #endif
10453 #if defined(TARGET_NR_statx)
10454     case TARGET_NR_statx:
10455         {
10456             struct target_statx *target_stx;
10457             int dirfd = arg1;
10458             int flags = arg3;
10459 
10460             p = lock_user_string(arg2);
10461             if (p == NULL) {
10462                 return -TARGET_EFAULT;
10463             }
10464 #if defined(__NR_statx)
10465             {
10466                 /*
10467                  * It is assumed that struct statx is architecture independent.
10468                  */
10469                 struct target_statx host_stx;
10470                 int mask = arg4;
10471 
10472                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10473                 if (!is_error(ret)) {
10474                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10475                         unlock_user(p, arg2, 0);
10476                         return -TARGET_EFAULT;
10477                     }
10478                 }
10479 
10480                 if (ret != -TARGET_ENOSYS) {
10481                     unlock_user(p, arg2, 0);
10482                     return ret;
10483                 }
10484             }
10485 #endif
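            /*
             * Host has no statx() (or it returned ENOSYS): emulate with
             * fstatat() and fill in only the statx fields that struct stat
             * can provide.
             */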
10486             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10487             unlock_user(p, arg2, 0);
10488 
10489             if (!is_error(ret)) {
10490                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10491                     return -TARGET_EFAULT;
10492                 }
10493                 memset(target_stx, 0, sizeof(*target_stx));
10494                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10495                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10496                 __put_user(st.st_ino, &target_stx->stx_ino);
10497                 __put_user(st.st_mode, &target_stx->stx_mode);
10498                 __put_user(st.st_uid, &target_stx->stx_uid);
10499                 __put_user(st.st_gid, &target_stx->stx_gid);
10500                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10501                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10502                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10503                 __put_user(st.st_size, &target_stx->stx_size);
10504                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10505                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10506                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10507                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10508                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10509                 unlock_user_struct(target_stx, arg5, 1);
10510             }
10511         }
10512         return ret;
10513 #endif
10514 #ifdef TARGET_NR_lchown
10515     case TARGET_NR_lchown:
10516         if (!(p = lock_user_string(arg1)))
10517             return -TARGET_EFAULT;
10518         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10519         unlock_user(p, arg1, 0);
10520         return ret;
10521 #endif
10522 #ifdef TARGET_NR_getuid
10523     case TARGET_NR_getuid:
10524         return get_errno(high2lowuid(getuid()));
10525 #endif
10526 #ifdef TARGET_NR_getgid
10527     case TARGET_NR_getgid:
10528         return get_errno(high2lowgid(getgid()));
10529 #endif
10530 #ifdef TARGET_NR_geteuid
10531     case TARGET_NR_geteuid:
10532         return get_errno(high2lowuid(geteuid()));
10533 #endif
10534 #ifdef TARGET_NR_getegid
10535     case TARGET_NR_getegid:
10536         return get_errno(high2lowgid(getegid()));
10537 #endif
10538     case TARGET_NR_setreuid:
10539         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10540     case TARGET_NR_setregid:
10541         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10542     case TARGET_NR_getgroups:
10543         {
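            /*
             * This is the legacy group-list call: ids are converted with
             * high2lowgid()/tswapid() because some target ABIs still use
             * 16-bit uids/gids here.
             */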
10544             int gidsetsize = arg1;
10545             target_id *target_grouplist;
10546             gid_t *grouplist;
10547             int i;
10548 
10549             grouplist = alloca(gidsetsize * sizeof(gid_t));
10550             ret = get_errno(getgroups(gidsetsize, grouplist));
10551             if (gidsetsize == 0)
10552                 return ret;
10553             if (!is_error(ret)) {
10554                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10555                 if (!target_grouplist)
10556                     return -TARGET_EFAULT;
10557                 for(i = 0;i < ret; i++)
10558                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10559                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10560             }
10561         }
10562         return ret;
10563     case TARGET_NR_setgroups:
10564         {
10565             int gidsetsize = arg1;
10566             target_id *target_grouplist;
10567             gid_t *grouplist = NULL;
10568             int i;
10569             if (gidsetsize) {
10570                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10571                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10572                 if (!target_grouplist) {
10573                     return -TARGET_EFAULT;
10574                 }
10575                 for (i = 0; i < gidsetsize; i++) {
10576                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10577                 }
10578                 unlock_user(target_grouplist, arg2, 0);
10579             }
10580             return get_errno(setgroups(gidsetsize, grouplist));
10581         }
10582     case TARGET_NR_fchown:
10583         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10584 #if defined(TARGET_NR_fchownat)
10585     case TARGET_NR_fchownat:
10586         if (!(p = lock_user_string(arg2)))
10587             return -TARGET_EFAULT;
10588         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10589                                  low2highgid(arg4), arg5));
10590         unlock_user(p, arg2, 0);
10591         return ret;
10592 #endif
10593 #ifdef TARGET_NR_setresuid
10594     case TARGET_NR_setresuid:
10595         return get_errno(sys_setresuid(low2highuid(arg1),
10596                                        low2highuid(arg2),
10597                                        low2highuid(arg3)));
10598 #endif
10599 #ifdef TARGET_NR_getresuid
10600     case TARGET_NR_getresuid:
10601         {
10602             uid_t ruid, euid, suid;
10603             ret = get_errno(getresuid(&ruid, &euid, &suid));
10604             if (!is_error(ret)) {
10605                 if (put_user_id(high2lowuid(ruid), arg1)
10606                     || put_user_id(high2lowuid(euid), arg2)
10607                     || put_user_id(high2lowuid(suid), arg3))
10608                     return -TARGET_EFAULT;
10609             }
10610         }
10611         return ret;
10612 #endif
10613 #ifdef TARGET_NR_setresgid
10614     case TARGET_NR_setresgid:
10615         return get_errno(sys_setresgid(low2highgid(arg1),
10616                                        low2highgid(arg2),
10617                                        low2highgid(arg3)));
10618 #endif
10619 #ifdef TARGET_NR_getresgid
10620     case TARGET_NR_getresgid:
10621         {
10622             gid_t rgid, egid, sgid;
10623             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10624             if (!is_error(ret)) {
10625                 if (put_user_id(high2lowgid(rgid), arg1)
10626                     || put_user_id(high2lowgid(egid), arg2)
10627                     || put_user_id(high2lowgid(sgid), arg3))
10628                     return -TARGET_EFAULT;
10629             }
10630         }
10631         return ret;
10632 #endif
10633 #ifdef TARGET_NR_chown
10634     case TARGET_NR_chown:
10635         if (!(p = lock_user_string(arg1)))
10636             return -TARGET_EFAULT;
10637         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10638         unlock_user(p, arg1, 0);
10639         return ret;
10640 #endif
10641     case TARGET_NR_setuid:
10642         return get_errno(sys_setuid(low2highuid(arg1)));
10643     case TARGET_NR_setgid:
10644         return get_errno(sys_setgid(low2highgid(arg1)));
10645     case TARGET_NR_setfsuid:
10646         return get_errno(setfsuid(arg1));
10647     case TARGET_NR_setfsgid:
10648         return get_errno(setfsgid(arg1));
10649 
10650 #ifdef TARGET_NR_lchown32
10651     case TARGET_NR_lchown32:
10652         if (!(p = lock_user_string(arg1)))
10653             return -TARGET_EFAULT;
10654         ret = get_errno(lchown(p, arg2, arg3));
10655         unlock_user(p, arg1, 0);
10656         return ret;
10657 #endif
10658 #ifdef TARGET_NR_getuid32
10659     case TARGET_NR_getuid32:
10660         return get_errno(getuid());
10661 #endif
10662 
10663 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10664    /* Alpha specific */
10665     case TARGET_NR_getxuid:
10666         {
10667             uid_t euid;
10668             euid = geteuid();
10669             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10670         }
10671         return get_errno(getuid());
10672 #endif
10673 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10674    /* Alpha specific */
10675     case TARGET_NR_getxgid:
10676         {
10677             gid_t egid;
10678             egid = getegid();
10679             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10680         }
10681         return get_errno(getgid());
10682 #endif
10683 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10684     /* Alpha specific */
10685     case TARGET_NR_osf_getsysinfo:
10686         ret = -TARGET_EOPNOTSUPP;
10687         switch (arg1) {
10688           case TARGET_GSI_IEEE_FP_CONTROL:
10689             {
10690                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10691                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10692 
10693                 swcr &= ~SWCR_STATUS_MASK;
10694                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10695 
10696                 if (put_user_u64(swcr, arg2))
10697                     return -TARGET_EFAULT;
10698                 ret = 0;
10699             }
10700             break;
10701 
10702           /* case GSI_IEEE_STATE_AT_SIGNAL:
10703              -- Not implemented in linux kernel.
10704              case GSI_UACPROC:
10705              -- Retrieves current unaligned access state; not much used.
10706              case GSI_PROC_TYPE:
10707              -- Retrieves implver information; surely not used.
10708              case GSI_GET_HWRPB:
10709              -- Grabs a copy of the HWRPB; surely not used.
10710           */
10711         }
10712         return ret;
10713 #endif
10714 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10715     /* Alpha specific */
10716     case TARGET_NR_osf_setsysinfo:
10717         ret = -TARGET_EOPNOTSUPP;
10718         switch (arg1) {
10719           case TARGET_SSI_IEEE_FP_CONTROL:
10720             {
10721                 uint64_t swcr, fpcr;
10722 
10723                 if (get_user_u64 (swcr, arg2)) {
10724                     return -TARGET_EFAULT;
10725                 }
10726 
10727                 /*
10728                  * The kernel calls swcr_update_status to update the
10729                  * status bits from the fpcr at every point that it
10730                  * could be queried.  Therefore, we store the status
10731                  * bits only in FPCR.
10732                  */
10733                 ((CPUAlphaState *)cpu_env)->swcr
10734                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10735 
10736                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10737                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10738                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10739                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10740                 ret = 0;
10741             }
10742             break;
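            /*
             * Note that the GSI_IEEE_FP_CONTROL read path above performs the
             * inverse operation: it reconstructs the status bits from the
             * hardware FPCR (fpcr >> 35) and merges them into the saved swcr
             * before copying the result out to the guest.
             */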
10743 
10744           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10745             {
10746                 uint64_t exc, fpcr, fex;
10747 
10748                 if (get_user_u64(exc, arg2)) {
10749                     return -TARGET_EFAULT;
10750                 }
10751                 exc &= SWCR_STATUS_MASK;
10752                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10753 
10754                 /* Old exceptions are not signaled.  */
10755                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10756                 fex = exc & ~fex;
10757                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10758                 fex &= ((CPUArchState *)cpu_env)->swcr;
10759 
10760                 /* Update the hardware fpcr.  */
10761                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10762                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10763 
10764                 if (fex) {
10765                     int si_code = TARGET_FPE_FLTUNK;
10766                     target_siginfo_t info;
10767 
10768                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10769                         si_code = TARGET_FPE_FLTUND;
10770                     }
10771                     if (fex & SWCR_TRAP_ENABLE_INE) {
10772                         si_code = TARGET_FPE_FLTRES;
10773                     }
10774                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10775                         si_code = TARGET_FPE_FLTUND;
10776                     }
10777                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10778                         si_code = TARGET_FPE_FLTOVF;
10779                     }
10780                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10781                         si_code = TARGET_FPE_FLTDIV;
10782                     }
10783                     if (fex & SWCR_TRAP_ENABLE_INV) {
10784                         si_code = TARGET_FPE_FLTINV;
10785                     }
10786 
10787                     info.si_signo = SIGFPE;
10788                     info.si_errno = 0;
10789                     info.si_code = si_code;
10790                     info._sifields._sigfault._addr
10791                         = ((CPUArchState *)cpu_env)->pc;
10792                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10793                                  QEMU_SI_FAULT, &info);
10794                 }
10795                 ret = 0;
10796             }
10797             break;
10798 
10799           /* case SSI_NVPAIRS:
10800              -- Used with SSIN_UACPROC to enable unaligned accesses.
10801              case SSI_IEEE_STATE_AT_SIGNAL:
10802              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10803              -- Not implemented in linux kernel
10804           */
10805         }
10806         return ret;
10807 #endif
10808 #ifdef TARGET_NR_osf_sigprocmask
10809     /* Alpha specific.  */
10810     case TARGET_NR_osf_sigprocmask:
10811         {
10812             abi_ulong mask;
10813             int how;
10814             sigset_t set, oldset;
10815 
10816             switch(arg1) {
10817             case TARGET_SIG_BLOCK:
10818                 how = SIG_BLOCK;
10819                 break;
10820             case TARGET_SIG_UNBLOCK:
10821                 how = SIG_UNBLOCK;
10822                 break;
10823             case TARGET_SIG_SETMASK:
10824                 how = SIG_SETMASK;
10825                 break;
10826             default:
10827                 return -TARGET_EINVAL;
10828             }
10829             mask = arg2;
10830             target_to_host_old_sigset(&set, &mask);
10831             ret = do_sigprocmask(how, &set, &oldset);
10832             if (!ret) {
10833                 host_to_target_old_sigset(&mask, &oldset);
10834                 ret = mask;
10835             }
10836         }
10837         return ret;
10838 #endif
10839 
10840 #ifdef TARGET_NR_getgid32
10841     case TARGET_NR_getgid32:
10842         return get_errno(getgid());
10843 #endif
10844 #ifdef TARGET_NR_geteuid32
10845     case TARGET_NR_geteuid32:
10846         return get_errno(geteuid());
10847 #endif
10848 #ifdef TARGET_NR_getegid32
10849     case TARGET_NR_getegid32:
10850         return get_errno(getegid());
10851 #endif
10852 #ifdef TARGET_NR_setreuid32
10853     case TARGET_NR_setreuid32:
10854         return get_errno(setreuid(arg1, arg2));
10855 #endif
10856 #ifdef TARGET_NR_setregid32
10857     case TARGET_NR_setregid32:
10858         return get_errno(setregid(arg1, arg2));
10859 #endif
10860 #ifdef TARGET_NR_getgroups32
10861     case TARGET_NR_getgroups32:
10862         {
10863             int gidsetsize = arg1;
10864             uint32_t *target_grouplist;
10865             gid_t *grouplist;
10866             int i;
10867 
10868             grouplist = alloca(gidsetsize * sizeof(gid_t));
10869             ret = get_errno(getgroups(gidsetsize, grouplist));
10870             if (gidsetsize == 0)
10871                 return ret;
10872             if (!is_error(ret)) {
10873                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10874                 if (!target_grouplist) {
10875                     return -TARGET_EFAULT;
10876                 }
10877                 for (i = 0; i < ret; i++)
10878                     target_grouplist[i] = tswap32(grouplist[i]);
10879                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10880             }
10881         }
10882         return ret;
10883 #endif
10884 #ifdef TARGET_NR_setgroups32
10885     case TARGET_NR_setgroups32:
10886         {
10887             int gidsetsize = arg1;
10888             uint32_t *target_grouplist;
10889             gid_t *grouplist;
10890             int i;
10891 
10892             grouplist = alloca(gidsetsize * sizeof(gid_t));
10893             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10894             if (!target_grouplist) {
10895                 return -TARGET_EFAULT;
10896             }
10897             for (i = 0; i < gidsetsize; i++)
10898                 grouplist[i] = tswap32(target_grouplist[i]);
10899             unlock_user(target_grouplist, arg2, 0);
10900             return get_errno(setgroups(gidsetsize, grouplist));
10901         }
10902 #endif
10903 #ifdef TARGET_NR_fchown32
10904     case TARGET_NR_fchown32:
10905         return get_errno(fchown(arg1, arg2, arg3));
10906 #endif
10907 #ifdef TARGET_NR_setresuid32
10908     case TARGET_NR_setresuid32:
10909         return get_errno(sys_setresuid(arg1, arg2, arg3));
10910 #endif
10911 #ifdef TARGET_NR_getresuid32
10912     case TARGET_NR_getresuid32:
10913         {
10914             uid_t ruid, euid, suid;
10915             ret = get_errno(getresuid(&ruid, &euid, &suid));
10916             if (!is_error(ret)) {
10917                 if (put_user_u32(ruid, arg1)
10918                     || put_user_u32(euid, arg2)
10919                     || put_user_u32(suid, arg3))
10920                     return -TARGET_EFAULT;
10921             }
10922         }
10923         return ret;
10924 #endif
10925 #ifdef TARGET_NR_setresgid32
10926     case TARGET_NR_setresgid32:
10927         return get_errno(sys_setresgid(arg1, arg2, arg3));
10928 #endif
10929 #ifdef TARGET_NR_getresgid32
10930     case TARGET_NR_getresgid32:
10931         {
10932             gid_t rgid, egid, sgid;
10933             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10934             if (!is_error(ret)) {
10935                 if (put_user_u32(rgid, arg1)
10936                     || put_user_u32(egid, arg2)
10937                     || put_user_u32(sgid, arg3))
10938                     return -TARGET_EFAULT;
10939             }
10940         }
10941         return ret;
10942 #endif
10943 #ifdef TARGET_NR_chown32
10944     case TARGET_NR_chown32:
10945         if (!(p = lock_user_string(arg1)))
10946             return -TARGET_EFAULT;
10947         ret = get_errno(chown(p, arg2, arg3));
10948         unlock_user(p, arg1, 0);
10949         return ret;
10950 #endif
10951 #ifdef TARGET_NR_setuid32
10952     case TARGET_NR_setuid32:
10953         return get_errno(sys_setuid(arg1));
10954 #endif
10955 #ifdef TARGET_NR_setgid32
10956     case TARGET_NR_setgid32:
10957         return get_errno(sys_setgid(arg1));
10958 #endif
10959 #ifdef TARGET_NR_setfsuid32
10960     case TARGET_NR_setfsuid32:
10961         return get_errno(setfsuid(arg1));
10962 #endif
10963 #ifdef TARGET_NR_setfsgid32
10964     case TARGET_NR_setfsgid32:
10965         return get_errno(setfsgid(arg1));
10966 #endif
10967 #ifdef TARGET_NR_mincore
10968     case TARGET_NR_mincore:
10969         {
10970             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10971             if (!a) {
10972                 return -TARGET_ENOMEM;
10973             }
10974             p = lock_user_string(arg3);
10975             if (!p) {
10976                 ret = -TARGET_EFAULT;
10977             } else {
10978                 ret = get_errno(mincore(a, arg2, p));
10979                 unlock_user(p, arg3, ret);
10980             }
10981             unlock_user(a, arg1, 0);
10982         }
10983         return ret;
10984 #endif
10985 #ifdef TARGET_NR_arm_fadvise64_64
10986     case TARGET_NR_arm_fadvise64_64:
10987         /* arm_fadvise64_64 looks like fadvise64_64 but
10988          * with different argument order: fd, advice, offset, len
10989          * rather than the usual fd, offset, len, advice.
10990          * Note that offset and len are both 64-bit so appear as
10991          * pairs of 32-bit registers.
10992          */
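        /*
         * For example, a hypothetical guest call
         *     arm_fadvise64_64(fd, POSIX_FADV_DONTNEED, offset, len)
         * arrives here as arg1 = fd, arg2 = advice, offset in the (arg3, arg4)
         * pair and len in the (arg5, arg6) pair, which is what the call below
         * assumes.
         */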
10993         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10994                             target_offset64(arg5, arg6), arg2);
10995         return -host_to_target_errno(ret);
10996 #endif
10997 
10998 #if TARGET_ABI_BITS == 32
10999 
11000 #ifdef TARGET_NR_fadvise64_64
11001     case TARGET_NR_fadvise64_64:
11002 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11003         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11004         ret = arg2;
11005         arg2 = arg3;
11006         arg3 = arg4;
11007         arg4 = arg5;
11008         arg5 = arg6;
11009         arg6 = ret;
11010 #else
11011         /* 6 args: fd, offset (high, low), len (high, low), advice */
11012         if (regpairs_aligned(cpu_env, num)) {
11013             /* offset is in (3,4), len in (5,6) and advice in 7 */
11014             arg2 = arg3;
11015             arg3 = arg4;
11016             arg4 = arg5;
11017             arg5 = arg6;
11018             arg6 = arg7;
11019         }
11020 #endif
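        /*
         * After the shuffling above the arguments are normalised to the
         * generic layout: arg1 = fd, offset pair in (arg2, arg3), len pair in
         * (arg4, arg5) and advice in arg6, matching the call below.
         */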
11021         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11022                             target_offset64(arg4, arg5), arg6);
11023         return -host_to_target_errno(ret);
11024 #endif
11025 
11026 #ifdef TARGET_NR_fadvise64
11027     case TARGET_NR_fadvise64:
11028         /* 5 args: fd, offset (high, low), len, advice */
11029         if (regpairs_aligned(cpu_env, num)) {
11030             /* offset is in (3,4), len in 5 and advice in 6 */
11031             arg2 = arg3;
11032             arg3 = arg4;
11033             arg4 = arg5;
11034             arg5 = arg6;
11035         }
11036         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11037         return -host_to_target_errno(ret);
11038 #endif
11039 
11040 #else /* not a 32-bit ABI */
11041 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11042 #ifdef TARGET_NR_fadvise64_64
11043     case TARGET_NR_fadvise64_64:
11044 #endif
11045 #ifdef TARGET_NR_fadvise64
11046     case TARGET_NR_fadvise64:
11047 #endif
11048 #ifdef TARGET_S390X
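        /*
         * The s390x ABI numbers these advice values differently: the guest
         * passes 6 for POSIX_FADV_DONTNEED and 7 for POSIX_FADV_NOREUSE,
         * while 4 and 5 are not valid there, so remap them to the host
         * constants (and turn guest 4/5 into deliberately invalid values).
         */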
11049         switch (arg4) {
11050         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11051         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11052         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11053         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11054         default: break;
11055         }
11056 #endif
11057         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11058 #endif
11059 #endif /* end of 64-bit ABI fadvise handling */
11060 
11061 #ifdef TARGET_NR_madvise
11062     case TARGET_NR_madvise:
11063         /* A straight passthrough may not be safe because qemu sometimes
11064            turns private file-backed mappings into anonymous mappings.
11065            This will break MADV_DONTNEED.
11066            This is a hint, so ignoring and returning success is ok.  */
11067         return 0;
11068 #endif
11069 #if TARGET_ABI_BITS == 32
11070     case TARGET_NR_fcntl64:
11071     {
11072         int cmd;
11073         struct flock64 fl;
11074         from_flock64_fn *copyfrom = copy_from_user_flock64;
11075         to_flock64_fn *copyto = copy_to_user_flock64;
11076 
11077 #ifdef TARGET_ARM
11078         if (!((CPUARMState *)cpu_env)->eabi) {
11079             copyfrom = copy_from_user_oabi_flock64;
11080             copyto = copy_to_user_oabi_flock64;
11081         }
11082 #endif
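        /*
         * The OABI variants are needed because the old Arm ABI lays out
         * struct flock64 without the padding that EABI's 64-bit alignment of
         * l_start introduces, so the field offsets seen in guest memory
         * differ between the two ABIs.
         */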
11083 
11084         cmd = target_to_host_fcntl_cmd(arg2);
11085         if (cmd == -TARGET_EINVAL) {
11086             return cmd;
11087         }
11088 
11089         switch(arg2) {
11090         case TARGET_F_GETLK64:
11091             ret = copyfrom(&fl, arg3);
11092             if (ret) {
11093                 break;
11094             }
11095             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11096             if (ret == 0) {
11097                 ret = copyto(arg3, &fl);
11098             }
11099             break;
11100 
11101         case TARGET_F_SETLK64:
11102         case TARGET_F_SETLKW64:
11103             ret = copyfrom(&fl, arg3);
11104             if (ret) {
11105                 break;
11106             }
11107             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11108             break;
11109         default:
11110             ret = do_fcntl(arg1, arg2, arg3);
11111             break;
11112         }
11113         return ret;
11114     }
11115 #endif
11116 #ifdef TARGET_NR_cacheflush
11117     case TARGET_NR_cacheflush:
11118         /* self-modifying code is handled automatically, so nothing needed */
11119         return 0;
11120 #endif
11121 #ifdef TARGET_NR_getpagesize
11122     case TARGET_NR_getpagesize:
11123         return TARGET_PAGE_SIZE;
11124 #endif
11125     case TARGET_NR_gettid:
11126         return get_errno(sys_gettid());
11127 #ifdef TARGET_NR_readahead
11128     case TARGET_NR_readahead:
11129 #if TARGET_ABI_BITS == 32
11130         if (regpairs_aligned(cpu_env, num)) {
11131             arg2 = arg3;
11132             arg3 = arg4;
11133             arg4 = arg5;
11134         }
11135         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11136 #else
11137         ret = get_errno(readahead(arg1, arg2, arg3));
11138 #endif
11139         return ret;
11140 #endif
11141 #ifdef CONFIG_ATTR
11142 #ifdef TARGET_NR_setxattr
11143     case TARGET_NR_listxattr:
11144     case TARGET_NR_llistxattr:
11145     {
11146         void *p, *b = 0;
11147         if (arg2) {
11148             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11149             if (!b) {
11150                 return -TARGET_EFAULT;
11151             }
11152         }
11153         p = lock_user_string(arg1);
11154         if (p) {
11155             if (num == TARGET_NR_listxattr) {
11156                 ret = get_errno(listxattr(p, b, arg3));
11157             } else {
11158                 ret = get_errno(llistxattr(p, b, arg3));
11159             }
11160         } else {
11161             ret = -TARGET_EFAULT;
11162         }
11163         unlock_user(p, arg1, 0);
11164         unlock_user(b, arg2, arg3);
11165         return ret;
11166     }
11167     case TARGET_NR_flistxattr:
11168     {
11169         void *b = 0;
11170         if (arg2) {
11171             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11172             if (!b) {
11173                 return -TARGET_EFAULT;
11174             }
11175         }
11176         ret = get_errno(flistxattr(arg1, b, arg3));
11177         unlock_user(b, arg2, arg3);
11178         return ret;
11179     }
11180     case TARGET_NR_setxattr:
11181     case TARGET_NR_lsetxattr:
11182         {
11183             void *p, *n, *v = 0;
11184             if (arg3) {
11185                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11186                 if (!v) {
11187                     return -TARGET_EFAULT;
11188                 }
11189             }
11190             p = lock_user_string(arg1);
11191             n = lock_user_string(arg2);
11192             if (p && n) {
11193                 if (num == TARGET_NR_setxattr) {
11194                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11195                 } else {
11196                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11197                 }
11198             } else {
11199                 ret = -TARGET_EFAULT;
11200             }
11201             unlock_user(p, arg1, 0);
11202             unlock_user(n, arg2, 0);
11203             unlock_user(v, arg3, 0);
11204         }
11205         return ret;
11206     case TARGET_NR_fsetxattr:
11207         {
11208             void *n, *v = 0;
11209             if (arg3) {
11210                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11211                 if (!v) {
11212                     return -TARGET_EFAULT;
11213                 }
11214             }
11215             n = lock_user_string(arg2);
11216             if (n) {
11217                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11218             } else {
11219                 ret = -TARGET_EFAULT;
11220             }
11221             unlock_user(n, arg2, 0);
11222             unlock_user(v, arg3, 0);
11223         }
11224         return ret;
11225     case TARGET_NR_getxattr:
11226     case TARGET_NR_lgetxattr:
11227         {
11228             void *p, *n, *v = 0;
11229             if (arg3) {
11230                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11231                 if (!v) {
11232                     return -TARGET_EFAULT;
11233                 }
11234             }
11235             p = lock_user_string(arg1);
11236             n = lock_user_string(arg2);
11237             if (p && n) {
11238                 if (num == TARGET_NR_getxattr) {
11239                     ret = get_errno(getxattr(p, n, v, arg4));
11240                 } else {
11241                     ret = get_errno(lgetxattr(p, n, v, arg4));
11242                 }
11243             } else {
11244                 ret = -TARGET_EFAULT;
11245             }
11246             unlock_user(p, arg1, 0);
11247             unlock_user(n, arg2, 0);
11248             unlock_user(v, arg3, arg4);
11249         }
11250         return ret;
11251     case TARGET_NR_fgetxattr:
11252         {
11253             void *n, *v = 0;
11254             if (arg3) {
11255                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11256                 if (!v) {
11257                     return -TARGET_EFAULT;
11258                 }
11259             }
11260             n = lock_user_string(arg2);
11261             if (n) {
11262                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11263             } else {
11264                 ret = -TARGET_EFAULT;
11265             }
11266             unlock_user(n, arg2, 0);
11267             unlock_user(v, arg3, arg4);
11268         }
11269         return ret;
11270     case TARGET_NR_removexattr:
11271     case TARGET_NR_lremovexattr:
11272         {
11273             void *p, *n;
11274             p = lock_user_string(arg1);
11275             n = lock_user_string(arg2);
11276             if (p && n) {
11277                 if (num == TARGET_NR_removexattr) {
11278                     ret = get_errno(removexattr(p, n));
11279                 } else {
11280                     ret = get_errno(lremovexattr(p, n));
11281                 }
11282             } else {
11283                 ret = -TARGET_EFAULT;
11284             }
11285             unlock_user(p, arg1, 0);
11286             unlock_user(n, arg2, 0);
11287         }
11288         return ret;
11289     case TARGET_NR_fremovexattr:
11290         {
11291             void *n;
11292             n = lock_user_string(arg2);
11293             if (n) {
11294                 ret = get_errno(fremovexattr(arg1, n));
11295             } else {
11296                 ret = -TARGET_EFAULT;
11297             }
11298             unlock_user(n, arg2, 0);
11299         }
11300         return ret;
11301 #endif
11302 #endif /* CONFIG_ATTR */
11303 #ifdef TARGET_NR_set_thread_area
11304     case TARGET_NR_set_thread_area:
11305 #if defined(TARGET_MIPS)
11306       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11307       return 0;
11308 #elif defined(TARGET_CRIS)
11309       if (arg1 & 0xff)
11310           ret = -TARGET_EINVAL;
11311       else {
11312           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11313           ret = 0;
11314       }
11315       return ret;
11316 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11317       return do_set_thread_area(cpu_env, arg1);
11318 #elif defined(TARGET_M68K)
11319       {
11320           TaskState *ts = cpu->opaque;
11321           ts->tp_value = arg1;
11322           return 0;
11323       }
11324 #else
11325       return -TARGET_ENOSYS;
11326 #endif
11327 #endif
11328 #ifdef TARGET_NR_get_thread_area
11329     case TARGET_NR_get_thread_area:
11330 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11331         return do_get_thread_area(cpu_env, arg1);
11332 #elif defined(TARGET_M68K)
11333         {
11334             TaskState *ts = cpu->opaque;
11335             return ts->tp_value;
11336         }
11337 #else
11338         return -TARGET_ENOSYS;
11339 #endif
11340 #endif
11341 #ifdef TARGET_NR_getdomainname
11342     case TARGET_NR_getdomainname:
11343         return -TARGET_ENOSYS;
11344 #endif
11345 
11346 #ifdef TARGET_NR_clock_settime
11347     case TARGET_NR_clock_settime:
11348     {
11349         struct timespec ts;
11350 
11351         ret = target_to_host_timespec(&ts, arg2);
11352         if (!is_error(ret)) {
11353             ret = get_errno(clock_settime(arg1, &ts));
11354         }
11355         return ret;
11356     }
11357 #endif
11358 #ifdef TARGET_NR_clock_gettime
11359     case TARGET_NR_clock_gettime:
11360     {
11361         struct timespec ts;
11362         ret = get_errno(clock_gettime(arg1, &ts));
11363         if (!is_error(ret)) {
11364             ret = host_to_target_timespec(arg2, &ts);
11365         }
11366         return ret;
11367     }
11368 #endif
11369 #ifdef TARGET_NR_clock_getres
11370     case TARGET_NR_clock_getres:
11371     {
11372         struct timespec ts;
11373         ret = get_errno(clock_getres(arg1, &ts));
11374         if (!is_error(ret)) {
11375             host_to_target_timespec(arg2, &ts);
11376         }
11377         return ret;
11378     }
11379 #endif
11380 #ifdef TARGET_NR_clock_nanosleep
11381     case TARGET_NR_clock_nanosleep:
11382     {
11383         struct timespec ts;
11384         target_to_host_timespec(&ts, arg3);
11385         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11386                                              &ts, arg4 ? &ts : NULL));
11387         if (arg4)
11388             host_to_target_timespec(arg4, &ts);
11389 
11390 #if defined(TARGET_PPC)
11391         /* clock_nanosleep is odd in that it returns positive errno values.
11392          * On PPC, CR0 bit 3 should be set in such a situation. */
11393         if (ret && ret != -TARGET_ERESTARTSYS) {
11394             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11395         }
11396 #endif
11397         return ret;
11398     }
11399 #endif
11400 
11401 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11402     case TARGET_NR_set_tid_address:
11403         return get_errno(set_tid_address((int *)g2h(arg1)));
11404 #endif
11405 
11406     case TARGET_NR_tkill:
11407         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11408 
11409     case TARGET_NR_tgkill:
11410         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11411                          target_to_host_signal(arg3)));
11412 
11413 #ifdef TARGET_NR_set_robust_list
11414     case TARGET_NR_set_robust_list:
11415     case TARGET_NR_get_robust_list:
11416         /* The ABI for supporting robust futexes has userspace pass
11417          * the kernel a pointer to a linked list which is updated by
11418          * userspace after the syscall; the list is walked by the kernel
11419          * when the thread exits. Since the linked list in QEMU guest
11420          * memory isn't a valid linked list for the host and we have
11421          * no way to reliably intercept the thread-death event, we can't
11422          * support these. Silently return ENOSYS so that guest userspace
11423          * falls back to a non-robust futex implementation (which should
11424          * be OK except in the corner case of the guest crashing while
11425          * holding a mutex that is shared with another process via
11426          * shared memory).
11427          */
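        /* For reference, what the guest would be registering is (roughly)
         *   struct robust_list_head {
         *       struct robust_list list;             (guest pointer)
         *       long futex_offset;
         *       struct robust_list *list_op_pending; (guest pointer)
         *   };
         * i.e. a list threaded through guest memory with guest pointers,
         * which the host kernel could not walk directly.
         */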
11428         return -TARGET_ENOSYS;
11429 #endif
11430 
11431 #if defined(TARGET_NR_utimensat)
11432     case TARGET_NR_utimensat:
11433         {
11434             struct timespec *tsp, ts[2];
11435             if (!arg3) {
11436                 tsp = NULL;
11437             } else {
11438                 target_to_host_timespec(ts, arg3);
11439                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11440                 tsp = ts;
11441             }
11442             if (!arg2)
11443                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11444             else {
11445                 if (!(p = lock_user_string(arg2))) {
11446                     return -TARGET_EFAULT;
11447                 }
11448                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11449                 unlock_user(p, arg2, 0);
11450             }
11451         }
11452         return ret;
11453 #endif
11454     case TARGET_NR_futex:
11455         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11456 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11457     case TARGET_NR_inotify_init:
11458         ret = get_errno(sys_inotify_init());
11459         if (ret >= 0) {
11460             fd_trans_register(ret, &target_inotify_trans);
11461         }
11462         return ret;
11463 #endif
11464 #ifdef CONFIG_INOTIFY1
11465 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11466     case TARGET_NR_inotify_init1:
11467         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11468                                           fcntl_flags_tbl)));
11469         if (ret >= 0) {
11470             fd_trans_register(ret, &target_inotify_trans);
11471         }
11472         return ret;
11473 #endif
11474 #endif
11475 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11476     case TARGET_NR_inotify_add_watch:
11477         p = lock_user_string(arg2);
11478         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11479         unlock_user(p, arg2, 0);
11480         return ret;
11481 #endif
11482 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11483     case TARGET_NR_inotify_rm_watch:
11484         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11485 #endif
11486 
11487 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11488     case TARGET_NR_mq_open:
11489         {
11490             struct mq_attr posix_mq_attr;
11491             struct mq_attr *pposix_mq_attr;
11492             int host_flags;
11493 
11494             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11495             pposix_mq_attr = NULL;
11496             if (arg4) {
11497                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11498                     return -TARGET_EFAULT;
11499                 }
11500                 pposix_mq_attr = &posix_mq_attr;
11501             }
11502             p = lock_user_string(arg1 - 1);
11503             if (!p) {
11504                 return -TARGET_EFAULT;
11505             }
11506             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11507             unlock_user (p, arg1, 0);
11508         }
11509         return ret;
11510 
11511     case TARGET_NR_mq_unlink:
11512         p = lock_user_string(arg1 - 1);
11513         if (!p) {
11514             return -TARGET_EFAULT;
11515         }
11516         ret = get_errno(mq_unlink(p));
11517         unlock_user (p, arg1, 0);
11518         return ret;
11519 
11520     case TARGET_NR_mq_timedsend:
11521         {
11522             struct timespec ts;
11523 
11524             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11525             if (arg5 != 0) {
11526                 target_to_host_timespec(&ts, arg5);
11527                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11528                 host_to_target_timespec(arg5, &ts);
11529             } else {
11530                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11531             }
11532             unlock_user (p, arg2, arg3);
11533         }
11534         return ret;
11535 
11536     case TARGET_NR_mq_timedreceive:
11537         {
11538             struct timespec ts;
11539             unsigned int prio;
11540 
11541             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11542             if (arg5 != 0) {
11543                 target_to_host_timespec(&ts, arg5);
11544                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11545                                                      &prio, &ts));
11546                 host_to_target_timespec(arg5, &ts);
11547             } else {
11548                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11549                                                      &prio, NULL));
11550             }
11551             unlock_user (p, arg2, arg3);
11552             if (arg4 != 0)
11553                 put_user_u32(prio, arg4);
11554         }
11555         return ret;
11556 
11557     /* Not implemented for now... */
11558 /*     case TARGET_NR_mq_notify: */
11559 /*         break; */
11560 
11561     case TARGET_NR_mq_getsetattr:
11562         {
11563             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11564             ret = 0;
11565             if (arg2 != 0) {
11566                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11567                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11568                                            &posix_mq_attr_out));
11569             } else if (arg3 != 0) {
11570                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11571             }
11572             if (ret == 0 && arg3 != 0) {
11573                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11574             }
11575         }
11576         return ret;
11577 #endif
11578 
11579 #ifdef CONFIG_SPLICE
11580 #ifdef TARGET_NR_tee
11581     case TARGET_NR_tee:
11582         {
11583             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11584         }
11585         return ret;
11586 #endif
11587 #ifdef TARGET_NR_splice
11588     case TARGET_NR_splice:
11589         {
11590             loff_t loff_in, loff_out;
11591             loff_t *ploff_in = NULL, *ploff_out = NULL;
11592             if (arg2) {
11593                 if (get_user_u64(loff_in, arg2)) {
11594                     return -TARGET_EFAULT;
11595                 }
11596                 ploff_in = &loff_in;
11597             }
11598             if (arg4) {
11599                 if (get_user_u64(loff_out, arg4)) {
11600                     return -TARGET_EFAULT;
11601                 }
11602                 ploff_out = &loff_out;
11603             }
11604             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11605             if (arg2) {
11606                 if (put_user_u64(loff_in, arg2)) {
11607                     return -TARGET_EFAULT;
11608                 }
11609             }
11610             if (arg4) {
11611                 if (put_user_u64(loff_out, arg4)) {
11612                     return -TARGET_EFAULT;
11613                 }
11614             }
11615         }
11616         return ret;
11617 #endif
11618 #ifdef TARGET_NR_vmsplice
11619     case TARGET_NR_vmsplice:
11620         {
11621             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11622             if (vec != NULL) {
11623                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11624                 unlock_iovec(vec, arg2, arg3, 0);
11625             } else {
11626                 ret = -host_to_target_errno(errno);
11627             }
11628         }
11629         return ret;
11630 #endif
11631 #endif /* CONFIG_SPLICE */
11632 #ifdef CONFIG_EVENTFD
11633 #if defined(TARGET_NR_eventfd)
11634     case TARGET_NR_eventfd:
11635         ret = get_errno(eventfd(arg1, 0));
11636         if (ret >= 0) {
11637             fd_trans_register(ret, &target_eventfd_trans);
11638         }
11639         return ret;
11640 #endif
11641 #if defined(TARGET_NR_eventfd2)
11642     case TARGET_NR_eventfd2:
11643     {
11644         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11645         if (arg2 & TARGET_O_NONBLOCK) {
11646             host_flags |= O_NONBLOCK;
11647         }
11648         if (arg2 & TARGET_O_CLOEXEC) {
11649             host_flags |= O_CLOEXEC;
11650         }
11651         ret = get_errno(eventfd(arg1, host_flags));
11652         if (ret >= 0) {
11653             fd_trans_register(ret, &target_eventfd_trans);
11654         }
11655         return ret;
11656     }
11657 #endif
11658 #endif /* CONFIG_EVENTFD  */
11659 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11660     case TARGET_NR_fallocate:
11661 #if TARGET_ABI_BITS == 32
11662         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11663                                   target_offset64(arg5, arg6)));
11664 #else
11665         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11666 #endif
11667         return ret;
11668 #endif
11669 #if defined(CONFIG_SYNC_FILE_RANGE)
11670 #if defined(TARGET_NR_sync_file_range)
11671     case TARGET_NR_sync_file_range:
11672 #if TARGET_ABI_BITS == 32
11673 #if defined(TARGET_MIPS)
11674         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11675                                         target_offset64(arg5, arg6), arg7));
11676 #else
11677         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11678                                         target_offset64(arg4, arg5), arg6));
11679 #endif /* !TARGET_MIPS */
11680 #else
11681         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11682 #endif
11683         return ret;
11684 #endif
11685 #if defined(TARGET_NR_sync_file_range2)
11686     case TARGET_NR_sync_file_range2:
11687         /* This is like sync_file_range but the arguments are reordered */
11688 #if TARGET_ABI_BITS == 32
11689         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11690                                         target_offset64(arg5, arg6), arg2));
11691 #else
11692         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11693 #endif
11694         return ret;
11695 #endif
11696 #endif
11697 #if defined(TARGET_NR_signalfd4)
11698     case TARGET_NR_signalfd4:
11699         return do_signalfd4(arg1, arg2, arg4);
11700 #endif
11701 #if defined(TARGET_NR_signalfd)
11702     case TARGET_NR_signalfd:
11703         return do_signalfd4(arg1, arg2, 0);
11704 #endif
11705 #if defined(CONFIG_EPOLL)
11706 #if defined(TARGET_NR_epoll_create)
11707     case TARGET_NR_epoll_create:
11708         return get_errno(epoll_create(arg1));
11709 #endif
11710 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11711     case TARGET_NR_epoll_create1:
11712         return get_errno(epoll_create1(arg1));
11713 #endif
11714 #if defined(TARGET_NR_epoll_ctl)
11715     case TARGET_NR_epoll_ctl:
11716     {
11717         struct epoll_event ep;
11718         struct epoll_event *epp = 0;
11719         if (arg4) {
11720             struct target_epoll_event *target_ep;
11721             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11722                 return -TARGET_EFAULT;
11723             }
11724             ep.events = tswap32(target_ep->events);
11725             /* The epoll_data_t union is just opaque data to the kernel,
11726              * so we transfer all 64 bits across and need not worry what
11727              * actual data type it is.
11728              */
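            /* (epoll_data_t's members - ptr, fd, u32 and u64 - all overlay
             * the same storage, so copying data.u64 preserves whichever
             * member the guest actually set.)
             */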
11729             ep.data.u64 = tswap64(target_ep->data.u64);
11730             unlock_user_struct(target_ep, arg4, 0);
11731             epp = &ep;
11732         }
11733         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11734     }
11735 #endif
11736 
11737 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11738 #if defined(TARGET_NR_epoll_wait)
11739     case TARGET_NR_epoll_wait:
11740 #endif
11741 #if defined(TARGET_NR_epoll_pwait)
11742     case TARGET_NR_epoll_pwait:
11743 #endif
11744     {
11745         struct target_epoll_event *target_ep;
11746         struct epoll_event *ep;
11747         int epfd = arg1;
11748         int maxevents = arg3;
11749         int timeout = arg4;
11750 
11751         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11752             return -TARGET_EINVAL;
11753         }
11754 
11755         target_ep = lock_user(VERIFY_WRITE, arg2,
11756                               maxevents * sizeof(struct target_epoll_event), 1);
11757         if (!target_ep) {
11758             return -TARGET_EFAULT;
11759         }
11760 
11761         ep = g_try_new(struct epoll_event, maxevents);
11762         if (!ep) {
11763             unlock_user(target_ep, arg2, 0);
11764             return -TARGET_ENOMEM;
11765         }
11766 
11767         switch (num) {
11768 #if defined(TARGET_NR_epoll_pwait)
11769         case TARGET_NR_epoll_pwait:
11770         {
11771             target_sigset_t *target_set;
11772             sigset_t _set, *set = &_set;
11773 
11774             if (arg5) {
11775                 if (arg6 != sizeof(target_sigset_t)) {
11776                     ret = -TARGET_EINVAL;
11777                     break;
11778                 }
11779 
11780                 target_set = lock_user(VERIFY_READ, arg5,
11781                                        sizeof(target_sigset_t), 1);
11782                 if (!target_set) {
11783                     ret = -TARGET_EFAULT;
11784                     break;
11785                 }
11786                 target_to_host_sigset(set, target_set);
11787                 unlock_user(target_set, arg5, 0);
11788             } else {
11789                 set = NULL;
11790             }
11791 
11792             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11793                                              set, SIGSET_T_SIZE));
11794             break;
11795         }
11796 #endif
11797 #if defined(TARGET_NR_epoll_wait)
11798         case TARGET_NR_epoll_wait:
11799             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11800                                              NULL, 0));
11801             break;
11802 #endif
11803         default:
11804             ret = -TARGET_ENOSYS;
11805         }
11806         if (!is_error(ret)) {
11807             int i;
11808             for (i = 0; i < ret; i++) {
11809                 target_ep[i].events = tswap32(ep[i].events);
11810                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11811             }
11812             unlock_user(target_ep, arg2,
11813                         ret * sizeof(struct target_epoll_event));
11814         } else {
11815             unlock_user(target_ep, arg2, 0);
11816         }
11817         g_free(ep);
11818         return ret;
11819     }
11820 #endif
11821 #endif
11822 #ifdef TARGET_NR_prlimit64
11823     case TARGET_NR_prlimit64:
11824     {
11825         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11826         struct target_rlimit64 *target_rnew, *target_rold;
11827         struct host_rlimit64 rnew, rold, *rnewp = 0;
11828         int resource = target_to_host_resource(arg2);
11829         if (arg3) {
11830             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11831                 return -TARGET_EFAULT;
11832             }
11833             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11834             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11835             unlock_user_struct(target_rnew, arg3, 0);
11836             rnewp = &rnew;
11837         }
11838 
11839         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11840         if (!is_error(ret) && arg4) {
11841             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11842                 return -TARGET_EFAULT;
11843             }
11844             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11845             target_rold->rlim_max = tswap64(rold.rlim_max);
11846             unlock_user_struct(target_rold, arg4, 1);
11847         }
11848         return ret;
11849     }
11850 #endif
11851 #ifdef TARGET_NR_gethostname
11852     case TARGET_NR_gethostname:
11853     {
11854         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11855         if (name) {
11856             ret = get_errno(gethostname(name, arg2));
11857             unlock_user(name, arg1, arg2);
11858         } else {
11859             ret = -TARGET_EFAULT;
11860         }
11861         return ret;
11862     }
11863 #endif
11864 #ifdef TARGET_NR_atomic_cmpxchg_32
11865     case TARGET_NR_atomic_cmpxchg_32:
11866     {
11867         /* should use start_exclusive from main.c */
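        /* As written, the load/compare/store sequence below is not atomic
         * with respect to other guest threads; wrapping it in
         * start_exclusive()/end_exclusive() would be needed to make the
         * compare-and-swap safe, hence the comment above.
         */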
11868         abi_ulong mem_value;
11869         if (get_user_u32(mem_value, arg6)) {
11870             target_siginfo_t info;
11871             info.si_signo = SIGSEGV;
11872             info.si_errno = 0;
11873             info.si_code = TARGET_SEGV_MAPERR;
11874             info._sifields._sigfault._addr = arg6;
11875             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11876                          QEMU_SI_FAULT, &info);
11877             ret = 0xdeadbeef;
11878 
11879         }
11880         if (mem_value == arg2)
11881             put_user_u32(arg1, arg6);
11882         return mem_value;
11883     }
11884 #endif
11885 #ifdef TARGET_NR_atomic_barrier
11886     case TARGET_NR_atomic_barrier:
11887         /* Like the kernel implementation and the
11888            qemu arm barrier, no-op this? */
11889         return 0;
11890 #endif
11891 
11892 #ifdef TARGET_NR_timer_create
11893     case TARGET_NR_timer_create:
11894     {
11895         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11896 
11897         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11898 
11899         int clkid = arg1;
11900         int timer_index = next_free_host_timer();
11901 
11902         if (timer_index < 0) {
11903             ret = -TARGET_EAGAIN;
11904         } else {
11905             timer_t *phtimer = g_posix_timers + timer_index;
11906 
11907             if (arg2) {
11908                 phost_sevp = &host_sevp;
11909                 ret = target_to_host_sigevent(phost_sevp, arg2);
11910                 if (ret != 0) {
11911                     return ret;
11912                 }
11913             }
11914 
11915             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11916             if (ret) {
11917                 phtimer = NULL;
11918             } else {
11919                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11920                     return -TARGET_EFAULT;
11921                 }
11922             }
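                /*
                 * The timer id handed back to the guest above encodes the
                 * index into g_posix_timers together with TIMER_MAGIC;
                 * get_timer_id() (used by timer_settime and friends below)
                 * reverses that encoding.
                 */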
11923         }
11924         return ret;
11925     }
11926 #endif
11927 
11928 #ifdef TARGET_NR_timer_settime
11929     case TARGET_NR_timer_settime:
11930     {
11931         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11932          * struct itimerspec * old_value */
11933         target_timer_t timerid = get_timer_id(arg1);
11934 
11935         if (timerid < 0) {
11936             ret = timerid;
11937         } else if (arg3 == 0) {
11938             ret = -TARGET_EINVAL;
11939         } else {
11940             timer_t htimer = g_posix_timers[timerid];
11941             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11942 
11943             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11944                 return -TARGET_EFAULT;
11945             }
11946             ret = get_errno(
11947                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11948             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11949                 return -TARGET_EFAULT;
11950             }
11951         }
11952         return ret;
11953     }
11954 #endif
11955 
11956 #ifdef TARGET_NR_timer_gettime
11957     case TARGET_NR_timer_gettime:
11958     {
11959         /* args: timer_t timerid, struct itimerspec *curr_value */
11960         target_timer_t timerid = get_timer_id(arg1);
11961 
11962         if (timerid < 0) {
11963             ret = timerid;
11964         } else if (!arg2) {
11965             ret = -TARGET_EFAULT;
11966         } else {
11967             timer_t htimer = g_posix_timers[timerid];
11968             struct itimerspec hspec;
11969             ret = get_errno(timer_gettime(htimer, &hspec));
11970 
11971             if (host_to_target_itimerspec(arg2, &hspec)) {
11972                 ret = -TARGET_EFAULT;
11973             }
11974         }
11975         return ret;
11976     }
11977 #endif
11978 
11979 #ifdef TARGET_NR_timer_getoverrun
11980     case TARGET_NR_timer_getoverrun:
11981     {
11982         /* args: timer_t timerid */
11983         target_timer_t timerid = get_timer_id(arg1);
11984 
11985         if (timerid < 0) {
11986             ret = timerid;
11987         } else {
11988             timer_t htimer = g_posix_timers[timerid];
11989             ret = get_errno(timer_getoverrun(htimer));
11990         }
11991         return ret;
11992     }
11993 #endif
11994 
11995 #ifdef TARGET_NR_timer_delete
11996     case TARGET_NR_timer_delete:
11997     {
11998         /* args: timer_t timerid */
11999         target_timer_t timerid = get_timer_id(arg1);
12000 
12001         if (timerid < 0) {
12002             ret = timerid;
12003         } else {
12004             timer_t htimer = g_posix_timers[timerid];
12005             ret = get_errno(timer_delete(htimer));
12006             g_posix_timers[timerid] = 0;
12007         }
12008         return ret;
12009     }
12010 #endif
12011 
12012 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12013     case TARGET_NR_timerfd_create:
12014         return get_errno(timerfd_create(arg1,
12015                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12016 #endif
12017 
12018 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12019     case TARGET_NR_timerfd_gettime:
12020         {
12021             struct itimerspec its_curr;
12022 
12023             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12024 
12025             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12026                 return -TARGET_EFAULT;
12027             }
12028         }
12029         return ret;
12030 #endif
12031 
12032 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12033     case TARGET_NR_timerfd_settime:
12034         {
12035             struct itimerspec its_new, its_old, *p_new;
12036 
12037             if (arg3) {
12038                 if (target_to_host_itimerspec(&its_new, arg3)) {
12039                     return -TARGET_EFAULT;
12040                 }
12041                 p_new = &its_new;
12042             } else {
12043                 p_new = NULL;
12044             }
12045 
12046             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12047 
12048             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12049                 return -TARGET_EFAULT;
12050             }
12051         }
12052         return ret;
12053 #endif
12054 
12055 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12056     case TARGET_NR_ioprio_get:
12057         return get_errno(ioprio_get(arg1, arg2));
12058 #endif
12059 
12060 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12061     case TARGET_NR_ioprio_set:
12062         return get_errno(ioprio_set(arg1, arg2, arg3));
12063 #endif
12064 
12065 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12066     case TARGET_NR_setns:
12067         return get_errno(setns(arg1, arg2));
12068 #endif
12069 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12070     case TARGET_NR_unshare:
12071         return get_errno(unshare(arg1));
12072 #endif
12073 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12074     case TARGET_NR_kcmp:
12075         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12076 #endif
12077 #ifdef TARGET_NR_swapcontext
12078     case TARGET_NR_swapcontext:
12079         /* PowerPC specific.  */
12080         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12081 #endif
12082 #ifdef TARGET_NR_memfd_create
12083     case TARGET_NR_memfd_create:
12084         p = lock_user_string(arg1);
12085         if (!p) {
12086             return -TARGET_EFAULT;
12087         }
12088         ret = get_errno(memfd_create(p, arg2));
12089         fd_trans_unregister(ret);
12090         unlock_user(p, arg1, 0);
12091         return ret;
12092 #endif
12093 
12094     default:
12095         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12096         return -TARGET_ENOSYS;
12097     }
12098     return ret;
12099 }
12100 
12101 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12102                     abi_long arg2, abi_long arg3, abi_long arg4,
12103                     abi_long arg5, abi_long arg6, abi_long arg7,
12104                     abi_long arg8)
12105 {
12106     CPUState *cpu = env_cpu(cpu_env);
12107     abi_long ret;
12108 
12109 #ifdef DEBUG_ERESTARTSYS
12110     /* Debug-only code for exercising the syscall-restart code paths
12111      * in the per-architecture cpu main loops: restart every syscall
12112      * the guest makes once before letting it through.
12113      */
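    /*
     * Returning -TARGET_ERESTARTSYS makes the per-architecture cpu_loop
     * restart the syscall (typically by rewinding the guest PC to the
     * syscall instruction); the static flag below flips on every entry, so
     * each syscall is restarted exactly once before being allowed through.
     */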
12114     {
12115         static bool flag;
12116         flag = !flag;
12117         if (flag) {
12118             return -TARGET_ERESTARTSYS;
12119         }
12120     }
12121 #endif
12122 
12123     record_syscall_start(cpu, num, arg1,
12124                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12125 
12126     if (unlikely(do_strace)) {
12127         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12128         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12129                           arg5, arg6, arg7, arg8);
12130         print_syscall_ret(num, ret);
12131     } else {
12132         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12133                           arg5, arg6, arg7, arg8);
12134     }
12135 
12136     record_syscall_return(cpu, num, ret);
12137     return ret;
12138 }
12139